diff --git a/CHANGELOG.md b/CHANGELOG.md index e3c0aba8e0..068a09b8ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,42 +1,159 @@ # SQLCipher Change Log -All notable changes to this project will be documented in this file. +Notable changes to this project are documented in this file. + +## [4.12.0] - (December 2025 - [4.12.0 changes]) +- Updates baseline to SQLite 3.51.1 +- Adds `PRAGMA cipher_status` so applications can verify a database handle is using encryption +- Improves guards against key/rekey/attach misuse +- Adds criteria for `PRAGMA cipher_migrate` tests +- Fixes check for `__has_feature` macro to separate it from use +- Fixes CHANGELOG.md markdown formatting, typos, and inline code snippets +- Fixes conditional in SQLCipher pragma handling +- Removes deprecated providers for LibTomCrypt and NSS +- Removes unnecessary shutdown and URI config changes in core tests +- Ensures all test suite database handles are closed before delete + +## [4.11.0] - (October 2025 - [4.11.0 changes]) +- Converts log output to UTF-16 when writing to stdout or stderr on Windows +- Fixes scope issues to allow `--disable-amalgamation` to work properly +- Replaces fortuna seeding mechanism for libtomcrypt with `rng_get_bytes()` +- Removes CocoaPods support (`SQLCipher.podspec.json`) +- Fixes includes and macros to support non-amalgamated builds +- Fixes check for `__has_feature` to resolve issue with compilers that don't support it +- Corrects return value from `sqlcipher_fprintf` +- Fixes use of provider `free_ctx` +- Fixes some compiler warnings + +## [4.10.0] - (August 2025 - [4.10.0 changes]) +- Updates baseline to SQLite 3.50.4 +- Allows compile time override of default log level via `SQLCIPHER_LOG_LEVEL_DEFAULT` macro +- Fixes issue building with `-fsanitize=address` on macOS +- Fixes detection of CommonCrypto version on macOS +- Improves CommonCrypto version detection on iOS + +## [4.9.0] - (May 2025 - [4.9.0 changes]) +- Updates baseline to upstream SQLite 3.49.2 +- Removes use of static mutex in `sqlcipher_extra_shutdown()` + +## [4.8.0] - (April 2025 - [4.8.0 changes]) +- Fixes regression in `PRAGMA cipher_migrate` where an error would be thrown when migrating a current-version database +- Adds selective locking in critical sections of the library for shared cache connections (Note: use of shared cache is still strongly discouraged) +- Standardizes initial private heap size to 48KB to ensure `mlock` under constrained limits +- Removes changes to windows working set sizes +- Improvements to logging of memory stats and other cleanup + +## [4.7.0] - (March 2025 - [4.7.0 changes]) +- Updates baseline to upstream SQLite 3.49.1, including complete upstream SQLite refactoring of build system to use autosetup +- Significantly refactors and optimizes library initialization and cleanup +- Allocates majority of requisite memory at startup to improve memory locking on constrained platforms (i.e. 
Android and Windows) and reduce fragmentation +- Expands `sqlcipher_provider` interface to include `init` and `shutdown` functions +- Adds support for `.recover` shell command on corrupt databases with a full plaintext first page +- Performs fast random overwrite of freed memory segments for improved security +- Adds basic obfuscation of context key material for improved security +- Generates keyspecs dynamically on demand instead of storing them +- Expands keyspec/raw key format to accept key, HMAC key, and salt +- Improves error handling in `sqlcipher_export()` and `PRAGMA cipher_migrate` +- Allows setting custom compile-time default cryptographic provider via the `SQLCIPHER_CRYPTO_CUSTOM` macro +- Removes support for end-of-life OpenSSL versions older than 3.0 +- __BREAKING CHANGE__: `SELECT` statements (now also including schema independent queries like `SELECT 1`) cannot be executed on encrypted databases prior to setting the database key (behavior inherited from upstream SQLite) +- __BREAKING CHANGE__: Renames `configure` flag `--enable-tempstore=yes` to `--with-tempstore=yes` for alignment with SQLite (change required for upstream SQLite autosetup) +- __BREAKING CHANGE__: Renames default executable and library build outputs from `sqlcipher` and `libsqlcipher` to `sqlite3` and `libsqlite3` (for alignment with SQLite) +- __BREAKING CHANGE__: Removes `configure` flag `--with-crypto-lib` (replace with appropriate `-DSQLCIPHER_CRYPTO_*` CFLAG) +- __BREAKING CHANGE__: Requires defining `SQLITE_EXTRA_INIT=sqlcipher_extra_init` and `SQLITE_EXTRA_SHUTDOWN=sqlcipher_extra_shutdown` at compile time for optimized library initialization and cleanup +- __BREAKING CHANGE__: Enforces thread safe mode (i.e. `SQLITE_THREADSAFE` of 1 or 2) and temporary storage (i.e. `SQLITE_TEMP_STORE` of 2 or 3) settings at compile time + +## [4.6.1] - (August 2024 - [4.6.1 changes]) +- Updates baseline to upstream SQLite 3.46.1 +- Significant refactor to merge `crypto.h`, `crypto.c`, and `crypto_impl.c` into a single `sqlcipher.c` source file for simplicity. 
+- Updates minimum working set size on Windows to increase lockable pages +- Adds new `PRAGMA cipher_log_source` for filtering log output on higher verbosity levels +- Improves log output by including the log level and source prior to message +- Improves error logging in `PRAGMA cipher_migrate` +- Fixes issue where log level and target would be overwritten if set prior to initialization +- Corrects Podspec license element to use the specific BSD 3-Clause license +- Fixes default log output to console for macOS + +## [4.6.0] - (May 2024 - [4.6.0 changes]) +- Sets default log level to WARN +- Sends default log output to: logcat for Android; Console for iOS and macOS; and stderr for all other platforms +- General improvements to log level assignments, output, and sanitization +- Fixes Apple Privacy Manifest by removing empty NSPrivacyCollectedDataType from PrivacyInfo.xcprivacy +- Moves Swift support defines for podspec user_target_xcconfig so they only apply to the consuming project + +## [4.5.7] - (April 2024 - [4.5.7 changes]) +- Updates baseline to upstream SQLite 3.45.3 +- Adds "device" logging and profile target using `os_log` for Apple (and logcat on Android) +- Fixes issues compiling with `SQLITE_OMIT_LOG` +- Fixes malformed man page caused by old merge conflict +- Updates podspec for current Xcode versions, improved Swift support, and Privacy Manifest + +## [4.5.6] - (January 2024 - [4.5.6 changes]) +- Updates baseline to upstream SQLite 3.44.2 +- Improves `PRAGMA cipher_integrity_check` to report expected page size if invalid +- Implements `PRAGMA page_size` compatibility with `PRAGMA cipher_page_size` so both will operate properly on encrypted databases +- Updates `LICENSE.md` with SQLCipher license to avoid ambiguity and remove redundancy + +## [4.5.5] - (August 2023 - [4.5.5 changes]) +- Updates baseline to upstream SQLite 3.42.0 +- Do not allow key to be changed on a connection after it has been successfully used for an encryption or decryption operation to prevent accidental database corruption +- Raise an error if a rekey operation is attempted on an unencrypted database +- Raise an error when a key or rekey operation is passed an empty key +- Minor improvements to constant time functions +- Miscellaneous code and comment cleanup + +## [4.5.4] - (April 2023 - [4.5.4 changes]) +- Updates baseline to upstream SQLite 3.41.2 +- Updates minimum Apple SDK versions in podspec for new Xcode compatibility +- Return runtime OpenSSL version from `PRAGMA cipher_provider_version` (instead of hardcoded value) +- Adds guard against zero block size and crash if cryptographic provider initialization fails +- When an ATTACH that creates a new encrypted database is the first operation after keying the main database, the new database will have the same salt value. 
+ +## [4.5.3] - (December 2022 - [4.5.3 changes]) +- Updates baseline to upstream SQLite 3.39.4 + +## [4.5.2] - (August 2022 - [4.5.2 changes]) +- Updates source code baseline to upstream SQLite 3.39.2 +- Simplifies OpenSSL version conditional code +- Fixes issue where `PRAGMA cipher_memory_security` could report OFF when it was actually ON +- Fixes unfreed OpenSSL allocation when compiled against version 3 +- Fixes support for building against recent versions of BoringSSL ## [4.5.1] - (March 2022 - [4.5.1 changes]) - Updates source code baseline to upstream SQLite 3.37.2 -- Adds PRAGMA cipher_log and cipher_log_level features to allow logging of TRACE, DEBUG, INFO, WARN, and ERROR messages to stdout, stderr, file, or logcat -- Modifies PRAGMA cipher_profile to use sqlite3_trace_v2 and adds logcat target for Android -- Updates OpenSSL provider to use EVP_MAC API with version 3+ -- Adds new PRAGMA cipher_test_on, cipher_test_off, and cipher_test_rand (available when compiled with -DSQLCIPHER_TEST) to facilitate simulation of error conditions -- Fixes PRAGMA cipher_integrity_check to work properly with databases larger that 2GB -- Fixes missing munlock before free for context internal buffer (thanks to Fedor Indutny) +- Adds `PRAGMA cipher_log` and `PRAGMA cipher_log_level` features to allow logging of TRACE, DEBUG, INFO, WARN, and ERROR messages to stdout, stderr, file, or logcat +- Modifies `PRAGMA cipher_profile` to use `sqlite3_trace_v2` and adds logcat target for Android +- Updates OpenSSL provider to use `EVP_MAC` API with version 3+ +- Adds new `PRAGMA cipher_test_on`, `PRAGMA cipher_test_off`, and `PRAGMA cipher_test_rand` (available when compiled with `-DSQLCIPHER_TEST`) to facilitate simulation of error conditions +- Fixes `PRAGMA cipher_integrity_check` to work properly with databases larger than 2GB +- Fixes missing `munlock` before free for context internal buffer (thanks to Fedor Indutny) ## [4.5.0] - (October 2021 - [4.5.0 changes]) - Updates baseline to upstream SQLite 3.36.0 -- Changes the enhanced memory security feature to be DISABLED by default; once enabled by PRAGMA cipher_memory_security = ON, it can't be turned off for the lifetime of the process -- Changes PRAGMA cipher_migrate to permanently enter an error state if a migration fails +- Changes the enhanced memory security feature to be DISABLED by default; once enabled by `PRAGMA cipher_memory_security = ON`, it can't be turned off for the lifetime of the process +- Changes `PRAGMA cipher_migrate` to permanently enter an error state if a migration fails - Fixes memory locking/unlocking issue with realloc implementation on hardened runtimes when memory security is enabled -- Fixes cipher_migrate to cleanup the temporary database if a migration fails +- Fixes `PRAGMA cipher_migrate` to clean up the temporary database if a migration fails - Removes logging of non-string pointers when compiling with trace level logging ## [4.4.3] - (February 2021 - [4.4.3 changes]) -- Updates baseline to ustream SQLite 3.34.1 -- Fixes sqlcipher_export handling of NULL parameters +- Updates baseline to upstream SQLite 3.34.1 +- Fixes `sqlcipher_export` handling of NULL parameters - Removes randomization of rekey-delete tests to avoid false test failures -- Changes internal usage of sqlite_master to sqlite_schema -- Omits unusued profiling function under certain defines to avoid compiler warnings +- Changes internal usage of `sqlite_master` to `sqlite_schema` +- Omits unused profiling function under certain defines to avoid compiler 
warnings ## [4.4.2] - (November 2020 - [4.4.2 changes]) -- Improve error handling to resolve potential corruption if an encryption operation failed while operating in WAL mode +- Improves error handling to resolve potential corruption if an encryption operation failed while operating in WAL mode - Changes to OpenSSL library cryptographic provider to reduce initialization complexity -- Adjust cipher_integrity_check to skip locking page to avoid a spurious error report for very large databases +- Adjust `cipher_integrity_check` to skip locking page to avoid a spurious error report for very large databases - Miscellaneous code and comment cleanup ## [4.4.1] - (October 2020 - [4.4.1 changes]) - Updates baseline to upstream SQLite 3.33.0 -- Fixes double-free bug in cipher_default_plaintext_header_size +- Fixes double-free bug in `cipher_default_plaintext_header_size` - Changes SQLCipher tests to use suite runner -- Improvement to cipher_integrity_check tests to minimize false negatives -- Deprecates PRAGMA cipher_store_pass +- Improvement to `cipher_integrity_check` tests to minimize false negatives +- Deprecates `PRAGMA cipher_store_pass` ## [4.4.0] - (May 2020 - [4.4.0 changes]) - Updates baseline to upstream SQLite 3.31.0 @@ -46,40 +163,40 @@ All notable changes to this project will be documented in this file. ## [4.3.0] - (November 2019 - [4.3.0 changes]) - Updates baseline to upstream SQLite 3.30.1 -- PRAGMA key now returns text result value "ok" after execution +- `PRAGMA key` now returns text result value "ok" after execution - Adjusts backup API so that encrypted to encrypted backups are permitted - Adds NSS crypto provider implementation - Fixes OpenSSL provider compatibility with BoringSSL - Separates memory related traces to reduce verbosity of logging -- Fixes output of PRAGMA cipher_integrity_check on big endian platforms -- Cryptograpic provider interface cleanup +- Fixes output of `PRAGMA cipher_integrity_check` on big endian platforms +- Cryptographic provider interface cleanup - Rework of mutex allocation and management - Resolves miscellaneous build warnings - Force error state at database pager level if SQLCipher initialization fails ## [4.2.0] - (May 2019 - [4.2.0 changes]) -- Adds PRAGMA cipher_integrity_check to perform independent verification of page HMACs +- Adds `PRAGMA cipher_integrity_check` to perform independent verification of page HMACs - Updates baseline to upstream SQLite 3.28.0 -- Improves PRAGMA cipher_migrate to handle keys containing non-terminating zero bytes +- Improves `PRAGMA cipher_migrate` to handle keys containing non-terminating zero bytes ## [4.1.0] - (March 2019 - [4.1.0 changes]) - Defer reading salt from header until key derivation is triggered -- Clarify usage of sqlite3_rekey for plaintext databases in header +- Clarify usage of `sqlite3_rekey` for plaintext databases in header - Normalize attach behavior when key is not yet derived -- Adds PRAGMA cipher_settings to query current database codec settings -- Adds PRAGMA cipher_default_settings to query current default SQLCipher options -- PRAGMA cipher_hmac_pgno is now deprecated -- PRAGMA cipher_hmac_salt_mask is now deprecated -- PRAGMA fast_kdf_iter is now deprecated -- Improve sqlcipher_export routine and restore all database flags -- Clear codec data buffers if a crypographic provider operation fails +- Adds `PRAGMA cipher_settings` to query current database codec settings +- Adds `PRAGMA cipher_default_settings` to query current default SQLCipher options +- `PRAGMA cipher_hmac_pgno` is now 
deprecated +- `PRAGMA cipher_hmac_salt_mask` is now deprecated +- `PRAGMA fast_kdf_iter` is now deprecated +- Improve `sqlcipher_export` routine and restore all database flags +- Clear codec data buffers if a cryptographic provider operation fails - Disable backup API for encrypted databases (this was previously documented as not-working and non-supported, but will now explicitly error out on initialization) - Updates baseline to upstream SQLite 3.27.2 ## [4.0.1] - (December 2018 - [4.0.1 changes]) - Based on upstream SQLite 3.26.0 (addresses SQLite “Magellan” issue) -- Adds PRAGMA cipher_compatibility and cipher_default_compatibility which take automatcially configure appropriate compatibility settings for the specified SQLCipher major version number -- Filters attach statements with KEY parameters from readline history +- Adds `PRAGMA cipher_compatibility` and `PRAGMA cipher_default_compatibility` which automatically configure appropriate compatibility settings for the specified SQLCipher major version number +- Filters attach statements with `KEY` parameters from readline history - Fixes crash in command line shell with empty input (i.e. ^D) - Fixes warnings when compiled with strict-prototypes @@ -89,32 +206,32 @@ All notable changes to this project will be documented in this file. - Default PBKDF2 iterations increased to 256,000 (up from 64,000) * - Default KDF algorithm is now PBKDF2-HMAC-SHA512 (from PBKDF2-HMAC-SHA1) * - Default HMAC algorithm is now HMAC-SHA512 (from HMAC-SHA1) * -- PRAGMA cipher is now disabled and no longer supported (after multi-year deprecation) * -- PRAGMA rekey_cipher is now disabled and no longer supported * -- PRAGMA rekey_kdf_iter is now disabled and no longer supported * -- By default all memory allocated internally by SQLite before the memory is wiped before it is freed -- PRAGMA cipher_memory_security: allows full memory wiping to be disabled for performance when the feature is not required -- PRAGMA cipher_kdf_algorithm, cipher_default_kdf_algorithm to control KDF algorithm selection between PBKDF2-HMAC-SHA1, PBKDF2-HMAC-SHA256 and PBKDF2-HMAC-SHA512 -- PRAGMA cipher_hmac_algorithm, cipher_default_hmac_algorithm to control HMAC algorithm selection between HMAC-SHA1, HMAC-SHA256 and PBKDF2-HMAC-SHA512 +- `PRAGMA cipher` is now disabled and no longer supported (after multi-year deprecation) * +- `PRAGMA rekey_cipher` is now disabled and no longer supported * +- `PRAGMA rekey_kdf_iter` is now disabled and no longer supported * +- By default all memory allocated internally by SQLite is wiped before it is freed +- `PRAGMA cipher_memory_security`: allows full memory wiping to be disabled for performance when the feature is not required +- `PRAGMA cipher_kdf_algorithm`, `PRAGMA cipher_default_kdf_algorithm` to control KDF algorithm selection between PBKDF2-HMAC-SHA1, PBKDF2-HMAC-SHA256 and PBKDF2-HMAC-SHA512 +- `PRAGMA cipher_hmac_algorithm`, `PRAGMA cipher_default_hmac_algorithm` to control HMAC algorithm selection between HMAC-SHA1, HMAC-SHA256 and HMAC-SHA512 - Based on upstream SQLite 3.25.2 -- When compiled with readline support, PRAGMA key and rekey lines will no longer be +- When compiled with readline support, `PRAGMA key` and `PRAGMA rekey` lines will no longer be saved to history -- Adds second optional parameter to sqlcipher_export to specify source database to +- Adds second optional parameter to `sqlcipher_export` to specify source database to support bidirectional exports - Fixes compatibility with LibreSSL 2.7.0+ - Fixes compatibility 
with OpenSSL 1.1.x -- Simplified and improved performance for PRAGMA cipher_migrate when migrating older database versions +- Simplified and improved performance for `PRAGMA cipher_migrate` when migrating older database versions - Refactoring of SQLCipher tests into separate files by test type -- PRAGMA cipher_plaintext_header_size and cipher_default_plaintext_header_size: allocates a portion of the database header which will not be encrypted to allow identification as a SQLite database -- PRAGMA cipher_salt: retrieve or set the salt value for the database +- `PRAGMA cipher_plaintext_header_size` and `PRAGMA cipher_default_plaintext_header_size`: allocates a portion of the database header which will not be encrypted to allow identification as a SQLite database +- `PRAGMA cipher_salt`: retrieve or set the salt value for the database - Adds Podspec for using tagged versions of SQLCipher -- Define SQLCIPHER_PROFILE_USE_FOPEN for WinXP support +- Define `SQLCIPHER_PROFILE_USE_FOPEN` for WinXP support - Improved error handling for cryptographic providers -- Improved memory handling for PRAGMA commands that return values +- Improved memory handling for `PRAGMA` commands that return values - Improved version reporting to assist with identification of distribution - Major rewrite and simplification of internal codec and pager extension -- Fixes compilation with --disable-amalgamation -- Removes sqlcipher.xcodeproj build support +- Fixes compilation with `--disable-amalgamation` +- Removes `sqlcipher.xcodeproj` build support ## [3.4.2] - (December 2017 - [3.4.2 changes]) ### Added @@ -126,7 +243,7 @@ All notable changes to this project will be documented in this file. - Remove static modifier for codec password functions - Page alignment for `mlock` - Fix segfault in `sqlcipher_cipher_ctx_cmp` during rekey operation -- Fix `sqlcipher_export` and `cipher_migrate` when tracing API in use +- Fix `sqlcipher_export` and `PRAGMA cipher_migrate` when tracing API in use - Validate codec page size when setting - Guard OpenSSL initialization and cleanup routines - Allow additional linker options to be passed via command line for Windows platforms @@ -169,7 +286,7 @@ All notable changes to this project will be documented in this file. ### Changed - Merged upstream SQLite 3.8.6 -- Renmed README to README.md +- Renamed `README` to `README.md` ## [3.1.0] - (April 2014 - [3.1.0 changes]) ### Added @@ -192,7 +309,7 @@ All notable changes to this project will be documented in this file. ### Changed - Merged upstream SQLite 3.8.0.2 -- Remove usage of VirtualLock/Unlock on WinRT and Windows Phone +- Remove usage of `VirtualLock/Unlock` on WinRT and Windows Phone - Ignore HMAC read during Btree file copy - Fix lib naming for pkg-config - Use _v2 version of `sqlite3_key` and `sqlite3_rekey` @@ -201,7 +318,34 @@ All notable changes to this project will be documented in this file. 
### Security - Change KDF iteration length from 4,000 to 64,000 -[unreleased]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.1...prerelease +[4.12.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.12.0 +[4.12.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.11.0...v4.12.0 +[4.11.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.11.0 +[4.11.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.10.0...v4.11.0 +[4.10.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.10.0 +[4.10.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.9.0...v4.10.0 +[4.9.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.9.0 +[4.9.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.8.0...v4.9.0 +[4.8.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.8.0 +[4.8.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.7.0...v4.8.0 +[4.7.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.7.0 +[4.7.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.6.1...v4.7.0 +[4.6.1]: https://github.com/sqlcipher/sqlcipher/tree/v4.6.1 +[4.6.1 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.6.0...v4.6.1 +[4.6.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.6.0 +[4.6.0 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.7...v4.6.0 +[4.5.7]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.7 +[4.5.7 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.6...v4.5.7 +[4.5.6]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.6 +[4.5.6 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.5...v4.5.6 +[4.5.5]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.5 +[4.5.5 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.4...v4.5.5 +[4.5.4]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.4 +[4.5.4 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.3...v4.5.4 +[4.5.3]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.3 +[4.5.3 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.2...v4.5.3 +[4.5.2]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.2 +[4.5.2 changes]: https://github.com/sqlcipher/sqlcipher/compare/v4.5.1...v4.5.2 [4.5.1]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.1 [4.5.1 changes]: 
https://github.com/sqlcipher/sqlcipher/compare/v4.5.0...v4.5.1 [4.5.0]: https://github.com/sqlcipher/sqlcipher/tree/v4.5.0 diff --git a/LICENSE.md b/LICENSE.md index f68a6c175f..3f71443161 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -1,6 +1,24 @@ -The author disclaims copyright to this source code. In place of -a legal notice, here is a blessing: +Copyright (c) 2025, ZETETIC LLC +All rights reserved. - * May you do good and not evil. - * May you find forgiveness for yourself and forgive others. - * May you share freely, never taking more than you give. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of the ZETETIC LLC nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY ZETETIC LLC ''AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL ZETETIC LLC BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/LICENSE b/LICENSE.txt similarity index 97% rename from LICENSE rename to LICENSE.txt index bebe1e7eb1..3f71443161 100644 --- a/LICENSE +++ b/LICENSE.txt @@ -1,4 +1,4 @@ -Copyright (c) 2008, ZETETIC LLC +Copyright (c) 2025, ZETETIC LLC All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/Makefile.in b/Makefile.in index ad4be7b867..a597a86a35 100644 --- a/Makefile.in +++ b/Makefile.in @@ -1,130 +1,230 @@ -#!/usr/make +#!/usr/bin/make +# ^^^^ help out editors which guess this file's type. # # Makefile for SQLITE # -# This makefile is suppose to be configured automatically using the -# autoconf. But if that does not work for you, you can configure -# the makefile manually. Just set the parameters below to values that -# work well for your system. +# This makefile is intended to be configured automatically using the +# configure script. # -# If the configure script does not work out-of-the-box, you might -# be able to get it to work by giving it some hints. See the comment -# at the beginning of configure.in for additional information. +# The docs for many of its variables are in the primary static +# makefile, main.mk (which this one includes at runtime). # - -# The toplevel directory of the source tree. This is the directory -# that contains this "Makefile.in" and the "configure.in" script. 
+all: +######################################################################## +# Maintenance reminders: # -TOP = @abs_srcdir@ - - -# C Compiler and options for use in building executables that -# will run on the platform that is doing the build. +# - This makefile should remain as POSIX-make-compatible as possible: +# https://pubs.opengroup.org/onlinepubs/9799919799/utilities/make.html # -BCC = @BUILD_CC@ @BUILD_CFLAGS@ - -# TCC is the C Compile and options for use in building executables that -# will run on the target platform. (BCC and TCC are usually the -# same unless your are cross-compiling.) Separate CC and CFLAGS macros -# are provide so that these aspects of the build process can be changed -# on the "make" command-line. Ex: "make CC=clang CFLAGS=-fsanitize=undefined" +# - The naming convention of some vars, using periods instead of +# underscores, though unconventional, was selected for a couple of +# reasons: 1) Personal taste (for which there is no accounting). 2) +# It is thought to help defend against inadvertent injection of +# those vars via environment variables (because X.Y is not a legal +# environment variable name). "Feature or bug?" is debatable and +# this naming convention may be reverted if it causes any grief. # -CC = @CC@ -CFLAGS = @CPPFLAGS@ @CFLAGS@ -TCC = ${CC} ${CFLAGS} -I. -I${TOP}/src -I${TOP}/ext/rtree -I${TOP}/ext/icu -TCC += -I${TOP}/ext/fts3 -I${TOP}/ext/async -I${TOP}/ext/session -TCC += -I${TOP}/ext/userauth -# Define this for the autoconf-based build, so that the code knows it can -# include the generated config.h # -TCC += -D_HAVE_SQLITE_CONFIG_H -DBUILD_sqlite - -# Define -DNDEBUG to compile without debugging (i.e., for production usage) -# Omitting the define will cause extra debugging code to be inserted and -# includes extra comments when "EXPLAIN stmt" is used. +# The top-most directory of the source tree. This is the directory +# that contains this "Makefile.in" and the "configure" script. # -TCC += @TARGET_DEBUG@ +TOP = @abs_top_srcdir@ -# Compiler options needed for programs that use the TCL library. # -TCC += @TCL_INCLUDE_SPEC@ - -# The library that programs using TCL must link against. +# Autotools-conventional vars which are used by package installation +# rules in main.mk. To get sane handling when a user overrides only +# a subset of these, we perform some acrobatics with these vars +# in the configure script: see [proj-remap-autoconf-dir-vars] for +# full details. # -LIBTCL = @TCL_LIB_SPEC@ - -# Compiler options needed for programs that use the readline() library. +# For completeness's sake, the aforementioned conventional vars which +# are relevant to our installation rules are: # -READLINE_FLAGS = -DHAVE_READLINE=@TARGET_HAVE_READLINE@ @TARGET_READLINE_INC@ -READLINE_FLAGS += -DHAVE_EDITLINE=@TARGET_HAVE_EDITLINE@ - -# The library that programs using readline() must link against. 
+# datadir = $(prefix)/share +# mandir = $(datadir)/man +# includedir = $(prefix)/include +# exec_prefix = $(prefix) +# bindir = $(exec_prefix)/bin +# libdir = $(exec_prefix)/lib # -LIBREADLINE = @TARGET_READLINE_LIBS@ - -# Should the database engine be compiled threadsafe +# Our builds do not require any of their relatives: +# +# sbindir = $(exec_prefix)/sbin +# sysconfdir = /etc +# sharedstatedir = $(prefix)/com +# localstatedir = /var +# runstatedir = /run +# infodir = $(datadir)/info +# libexecdir = $(exec_prefix)/libexec # -TCC += -DSQLITE_THREADSAFE=@SQLITE_THREADSAFE@ +prefix = @prefix@ +datadir = @datadir@ +mandir = @mandir@ +includedir = @includedir@ +exec_prefix = @exec_prefix@ +bindir = @bindir@ +libdir = @libdir@ -# Any target libraries which libsqlite must be linked against +INSTALL = @BIN_INSTALL@ +AR = @AR@ +AR.flags = cr +CC = @CC@ +B.cc = @CC_FOR_BUILD@ @BUILD_CFLAGS@ +T.cc = $(CC) +# +# $(CFLAGS) is problematic because it is frequently overridden when +# invoking make, which loses things like -fPIC. So... we avoid using +# it directly and instead add a level of indirection. We combine +# $(CFLAGS) and $(CPPFLAGS) here because that's the way the legacy +# build did it and many builds rely on that. See main.mk for more +# details. # -TLIBS = @LIBS@ $(LIBS) +# Historical note: the pre-3.48 build only honored CPPFLAGS at +# configure-time, and expanded them into the generated Makefile. There +# are, in that build, no uses of CPPFLAGS in the configure-expanded +# Makefile. Ergo: if a client configures with CPPFLAGS=... and then +# explicitly passes CFLAGS=... to make, the CPPFLAGS will be +# lost. That behavior is retained in 3.48+. +# +CFLAGS = @CFLAGS@ @CPPFLAGS@ +# +# $(LDFLAGS.configure) represents any LDFLAGS=... the client passes to +# configure. See main.mk. +# +LDFLAGS.configure = @LDFLAGS@ -# Flags controlling use of the in memory btree implementation # -# SQLITE_TEMP_STORE is 0 to force temporary tables to be in a file, 1 to -# default to file, 2 to default to memory, and 3 to force temporary -# tables to always be in memory. +# CFLAGS.core is documented in main.mk. # -TEMP_STORE = -DSQLITE_TEMP_STORE=@TEMP_STORE@ +CFLAGS.core = @SH_CFLAGS@ +LDFLAGS.shlib = @SH_LDFLAGS@ +LDFLAGS.zlib = @LDFLAGS_ZLIB@ +LDFLAGS.math = @LDFLAGS_MATH@ +LDFLAGS.rpath = @LDFLAGS_RPATH@ +LDFLAGS.pthread = @LDFLAGS_PTHREAD@ +LDFLAGS.dlopen = @LDFLAGS_DLOPEN@ +LDFLAGS.readline = @LDFLAGS_READLINE@ +CFLAGS.readline = @CFLAGS_READLINE@ +LDFLAGS.icu = @LDFLAGS_ICU@ +LDFLAGS.rt = @LDFLAGS_RT@ +CFLAGS.icu = @CFLAGS_ICU@ +LDFLAGS.libsqlite3.soname = @LDFLAGS_LIBSQLITE3_SONAME@ +# soname: see https://sqlite.org/src/forumpost/5a3b44f510df8ded +LDFLAGS.libsqlite3.os-specific = \ + @LDFLAGS_MAC_CVERSION@ @LDFLAGS_MAC_INSTALL_NAME@ @LDFLAGS_OUT_IMPLIB@ +# os-specific: see +# - https://sqlite.org/forum/forumpost/9dfd5b8fd525a5d7 +# - https://sqlite.org/forum/forumpost/0c7fc097b2 +# - https://sqlite.org/forum/forumpost/5651662b8875ec0a + +libsqlite3.DLL.basename = @SQLITE_DLL_BASENAME@ +# DLL.basename: see https://sqlite.org/forum/forumpost/828fdfe904 +libsqlite3.out.implib = @SQLITE_OUT_IMPLIB@ +# libsqlite3.out.implib => the output filename part of LDFLAGS_OUT_IMPLIB. 
+ENABLE_LIB_SHARED = @ENABLE_LIB_SHARED@ +ENABLE_LIB_STATIC = @ENABLE_LIB_STATIC@ +HAVE_WASI_SDK = @HAVE_WASI_SDK@ +libsqlite3.DLL.install-rules = @SQLITE_DLL_INSTALL_RULES@ -# Enable/disable loadable extensions, and other optional features -# based on configuration. (-DSQLITE_OMIT*, -DSQLITE_ENABLE*). -# The same set of OMIT and ENABLE flags should be passed to the -# LEMON parser generator and the mkkeywordhash tool as well. -OPT_FEATURE_FLAGS = @OPT_FEATURE_FLAGS@ +# -fsanitize flags for the fuzzcheck-asap app +CFLAGS.fuzzcheck-asan.fsanitize = @CFLAGS_ASAN_FSANITIZE@ -TCC += $(OPT_FEATURE_FLAGS) +# +# Intended to either be empty or be set to -g -DSQLITE_DEBUG=1. +# +T.cc.TARGET_DEBUG = @TARGET_DEBUG@ -# Add in any optional parameters specified on the make commane line -# ie. make "OPTS=-DSQLITE_ENABLE_FOO=1 -DSQLITE_OMIT_FOO=1". -TCC += $(OPTS) +# +# $(JIMSH) and $(CFLAGS.jimsh) are documented in main.mk. $(JIMSH) +# must start with a path component so that it can be invoked as a +# shell command. +# +CFLAGS.jimsh = @CFLAGS_JIMSH@ +JIMSH = ./jimsh$(T.exe) -# Add in compile-time options for some libraries used by extensions -TCC += @HAVE_ZLIB@ +# +# $(B.tclsh) is documented in main.mk. +# +B.tclsh = @BTCLSH@ +$(B.tclsh): + +# +# $(OPT_FEATURE_FLAGS) is documented in main.mk. +# +# The appending of $(OPTIONS) to $(OPT_FEATURE_FLAGS) is historical +# and somewhat confusing because there's another var, $(OPTS), which +# has a similar (but not identical) role. +# +OPT_FEATURE_FLAGS = @OPT_FEATURE_FLAGS@ $(OPTIONS) -# Version numbers and release number for the SQLite being compiled. # -VERSION = @VERSION@ -VERSION_NUMBER = @VERSION_NUMBER@ -RELEASE = @RELEASE@ +# Version (X.Y.Z) number for the SQLite being compiled. +# +PACKAGE_VERSION = @PACKAGE_VERSION@ -# Filename extensions # -BEXE = @BUILD_EXEEXT@ -TEXE = @TARGET_EXEEXT@ +# Filename extensions for binaries and libraries +# +B.exe = @BUILD_EXEEXT@ +T.exe = @TARGET_EXEEXT@ +B.dll = @BUILD_DLLEXT@ +T.dll = @TARGET_DLLEXT@ +B.lib = @BUILD_LIBEXT@ +T.lib = @TARGET_LIBEXT@ -# The following variable is "1" if the configure script was able to locate -# the tclConfig.sh file. It is an empty string otherwise. When this -# variable is "1", the TCL extension library (libtclsqlite3.so) is built -# and installed. +# +# $(HAVE_TCL) is 1 if the configure script was able to locate the +# tclConfig.sh file, else it is 0. When this variable is 1, the TCL +# extension library (libtclsqlite3.so) and related testing apps are +# built. # HAVE_TCL = @HAVE_TCL@ -# This is the command to use for tclsh - normally just "tclsh", but we may -# know the specific version we want to use +# +# $(TCLSH_CMD) is the command to use for tclsh - normally just +# "tclsh", but we may know the specific version we want to use. This +# must point to a canonical TCL interpreter, not JimTCL. # TCLSH_CMD = @TCLSH_CMD@ +TCL_CONFIG_SH = @TCL_CONFIG_SH@ + +# +# TCL config info from tclConfig.sh +# +# We have to inject this differently in main.mk to accommodate static +# makefiles, so we don't currently bother to export it here. This +# block is retained in case we decide that we do indeed need to export +# it at configure-time instead of calculate it at make-time. +# +#TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@ +#TCL_LIB_SPEC = @TCL_LIB_SPEC@ +#TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +#TCL_EXEC_PREFIX = @TCL_EXEC_PREFIX@ +#TCL_VERSION = @TCL_VERSION@ +TCL_MAJOR_VERSION = @TCL_MAJOR_VERSION@ +# ^^^ main.mk optionally uses this for determining the Tcl extension's +# DLL name. 
+TCL_EXT_DLL_BASENAME = @TCL_EXT_DLL_BASENAME@ +# ^^^ base name of the Tcl extension DLL. It varies by platform and +# Tcl version. -# Where do we want to install the tcl plugin +# +# $(TCLLIBDIR) = where to install the tcl plugin. If this is empty, it +# is calculated at make-time by the targets which need it but we +# export it here so that it can be set at configure-time, so that +# clients are not required to pass it at make-time, or may set it in +# their environment to override it. # TCLLIBDIR = @TCLLIBDIR@ -# The suffix used on shared libraries. Ex: ".dll", ".so", ".dylib" # -SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ +# Additional options when running tests using testrunner.tcl +# This is usually either blank or --status. +# +TSTRNNR_OPTS = @TSTRNNR_OPTS@ +# # If gcov support was enabled by the configure script, add the appropriate # flags here. It's not always as easy as just having the user add the right # CFLAGS / LDFLAGS, because libtool wants to use CFLAGS when linking, which @@ -136,1415 +236,104 @@ SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ # # for more info. # -GCOV_CFLAGS1 = -DSQLITE_COVERAGE_TEST=1 -fprofile-arcs -ftest-coverage -GCOV_LDFLAGS1 = -lgcov +CFLAGS.gcov1 = -DSQLITE_COVERAGE_TEST=1 -fprofile-arcs -ftest-coverage +LDFLAGS.gcov1 = -lgcov USE_GCOV = @USE_GCOV@ -LTCOMPILE_EXTRAS += $(GCOV_CFLAGS$(USE_GCOV)) -LTLINK_EXTRAS += $(GCOV_LDFLAGS$(USE_GCOV)) - -# BEGIN CRYPTO -CRYPTOLIBOBJ = \ - crypto.lo \ - crypto_impl.lo \ - crypto_openssl.lo \ - crypto_libtomcrypt.lo \ - crypto_nss.lo \ - crypto_cc.lo - -CRYPTOSRC = \ - $(TOP)/src/crypto.h \ - $(TOP)/src/sqlcipher.h \ - $(TOP)/src/crypto.c \ - $(TOP)/src/crypto_impl.c \ - $(TOP)/src/crypto_libtomcrypt.c \ - $(TOP)/src/crypto_nss.c \ - $(TOP)/src/crypto_openssl.c \ - $(TOP)/src/crypto_cc.c +T.compile.gcov = $(CFLAGS.gcov$(USE_GCOV)) +T.link.gcov = $(LDFLAGS.gcov$(USE_GCOV)) -# END CRYPTO - -# The directory into which to store package information for - -# Some standard variables and programs # -prefix = @prefix@ -exec_prefix = @exec_prefix@ -libdir = @libdir@ -pkgconfigdir = $(libdir)/pkgconfig -bindir = @bindir@ -includedir = @includedir@/sqlcipher -INSTALL = @INSTALL@ -LIBTOOL = ./libtool -ALLOWRELEASE = @ALLOWRELEASE@ - -# libtool compile/link/install -LTCOMPILE = $(LIBTOOL) --mode=compile --tag=CC $(TCC) $(LTCOMPILE_EXTRAS) -LTLINK = $(LIBTOOL) --mode=link $(TCC) $(LTCOMPILE_EXTRAS) @LDFLAGS@ $(LTLINK_EXTRAS) -LTINSTALL = $(LIBTOOL) --mode=install $(INSTALL) - -# You should not have to change anything below this line -############################################################################### - -USE_AMALGAMATION = @USE_AMALGAMATION@ -AMALGAMATION_LINE_MACROS = @AMALGAMATION_LINE_MACROS@ - -# Object files for the SQLite library (non-amalgamation). 
-# -LIBOBJS0 = alter.lo analyze.lo attach.lo auth.lo \ - backup.lo bitvec.lo btmutex.lo btree.lo build.lo \ - callback.lo complete.lo ctime.lo \ - date.lo dbpage.lo dbstat.lo delete.lo \ - expr.lo fault.lo fkey.lo \ - fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \ - fts3_porter.lo fts3_snippet.lo fts3_tokenizer.lo fts3_tokenizer1.lo \ - fts3_tokenize_vtab.lo \ - fts3_unicode.lo fts3_unicode2.lo fts3_write.lo \ - fts5.lo \ - func.lo global.lo hash.lo \ - icu.lo insert.lo json1.lo legacy.lo loadext.lo \ - main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ - memdb.lo memjournal.lo \ - mutex.lo mutex_noop.lo mutex_unix.lo mutex_w32.lo \ - notify.lo opcodes.lo os.lo os_unix.lo os_win.lo \ - pager.lo parse.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ - random.lo resolve.lo rowset.lo rtree.lo \ - sqlite3session.lo select.lo sqlite3rbu.lo status.lo stmt.lo \ - table.lo threads.lo tokenize.lo treeview.lo trigger.lo \ - update.lo userauth.lo upsert.lo util.lo vacuum.lo \ - vdbe.lo vdbeapi.lo vdbeaux.lo vdbeblob.lo vdbemem.lo vdbesort.lo \ - vdbetrace.lo vdbevtab.lo \ - wal.lo walker.lo where.lo wherecode.lo whereexpr.lo \ - window.lo utf.lo vtab.lo $(CRYPTOLIBOBJ) - -# Object files for the amalgamation. +# Vars with the AS_ prefix are specifically related to AutoSetup. # -LIBOBJS1 = sqlite3.lo - -# Determine the real value of LIBOBJ based on the 'configure' script +# AS_AUTO_DEF is the main configure script. # -LIBOBJ = $(LIBOBJS$(USE_AMALGAMATION)) - - -# All of the source code files. -# -SRC = \ - $(CRYPTOSRC) \ - $(TOP)/src/alter.c \ - $(TOP)/src/analyze.c \ - $(TOP)/src/attach.c \ - $(TOP)/src/auth.c \ - $(TOP)/src/backup.c \ - $(TOP)/src/bitvec.c \ - $(TOP)/src/btmutex.c \ - $(TOP)/src/btree.c \ - $(TOP)/src/btree.h \ - $(TOP)/src/btreeInt.h \ - $(TOP)/src/build.c \ - $(TOP)/src/callback.c \ - $(TOP)/src/complete.c \ - $(TOP)/src/ctime.c \ - $(TOP)/src/date.c \ - $(TOP)/src/dbpage.c \ - $(TOP)/src/dbstat.c \ - $(TOP)/src/delete.c \ - $(TOP)/src/expr.c \ - $(TOP)/src/fault.c \ - $(TOP)/src/fkey.c \ - $(TOP)/src/func.c \ - $(TOP)/src/global.c \ - $(TOP)/src/hash.c \ - $(TOP)/src/hash.h \ - $(TOP)/src/hwtime.h \ - $(TOP)/src/insert.c \ - $(TOP)/src/legacy.c \ - $(TOP)/src/loadext.c \ - $(TOP)/src/main.c \ - $(TOP)/src/malloc.c \ - $(TOP)/src/mem0.c \ - $(TOP)/src/mem1.c \ - $(TOP)/src/mem2.c \ - $(TOP)/src/mem3.c \ - $(TOP)/src/mem5.c \ - $(TOP)/src/memdb.c \ - $(TOP)/src/memjournal.c \ - $(TOP)/src/msvc.h \ - $(TOP)/src/mutex.c \ - $(TOP)/src/mutex.h \ - $(TOP)/src/mutex_noop.c \ - $(TOP)/src/mutex_unix.c \ - $(TOP)/src/mutex_w32.c \ - $(TOP)/src/notify.c \ - $(TOP)/src/os.c \ - $(TOP)/src/os.h \ - $(TOP)/src/os_common.h \ - $(TOP)/src/os_setup.h \ - $(TOP)/src/os_unix.c \ - $(TOP)/src/os_win.c \ - $(TOP)/src/os_win.h \ - $(TOP)/src/pager.c \ - $(TOP)/src/pager.h \ - $(TOP)/src/parse.y \ - $(TOP)/src/pcache.c \ - $(TOP)/src/pcache.h \ - $(TOP)/src/pcache1.c \ - $(TOP)/src/pragma.c \ - $(TOP)/src/pragma.h \ - $(TOP)/src/prepare.c \ - $(TOP)/src/printf.c \ - $(TOP)/src/random.c \ - $(TOP)/src/resolve.c \ - $(TOP)/src/rowset.c \ - $(TOP)/src/select.c \ - $(TOP)/src/status.c \ - $(TOP)/src/shell.c.in \ - $(TOP)/src/sqlite.h.in \ - $(TOP)/src/sqlite3ext.h \ - $(TOP)/src/sqliteInt.h \ - $(TOP)/src/sqliteLimit.h \ - $(TOP)/src/table.c \ - $(TOP)/src/tclsqlite.c \ - $(TOP)/src/threads.c \ - $(TOP)/src/tokenize.c \ - $(TOP)/src/treeview.c \ - $(TOP)/src/trigger.c \ - $(TOP)/src/utf.c \ - $(TOP)/src/update.c \ - $(TOP)/src/upsert.c \ - $(TOP)/src/util.c \ - 
$(TOP)/src/vacuum.c \ - $(TOP)/src/vdbe.c \ - $(TOP)/src/vdbe.h \ - $(TOP)/src/vdbeapi.c \ - $(TOP)/src/vdbeaux.c \ - $(TOP)/src/vdbeblob.c \ - $(TOP)/src/vdbemem.c \ - $(TOP)/src/vdbesort.c \ - $(TOP)/src/vdbetrace.c \ - $(TOP)/src/vdbevtab.c \ - $(TOP)/src/vdbeInt.h \ - $(TOP)/src/vtab.c \ - $(TOP)/src/vxworks.h \ - $(TOP)/src/wal.c \ - $(TOP)/src/wal.h \ - $(TOP)/src/walker.c \ - $(TOP)/src/where.c \ - $(TOP)/src/wherecode.c \ - $(TOP)/src/whereexpr.c \ - $(TOP)/src/whereInt.h \ - $(TOP)/src/window.c - -# Source code for extensions -# -SRC += \ - $(TOP)/ext/fts1/fts1.c \ - $(TOP)/ext/fts1/fts1.h \ - $(TOP)/ext/fts1/fts1_hash.c \ - $(TOP)/ext/fts1/fts1_hash.h \ - $(TOP)/ext/fts1/fts1_porter.c \ - $(TOP)/ext/fts1/fts1_tokenizer.h \ - $(TOP)/ext/fts1/fts1_tokenizer1.c -SRC += \ - $(TOP)/ext/fts2/fts2.c \ - $(TOP)/ext/fts2/fts2.h \ - $(TOP)/ext/fts2/fts2_hash.c \ - $(TOP)/ext/fts2/fts2_hash.h \ - $(TOP)/ext/fts2/fts2_icu.c \ - $(TOP)/ext/fts2/fts2_porter.c \ - $(TOP)/ext/fts2/fts2_tokenizer.h \ - $(TOP)/ext/fts2/fts2_tokenizer.c \ - $(TOP)/ext/fts2/fts2_tokenizer1.c -SRC += \ - $(TOP)/ext/fts3/fts3.c \ - $(TOP)/ext/fts3/fts3.h \ - $(TOP)/ext/fts3/fts3Int.h \ - $(TOP)/ext/fts3/fts3_aux.c \ - $(TOP)/ext/fts3/fts3_expr.c \ - $(TOP)/ext/fts3/fts3_hash.c \ - $(TOP)/ext/fts3/fts3_hash.h \ - $(TOP)/ext/fts3/fts3_icu.c \ - $(TOP)/ext/fts3/fts3_porter.c \ - $(TOP)/ext/fts3/fts3_snippet.c \ - $(TOP)/ext/fts3/fts3_tokenizer.h \ - $(TOP)/ext/fts3/fts3_tokenizer.c \ - $(TOP)/ext/fts3/fts3_tokenizer1.c \ - $(TOP)/ext/fts3/fts3_tokenize_vtab.c \ - $(TOP)/ext/fts3/fts3_unicode.c \ - $(TOP)/ext/fts3/fts3_unicode2.c \ - $(TOP)/ext/fts3/fts3_write.c -SRC += \ - $(TOP)/ext/icu/sqliteicu.h \ - $(TOP)/ext/icu/icu.c -SRC += \ - $(TOP)/ext/rtree/rtree.h \ - $(TOP)/ext/rtree/rtree.c \ - $(TOP)/ext/rtree/geopoly.c -SRC += \ - $(TOP)/ext/session/sqlite3session.c \ - $(TOP)/ext/session/sqlite3session.h -SRC += \ - $(TOP)/ext/userauth/userauth.c \ - $(TOP)/ext/userauth/sqlite3userauth.h -SRC += \ - $(TOP)/ext/rbu/sqlite3rbu.h \ - $(TOP)/ext/rbu/sqlite3rbu.c -SRC += \ - $(TOP)/ext/misc/json1.c \ - $(TOP)/ext/misc/stmt.c - -# Generated source code files -# -SRC += \ - keywordhash.h \ - opcodes.c \ - opcodes.h \ - parse.c \ - parse.h \ - config.h \ - shell.c \ - sqlite3.h - -# Source code to the test files. 
-# -TESTSRC = \ - $(TOP)/src/test1.c \ - $(TOP)/src/test2.c \ - $(TOP)/src/test3.c \ - $(TOP)/src/test4.c \ - $(TOP)/src/test5.c \ - $(TOP)/src/test6.c \ - $(TOP)/src/test7.c \ - $(TOP)/src/test8.c \ - $(TOP)/src/test9.c \ - $(TOP)/src/test_autoext.c \ - $(TOP)/src/test_async.c \ - $(TOP)/src/test_backup.c \ - $(TOP)/src/test_bestindex.c \ - $(TOP)/src/test_blob.c \ - $(TOP)/src/test_btree.c \ - $(TOP)/src/test_config.c \ - $(TOP)/src/test_delete.c \ - $(TOP)/src/test_demovfs.c \ - $(TOP)/src/test_devsym.c \ - $(TOP)/src/test_fs.c \ - $(TOP)/src/test_func.c \ - $(TOP)/src/test_hexio.c \ - $(TOP)/src/test_init.c \ - $(TOP)/src/test_intarray.c \ - $(TOP)/src/test_journal.c \ - $(TOP)/src/test_malloc.c \ - $(TOP)/src/test_md5.c \ - $(TOP)/src/test_multiplex.c \ - $(TOP)/src/test_mutex.c \ - $(TOP)/src/test_onefile.c \ - $(TOP)/src/test_osinst.c \ - $(TOP)/src/test_pcache.c \ - $(TOP)/src/test_quota.c \ - $(TOP)/src/test_rtree.c \ - $(TOP)/src/test_schema.c \ - $(TOP)/src/test_server.c \ - $(TOP)/src/test_superlock.c \ - $(TOP)/src/test_syscall.c \ - $(TOP)/src/test_tclsh.c \ - $(TOP)/src/test_tclvar.c \ - $(TOP)/src/test_thread.c \ - $(TOP)/src/test_vdbecov.c \ - $(TOP)/src/test_vfs.c \ - $(TOP)/src/test_windirent.c \ - $(TOP)/src/test_window.c \ - $(TOP)/src/test_wsd.c \ - $(TOP)/ext/fts3/fts3_term.c \ - $(TOP)/ext/fts3/fts3_test.c \ - $(TOP)/ext/session/test_session.c \ - $(TOP)/ext/rbu/test_rbu.c - -# Statically linked extensions -# -TESTSRC += \ - $(TOP)/ext/expert/sqlite3expert.c \ - $(TOP)/ext/expert/test_expert.c \ - $(TOP)/ext/misc/amatch.c \ - $(TOP)/ext/misc/appendvfs.c \ - $(TOP)/ext/misc/carray.c \ - $(TOP)/ext/misc/cksumvfs.c \ - $(TOP)/ext/misc/closure.c \ - $(TOP)/ext/misc/csv.c \ - $(TOP)/ext/misc/decimal.c \ - $(TOP)/ext/misc/eval.c \ - $(TOP)/ext/misc/explain.c \ - $(TOP)/ext/misc/fileio.c \ - $(TOP)/ext/misc/fuzzer.c \ - $(TOP)/ext/fts5/fts5_tcl.c \ - $(TOP)/ext/fts5/fts5_test_mi.c \ - $(TOP)/ext/fts5/fts5_test_tok.c \ - $(TOP)/ext/misc/ieee754.c \ - $(TOP)/ext/misc/mmapwarm.c \ - $(TOP)/ext/misc/nextchar.c \ - $(TOP)/ext/misc/normalize.c \ - $(TOP)/ext/misc/percentile.c \ - $(TOP)/ext/misc/prefixes.c \ - $(TOP)/ext/misc/regexp.c \ - $(TOP)/ext/misc/remember.c \ - $(TOP)/ext/misc/series.c \ - $(TOP)/ext/misc/spellfix.c \ - $(TOP)/ext/misc/totype.c \ - $(TOP)/ext/misc/unionvtab.c \ - $(TOP)/ext/misc/wholenumber.c \ - $(TOP)/ext/misc/zipfile.c \ - $(TOP)/ext/userauth/userauth.c \ - $(TOP)/ext/rtree/test_rtreedoc.c - -# Source code to the library files needed by the test fixture -# -TESTSRC2 = \ - $(TOP)/src/attach.c \ - $(TOP)/src/backup.c \ - $(TOP)/src/bitvec.c \ - $(TOP)/src/btree.c \ - $(TOP)/src/build.c \ - $(TOP)/src/ctime.c \ - $(TOP)/src/date.c \ - $(TOP)/src/dbpage.c \ - $(TOP)/src/dbstat.c \ - $(TOP)/src/expr.c \ - $(TOP)/src/func.c \ - $(TOP)/src/global.c \ - $(TOP)/src/insert.c \ - $(TOP)/src/wal.c \ - $(TOP)/src/main.c \ - $(TOP)/src/mem5.c \ - $(TOP)/src/os.c \ - $(TOP)/src/os_unix.c \ - $(TOP)/src/os_win.c \ - $(TOP)/src/pager.c \ - $(TOP)/src/pragma.c \ - $(TOP)/src/prepare.c \ - $(TOP)/src/printf.c \ - $(TOP)/src/random.c \ - $(TOP)/src/pcache.c \ - $(TOP)/src/pcache1.c \ - $(TOP)/src/select.c \ - $(TOP)/src/tokenize.c \ - $(TOP)/src/utf.c \ - $(TOP)/src/util.c \ - $(TOP)/src/vdbeapi.c \ - $(TOP)/src/vdbeaux.c \ - $(TOP)/src/vdbe.c \ - $(TOP)/src/vdbemem.c \ - $(TOP)/src/vdbetrace.c \ - $(TOP)/src/vdbevtab.c \ - $(TOP)/src/where.c \ - $(TOP)/src/wherecode.c \ - $(TOP)/src/whereexpr.c \ - $(TOP)/src/window.c \ - parse.c \ - $(TOP)/ext/fts3/fts3.c \ - 
$(TOP)/ext/fts3/fts3_aux.c \ - $(TOP)/ext/fts3/fts3_expr.c \ - $(TOP)/ext/fts3/fts3_term.c \ - $(TOP)/ext/fts3/fts3_tokenizer.c \ - $(TOP)/ext/fts3/fts3_write.c \ - $(TOP)/ext/async/sqlite3async.c \ - $(TOP)/ext/session/sqlite3session.c \ - $(TOP)/ext/misc/stmt.c \ - fts5.c - -# Header files used by all library source files. -# -HDR = \ - $(TOP)/src/btree.h \ - $(TOP)/src/btreeInt.h \ - $(TOP)/src/hash.h \ - $(TOP)/src/hwtime.h \ - keywordhash.h \ - $(TOP)/src/msvc.h \ - $(TOP)/src/mutex.h \ - opcodes.h \ - $(TOP)/src/os.h \ - $(TOP)/src/os_common.h \ - $(TOP)/src/os_setup.h \ - $(TOP)/src/os_win.h \ - $(TOP)/src/pager.h \ - $(TOP)/src/pcache.h \ - parse.h \ - $(TOP)/src/pragma.h \ - sqlite3.h \ - $(TOP)/src/sqlite3ext.h \ - $(TOP)/src/sqliteInt.h \ - $(TOP)/src/sqliteLimit.h \ - $(TOP)/src/vdbe.h \ - $(TOP)/src/vdbeInt.h \ - $(TOP)/src/vxworks.h \ - $(TOP)/src/whereInt.h \ - config.h - -# Header files used by extensions -# -EXTHDR += \ - $(TOP)/ext/fts1/fts1.h \ - $(TOP)/ext/fts1/fts1_hash.h \ - $(TOP)/ext/fts1/fts1_tokenizer.h -EXTHDR += \ - $(TOP)/ext/fts2/fts2.h \ - $(TOP)/ext/fts2/fts2_hash.h \ - $(TOP)/ext/fts2/fts2_tokenizer.h -EXTHDR += \ - $(TOP)/ext/fts3/fts3.h \ - $(TOP)/ext/fts3/fts3Int.h \ - $(TOP)/ext/fts3/fts3_hash.h \ - $(TOP)/ext/fts3/fts3_tokenizer.h -EXTHDR += \ - $(TOP)/ext/rtree/rtree.h \ - $(TOP)/ext/rtree/geopoly.c -EXTHDR += \ - $(TOP)/ext/icu/sqliteicu.h -EXTHDR += \ - $(TOP)/ext/rtree/sqlite3rtree.h -EXTHDR += \ - $(TOP)/ext/userauth/sqlite3userauth.h - -# executables needed for testing -# -TESTPROGS = \ - testfixture$(TEXE) \ - sqlite3$(TEXE) \ - sqlite3_analyzer$(TEXE) \ - sqldiff$(TEXE) \ - dbhash$(TEXE) \ - sqltclsh$(TEXE) - -# Databases containing fuzzer test cases -# -FUZZDATA = \ - $(TOP)/test/fuzzdata1.db \ - $(TOP)/test/fuzzdata2.db \ - $(TOP)/test/fuzzdata3.db \ - $(TOP)/test/fuzzdata4.db \ - $(TOP)/test/fuzzdata5.db \ - $(TOP)/test/fuzzdata6.db \ - $(TOP)/test/fuzzdata7.db \ - $(TOP)/test/fuzzdata8.db - -# Standard options to testfixture +AS_AUTO_DEF = $(TOP)/auto.def # -TESTOPTS = --verbose=file --output=test-out.txt - -# Extra compiler options for various shell tools -# -SHELL_OPT = -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS4 -#SHELL_OPT += -DSQLITE_ENABLE_FTS5 -SHELL_OPT += -DSQLITE_ENABLE_RTREE -SHELL_OPT += -DSQLITE_ENABLE_EXPLAIN_COMMENTS -SHELL_OPT += -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION -SHELL_OPT += -DSQLITE_ENABLE_STMTVTAB -SHELL_OPT += -DSQLITE_ENABLE_DBPAGE_VTAB -SHELL_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB -SHELL_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB -SHELL_OPT += -DSQLITE_ENABLE_OFFSET_SQL_FUNC -FUZZERSHELL_OPT = -DSQLITE_ENABLE_JSON1 -FUZZCHECK_OPT = -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_OSS_FUZZ -FUZZCHECK_OPT += -DSQLITE_MAX_MEMORY=50000000 -FUZZCHECK_OPT += -DSQLITE_PRINTF_PRECISION_LIMIT=1000 -FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS4 -FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS3_PARENTHESIS -FUZZCHECK_OPT += -DSQLITE_ENABLE_FTS5 -FUZZCHECK_OPT += -DSQLITE_ENABLE_RTREE -FUZZCHECK_OPT += -DSQLITE_ENABLE_GEOPOLY -FUZZCHECK_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB -FUZZCHECK_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB -FUZZCHECK_SRC = $(TOP)/test/fuzzcheck.c $(TOP)/test/ossfuzz.c -DBFUZZ_OPT = - -# This is the default Makefile target. The objects listed here -# are what get build when you type just "make" with no arguments. +# Shell commands to re-run $(TOP)/configure with the same args it was +# invoked with to produce this makefile. 
# -all: sqlite3.h libsqlcipher.la sqlcipher$(TEXE) $(HAVE_TCL:1=libtclsqlite3.la) - -Makefile: $(TOP)/Makefile.in - ./config.status - -sqlcipher.pc: $(TOP)/sqlcipher.pc.in - ./config.status - -libsqlcipher.la: $(LIBOBJ) - $(LTLINK) -no-undefined -o $@ $(LIBOBJ) $(TLIBS) \ - ${ALLOWRELEASE} -rpath "$(libdir)" -version-info "8:6:8" - -libtclsqlite3.la: tclsqlite.lo libsqlcipher.la - $(LTLINK) -no-undefined -o $@ tclsqlite.lo \ - libsqlcipher.la @TCL_STUB_LIB_SPEC@ $(TLIBS) \ - -rpath "$(TCLLIBDIR)" \ - -version-info "8:6:8" \ - -avoid-version - -sqlcipher$(TEXE): shell.c sqlite3.c - $(LTLINK) $(READLINE_FLAGS) $(SHELL_OPT) -o $@ \ - shell.c sqlite3.c \ - $(LIBREADLINE) $(TLIBS) -rpath "$(libdir)" - -sqldiff$(TEXE): $(TOP)/tool/sqldiff.c sqlite3.lo sqlite3.h - $(LTLINK) -o $@ $(TOP)/tool/sqldiff.c sqlite3.lo $(TLIBS) - -dbhash$(TEXE): $(TOP)/tool/dbhash.c sqlite3.lo sqlite3.h - $(LTLINK) -o $@ $(TOP)/tool/dbhash.c sqlite3.lo $(TLIBS) +AS_AUTORECONFIG = @SQLITE_AUTORECONFIG@ +.PHONY: config reconfigure +config reconfigure: + $(AS_AUTORECONFIG) +USE_AMALGAMATION ?= @USE_AMALGAMATION@ +LINK_TOOLS_DYNAMICALLY ?= @LINK_TOOLS_DYNAMICALLY@ +AMALGAMATION_GEN_FLAGS ?= --linemacros=@AMALGAMATION_LINE_MACROS@ +EXTRA_SRC ?= @AMALGAMATION_EXTRA_SRC@ +STATIC_TCLSQLITE3 = @STATIC_TCLSQLITE3@ +STATIC_CLI_SHELL = @STATIC_CLI_SHELL@ -scrub$(TEXE): $(TOP)/ext/misc/scrub.c sqlite3.lo - $(LTLINK) -o $@ -I. -DSCRUB_STANDALONE \ - $(TOP)/ext/misc/scrub.c sqlite3.lo $(TLIBS) - -srcck1$(BEXE): $(TOP)/tool/srcck1.c - $(BCC) -o srcck1$(BEXE) $(TOP)/tool/srcck1.c - -sourcetest: srcck1$(BEXE) sqlite3.c - ./srcck1 sqlite3.c - -fuzzershell$(TEXE): $(TOP)/tool/fuzzershell.c sqlite3.c sqlite3.h - $(LTLINK) -o $@ $(FUZZERSHELL_OPT) \ - $(TOP)/tool/fuzzershell.c sqlite3.c $(TLIBS) - -fuzzcheck$(TEXE): $(FUZZCHECK_SRC) sqlite3.c sqlite3.h - $(LTLINK) -o $@ $(FUZZCHECK_OPT) $(FUZZCHECK_SRC) sqlite3.c $(TLIBS) - -ossshell$(TEXE): $(TOP)/test/ossfuzz.c $(TOP)/test/ossshell.c sqlite3.c sqlite3.h - $(LTLINK) -o $@ $(FUZZCHECK_OPT) $(TOP)/test/ossshell.c \ - $(TOP)/test/ossfuzz.c sqlite3.c $(TLIBS) - -sessionfuzz$(TEXE): $(TOP)/test/sessionfuzz.c sqlite3.c sqlite3.h - $(LTLINK) -o $@ $(TOP)/test/sessionfuzz.c $(TLIBS) - -dbfuzz$(TEXE): $(TOP)/test/dbfuzz.c sqlite3.c sqlite3.h - $(LTLINK) -o $@ $(DBFUZZ_OPT) $(TOP)/test/dbfuzz.c sqlite3.c $(TLIBS) - -DBFUZZ2_OPTS = \ - -DSQLITE_THREADSAFE=0 \ - -DSQLITE_OMIT_LOAD_EXTENSION \ - -DSQLITE_DEBUG \ - -DSQLITE_ENABLE_DBSTAT_VTAB \ - -DSQLITE_ENABLE_BYTECODE_VTAB \ - -DSQLITE_ENABLE_RTREE \ - -DSQLITE_ENABLE_FTS4 \ - -DSQLITE_ENABLE_FTS5 - -dbfuzz2$(TEXE): $(TOP)/test/dbfuzz2.c sqlite3.c sqlite3.h - $(CC) $(OPT_FEATURE_FLAGS) $(OPTS) -I. -g -O0 \ - -DSTANDALONE -o dbfuzz2 \ - $(DBFUZZ2_OPTS) $(TOP)/test/dbfuzz2.c sqlite3.c $(TLIBS) - mkdir -p dbfuzz2-dir - cp $(TOP)/test/dbfuzz2-seed* dbfuzz2-dir - -dbfuzz2-asan: $(TOP)/test/dbfuzz2.c sqlite3.c sqlite3.h - clang-6.0 $(OPT_FEATURE_FLAGS) $(OPTS) -I. -g -O0 \ - -fsanitize=fuzzer,undefined,address -o dbfuzz2-asan \ - $(DBFUZZ2_OPTS) $(TOP)/test/dbfuzz2.c sqlite3.c $(TLIBS) - mkdir -p dbfuzz2-dir - cp $(TOP)/test/dbfuzz2-seed* dbfuzz2-dir - -dbfuzz2-msan: $(TOP)/test/dbfuzz2.c sqlite3.c sqlite3.h - clang-6.0 $(OPT_FEATURE_FLAGS) $(OPTS) -I. -g -O0 \ - -fsanitize=fuzzer,undefined,memory -o dbfuzz2-msan \ - $(DBFUZZ2_OPTS) $(TOP)/test/dbfuzz2.c sqlite3.c $(TLIBS) - mkdir -p dbfuzz2-dir - cp $(TOP)/test/dbfuzz2-seed* dbfuzz2-dir - -mptester$(TEXE): sqlite3.lo $(TOP)/mptest/mptest.c - $(LTLINK) -o $@ -I. 
$(TOP)/mptest/mptest.c sqlite3.lo \ - $(TLIBS) -rpath "$(libdir)" - -MPTEST1=./mptester$(TEXE) mptest.db $(TOP)/mptest/crash01.test --repeat 20 -MPTEST2=./mptester$(TEXE) mptest.db $(TOP)/mptest/multiwrite01.test --repeat 20 -mptest: mptester$(TEXE) - rm -f mptest.db - $(MPTEST1) --journalmode DELETE - $(MPTEST2) --journalmode WAL - $(MPTEST1) --journalmode WAL - $(MPTEST2) --journalmode PERSIST - $(MPTEST1) --journalmode PERSIST - $(MPTEST2) --journalmode TRUNCATE - $(MPTEST1) --journalmode TRUNCATE - $(MPTEST2) --journalmode DELETE - - -# This target creates a directory named "tsrc" and fills it with -# copies of all of the C source code and header files needed to -# build on the target system. Some of the C source code and header -# files are automatically generated. This target takes care of -# all that automatic generation. -# -.target_source: $(SRC) $(TOP)/tool/vdbe-compress.tcl fts5.c - rm -rf tsrc - mkdir tsrc - cp -f $(SRC) tsrc - rm tsrc/sqlite.h.in tsrc/parse.y - $(TCLSH_CMD) $(TOP)/tool/vdbe-compress.tcl $(OPTS) vdbe.new - mv vdbe.new tsrc/vdbe.c - cp fts5.c fts5.h tsrc - touch .target_source - -sqlite3.c: .target_source $(TOP)/tool/mksqlite3c.tcl - $(TCLSH_CMD) $(TOP)/tool/mksqlite3c.tcl $(AMALGAMATION_LINE_MACROS) - cp tsrc/sqlite3ext.h . - cp $(TOP)/ext/session/sqlite3session.h . - -sqlite3ext.h: .target_source - cp tsrc/sqlite3ext.h . - -tclsqlite3.c: sqlite3.c - echo '#ifndef USE_SYSTEM_SQLITE' >tclsqlite3.c - cat sqlite3.c >>tclsqlite3.c - echo '#endif /* USE_SYSTEM_SQLITE */' >>tclsqlite3.c - cat $(TOP)/src/tclsqlite.c >>tclsqlite3.c - -sqlite3-all.c: sqlite3.c $(TOP)/tool/split-sqlite3c.tcl - $(TCLSH_CMD) $(TOP)/tool/split-sqlite3c.tcl - -# Rule to build the amalgamation # -sqlite3.lo: sqlite3.c - $(LTCOMPILE) $(TEMP_STORE) -c sqlite3.c - -# Rules to build the LEMON compiler generator -# -lemon$(BEXE): $(TOP)/tool/lemon.c $(TOP)/tool/lempar.c - $(BCC) -o $@ $(TOP)/tool/lemon.c - cp $(TOP)/tool/lempar.c . - -# Rules to build the program that generates the source-id +# CFLAGS for sqlite3$(T.exe) # -mksourceid$(BEXE): $(TOP)/tool/mksourceid.c - $(BCC) -o $@ $(TOP)/tool/mksourceid.c - -# Rules to build individual *.o files from generated *.c files. This -# applies to: -# -# parse.o -# opcodes.o -# -parse.lo: parse.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c parse.c - -opcodes.lo: opcodes.c - $(LTCOMPILE) $(TEMP_STORE) -c opcodes.c - -# BEGIN CRYPTO -crypto.lo: $(TOP)/src/crypto.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto.c -crypto_impl.lo: $(TOP)/src/crypto_impl.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto_impl.c -crypto_openssl.lo: $(TOP)/src/crypto_openssl.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto_openssl.c -crypto_nss.lo: $(TOP)/src/crypto_nss.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto_nss.c -crypto_libtomcrypt.lo: $(TOP)/src/crypto_libtomcrypt.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto_libtomcrypt.c -crypto_cc.lo: $(TOP)/src/crypto_cc.c $(HDR) - $(LTCOMPILE) -c $(TOP)/src/crypto_cc.c -# END CRYPTO - -# Rules to build individual *.o files from files in the src directory. 
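# Example (illustrative sketch): with the per-file rules above removed, the
# amalgamation is produced by main.mk; a typical invocation, assuming a
# configured tree and an available tclsh/jimsh, would be:
./configure
make sqlite3.c                  # generates the single-file amalgamation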
-# -alter.lo: $(TOP)/src/alter.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/alter.c - -analyze.lo: $(TOP)/src/analyze.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/analyze.c - -attach.lo: $(TOP)/src/attach.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/attach.c - -auth.lo: $(TOP)/src/auth.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/auth.c - -backup.lo: $(TOP)/src/backup.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/backup.c - -bitvec.lo: $(TOP)/src/bitvec.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/bitvec.c - -btmutex.lo: $(TOP)/src/btmutex.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/btmutex.c - -btree.lo: $(TOP)/src/btree.c $(HDR) $(TOP)/src/pager.h - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/btree.c - -build.lo: $(TOP)/src/build.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/build.c - -callback.lo: $(TOP)/src/callback.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/callback.c - -complete.lo: $(TOP)/src/complete.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/complete.c - -ctime.lo: $(TOP)/src/ctime.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/ctime.c - -date.lo: $(TOP)/src/date.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/date.c - -dbpage.lo: $(TOP)/src/dbpage.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/dbpage.c - -dbstat.lo: $(TOP)/src/dbstat.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/dbstat.c - -delete.lo: $(TOP)/src/delete.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/delete.c - -expr.lo: $(TOP)/src/expr.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/expr.c - -fault.lo: $(TOP)/src/fault.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/fault.c - -fkey.lo: $(TOP)/src/fkey.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/fkey.c - -func.lo: $(TOP)/src/func.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/func.c - -global.lo: $(TOP)/src/global.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/global.c - -hash.lo: $(TOP)/src/hash.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/hash.c - -insert.lo: $(TOP)/src/insert.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/insert.c - -legacy.lo: $(TOP)/src/legacy.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/legacy.c - -loadext.lo: $(TOP)/src/loadext.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/loadext.c - -main.lo: $(TOP)/src/main.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/main.c - -malloc.lo: $(TOP)/src/malloc.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/malloc.c - -mem0.lo: $(TOP)/src/mem0.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem0.c - -mem1.lo: $(TOP)/src/mem1.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem1.c - -mem2.lo: $(TOP)/src/mem2.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem2.c - -mem3.lo: $(TOP)/src/mem3.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem3.c - -mem5.lo: $(TOP)/src/mem5.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mem5.c - -memdb.lo: $(TOP)/src/memdb.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/memdb.c - -memjournal.lo: $(TOP)/src/memjournal.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/memjournal.c - -mutex.lo: $(TOP)/src/mutex.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex.c - -mutex_noop.lo: $(TOP)/src/mutex_noop.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_noop.c - -mutex_unix.lo: $(TOP)/src/mutex_unix.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_unix.c - -mutex_w32.lo: $(TOP)/src/mutex_w32.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/mutex_w32.c - -notify.lo: 
$(TOP)/src/notify.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/notify.c - -pager.lo: $(TOP)/src/pager.c $(HDR) $(TOP)/src/pager.h - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pager.c - -pcache.lo: $(TOP)/src/pcache.c $(HDR) $(TOP)/src/pcache.h - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache.c +SHELL_OPT ?= @OPT_SHELL@ -pcache1.lo: $(TOP)/src/pcache1.c $(HDR) $(TOP)/src/pcache.h - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pcache1.c +Makefile: $(TOP)/Makefile.in $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ -os.lo: $(TOP)/src/os.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os.c +sqlite3.pc: $(TOP)/sqlite3.pc.in $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ +install: install-pc # defined in main.mk -os_unix.lo: $(TOP)/src/os_unix.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_unix.c +sqlite_cfg.h: $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ -os_win.lo: $(TOP)/src/os_win.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/os_win.c - -pragma.lo: $(TOP)/src/pragma.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/pragma.c - -prepare.lo: $(TOP)/src/prepare.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/prepare.c - -printf.lo: $(TOP)/src/printf.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/printf.c - -random.lo: $(TOP)/src/random.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/random.c - -resolve.lo: $(TOP)/src/resolve.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/resolve.c - -rowset.lo: $(TOP)/src/rowset.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/rowset.c - -select.lo: $(TOP)/src/select.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/select.c - -status.lo: $(TOP)/src/status.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/status.c - -table.lo: $(TOP)/src/table.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/table.c - -threads.lo: $(TOP)/src/threads.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/threads.c - -tokenize.lo: $(TOP)/src/tokenize.c keywordhash.h $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/tokenize.c - -treeview.lo: $(TOP)/src/treeview.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/treeview.c - -trigger.lo: $(TOP)/src/trigger.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/trigger.c - -update.lo: $(TOP)/src/update.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/update.c - -upsert.lo: $(TOP)/src/upsert.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/upsert.c - -utf.lo: $(TOP)/src/utf.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/utf.c - -util.lo: $(TOP)/src/util.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/util.c - -vacuum.lo: $(TOP)/src/vacuum.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vacuum.c - -vdbe.lo: $(TOP)/src/vdbe.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbe.c - -vdbeapi.lo: $(TOP)/src/vdbeapi.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeapi.c - -vdbeaux.lo: $(TOP)/src/vdbeaux.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeaux.c - -vdbeblob.lo: $(TOP)/src/vdbeblob.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbeblob.c - -vdbemem.lo: $(TOP)/src/vdbemem.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbemem.c - -vdbesort.lo: $(TOP)/src/vdbesort.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbesort.c - -vdbetrace.lo: $(TOP)/src/vdbetrace.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbetrace.c - -vdbevtab.lo: $(TOP)/src/vdbevtab.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vdbevtab.c - -vtab.lo: $(TOP)/src/vtab.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/vtab.c - -wal.lo: $(TOP)/src/wal.c 
$(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/wal.c - -walker.lo: $(TOP)/src/walker.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/walker.c - -where.lo: $(TOP)/src/where.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/where.c - -wherecode.lo: $(TOP)/src/wherecode.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/wherecode.c - -whereexpr.lo: $(TOP)/src/whereexpr.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/whereexpr.c - -window.lo: $(TOP)/src/window.c $(HDR) - $(LTCOMPILE) $(TEMP_STORE) -c $(TOP)/src/window.c - -tclsqlite.lo: $(TOP)/src/tclsqlite.c $(HDR) - $(LTCOMPILE) -DUSE_TCL_STUBS=1 -c $(TOP)/src/tclsqlite.c - -tclsqlite-shell.lo: $(TOP)/src/tclsqlite.c $(HDR) - $(LTCOMPILE) -DTCLSH -o $@ -c $(TOP)/src/tclsqlite.c - -tclsqlite-stubs.lo: $(TOP)/src/tclsqlite.c $(HDR) - $(LTCOMPILE) -DUSE_TCL_STUBS=1 -o $@ -c $(TOP)/src/tclsqlite.c - -tclsqlcipher$(TEXE): tclsqlite-shell.lo libsqlcipher.la - $(LTLINK) -o $@ tclsqlite-shell.lo \ - libsqlcipher.la $(LIBTCL) - -# Rules to build opcodes.c and opcodes.h # -opcodes.c: opcodes.h $(TOP)/tool/mkopcodec.tcl - $(TCLSH_CMD) $(TOP)/tool/mkopcodec.tcl opcodes.h >opcodes.c - -opcodes.h: parse.h $(TOP)/src/vdbe.c $(TOP)/tool/mkopcodeh.tcl - cat parse.h $(TOP)/src/vdbe.c | $(TCLSH_CMD) $(TOP)/tool/mkopcodeh.tcl >opcodes.h - -# Rules to build parse.c and parse.h - the outputs of lemon. +# Fiddle app # -parse.h: parse.c - -parse.c: $(TOP)/src/parse.y lemon$(BEXE) - cp $(TOP)/src/parse.y . - ./lemon$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) -S parse.y - -sqlite3.h: $(TOP)/src/sqlite.h.in $(TOP)/manifest mksourceid$(BEXE) $(TOP)/VERSION - $(TCLSH_CMD) $(TOP)/tool/mksqlite3h.tcl $(TOP) >sqlite3.h - -sqlite3rc.h: $(TOP)/src/sqlite3.rc $(TOP)/VERSION - echo '#ifndef SQLITE_RESOURCE_VERSION' >$@ - echo -n '#define SQLITE_RESOURCE_VERSION ' >>$@ - cat $(TOP)/VERSION | $(TCLSH_CMD) $(TOP)/tool/replace.tcl exact . , >>$@ - echo '#endif' >>sqlite3rc.h - -keywordhash.h: $(TOP)/tool/mkkeywordhash.c - $(BCC) -o mkkeywordhash$(BEXE) $(OPT_FEATURE_FLAGS) $(OPTS) $(TOP)/tool/mkkeywordhash.c - ./mkkeywordhash$(BEXE) >keywordhash.h - -# Source files that go into making shell.c -SHELL_SRC = \ - $(TOP)/src/shell.c.in \ - $(TOP)/ext/misc/appendvfs.c \ - $(TOP)/ext/misc/completion.c \ - $(TOP)/ext/misc/decimal.c \ - $(TOP)/ext/misc/fileio.c \ - $(TOP)/ext/misc/ieee754.c \ - $(TOP)/ext/misc/regexp.c \ - $(TOP)/ext/misc/series.c \ - $(TOP)/ext/misc/shathree.c \ - $(TOP)/ext/misc/sqlar.c \ - $(TOP)/ext/misc/uint.c \ - $(TOP)/ext/expert/sqlite3expert.c \ - $(TOP)/ext/expert/sqlite3expert.h \ - $(TOP)/ext/misc/zipfile.c \ - $(TOP)/ext/misc/memtrace.c \ - $(TOP)/src/test_windirent.c - -shell.c: $(SHELL_SRC) $(TOP)/tool/mkshellc.tcl - $(TCLSH_CMD) $(TOP)/tool/mkshellc.tcl >shell.c - - - - -# Rules to build the extension objects. +# EMCC_WRAPPER must refer to the genuine emcc binary, or a +# call-compatible wrapper, e.g. $(TOP)/tool/emcc.sh. If it's empty, +# build components requiring Emscripten will not build. 
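# Example (illustrative sketch of the fiddle build described above): assumes the
# Emscripten SDK is installed at a hypothetical path and was found by configure
# so that EMCC_WRAPPER is non-empty; requires GNU make.
. /opt/emsdk/emsdk_env.sh
./configure
make fiddle                     # delegates to ext/wasm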
# -icu.lo: $(TOP)/ext/icu/icu.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/icu/icu.c - -fts2.lo: $(TOP)/ext/fts2/fts2.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2.c - -fts2_hash.lo: $(TOP)/ext/fts2/fts2_hash.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_hash.c - -fts2_icu.lo: $(TOP)/ext/fts2/fts2_icu.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_icu.c - -fts2_porter.lo: $(TOP)/ext/fts2/fts2_porter.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_porter.c - -fts2_tokenizer.lo: $(TOP)/ext/fts2/fts2_tokenizer.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer.c - -fts2_tokenizer1.lo: $(TOP)/ext/fts2/fts2_tokenizer1.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts2/fts2_tokenizer1.c - -fts3.lo: $(TOP)/ext/fts3/fts3.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3.c - -fts3_aux.lo: $(TOP)/ext/fts3/fts3_aux.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_aux.c - -fts3_expr.lo: $(TOP)/ext/fts3/fts3_expr.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_expr.c - -fts3_hash.lo: $(TOP)/ext/fts3/fts3_hash.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_hash.c - -fts3_icu.lo: $(TOP)/ext/fts3/fts3_icu.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_icu.c - -fts3_porter.lo: $(TOP)/ext/fts3/fts3_porter.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_porter.c - -fts3_snippet.lo: $(TOP)/ext/fts3/fts3_snippet.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_snippet.c - -fts3_tokenizer.lo: $(TOP)/ext/fts3/fts3_tokenizer.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer.c - -fts3_tokenizer1.lo: $(TOP)/ext/fts3/fts3_tokenizer1.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenizer1.c - -fts3_tokenize_vtab.lo: $(TOP)/ext/fts3/fts3_tokenize_vtab.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_tokenize_vtab.c - -fts3_unicode.lo: $(TOP)/ext/fts3/fts3_unicode.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_unicode.c - -fts3_unicode2.lo: $(TOP)/ext/fts3/fts3_unicode2.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_unicode2.c - -fts3_write.lo: $(TOP)/ext/fts3/fts3_write.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/fts3/fts3_write.c - -rtree.lo: $(TOP)/ext/rtree/rtree.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/rtree/rtree.c - -userauth.lo: $(TOP)/ext/userauth/userauth.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/userauth/userauth.c - -sqlite3session.lo: $(TOP)/ext/session/sqlite3session.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/session/sqlite3session.c - -json1.lo: $(TOP)/ext/misc/json1.c - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/misc/json1.c - -stmt.lo: $(TOP)/ext/misc/stmt.c - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/misc/stmt.c - -# FTS5 things -# -FTS5_SRC = \ - $(TOP)/ext/fts5/fts5.h \ - $(TOP)/ext/fts5/fts5Int.h \ - $(TOP)/ext/fts5/fts5_aux.c \ - $(TOP)/ext/fts5/fts5_buffer.c \ - $(TOP)/ext/fts5/fts5_main.c \ - $(TOP)/ext/fts5/fts5_config.c \ - $(TOP)/ext/fts5/fts5_expr.c \ - $(TOP)/ext/fts5/fts5_hash.c \ - $(TOP)/ext/fts5/fts5_index.c \ - fts5parse.c fts5parse.h \ - $(TOP)/ext/fts5/fts5_storage.c \ - $(TOP)/ext/fts5/fts5_tokenize.c \ - $(TOP)/ext/fts5/fts5_unicode2.c \ - 
$(TOP)/ext/fts5/fts5_varint.c \ - $(TOP)/ext/fts5/fts5_vocab.c \ - -fts5parse.c: $(TOP)/ext/fts5/fts5parse.y lemon$(BEXE) - cp $(TOP)/ext/fts5/fts5parse.y . - rm -f fts5parse.h - ./lemon$(BEXE) $(OPTS) -S fts5parse.y - -fts5parse.h: fts5parse.c - -fts5.c: $(FTS5_SRC) - $(TCLSH_CMD) $(TOP)/ext/fts5/tool/mkfts5c.tcl - cp $(TOP)/ext/fts5/fts5.h . - -fts5.lo: fts5.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c fts5.c - -sqlite3rbu.lo: $(TOP)/ext/rbu/sqlite3rbu.c $(HDR) $(EXTHDR) - $(LTCOMPILE) -DSQLITE_CORE -c $(TOP)/ext/rbu/sqlite3rbu.c - - -# Rules to build the 'testfixture' application. -# -# If using the amalgamation, use sqlite3.c directly to build the test -# fixture. Otherwise link against libsqlcipher.la. (This distinction is -# necessary because the test fixture requires non-API symbols which are -# hidden when the library is built via the amalgamation). -# -TESTFIXTURE_FLAGS = -DSQLITE_TEST=1 -DSQLITE_CRASH_TEST=1 -TESTFIXTURE_FLAGS += -DTCLSH_INIT_PROC=sqlite3TestInit -TESTFIXTURE_FLAGS += -DSQLITE_SERVER=1 -DSQLITE_PRIVATE="" -DSQLITE_CORE -TESTFIXTURE_FLAGS += -DBUILD_sqlite -TESTFIXTURE_FLAGS += -DSQLITE_SERIES_CONSTRAINT_VERIFY=1 -TESTFIXTURE_FLAGS += -DSQLITE_DEFAULT_PAGE_SIZE=1024 -TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_STMTVTAB -TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_DBPAGE_VTAB -TESTFIXTURE_FLAGS += -DSQLITE_ENABLE_BYTECODE_VTAB -TESTFIXTURE_FLAGS += -DSQLITE_CKSUMVFS_STATIC - -TESTFIXTURE_SRC0 = $(TESTSRC2) libsqlcipher.la -TESTFIXTURE_SRC1 = sqlite3.c -TESTFIXTURE_SRC = $(TESTSRC) $(TOP)/src/tclsqlite.c -TESTFIXTURE_SRC += $(TESTFIXTURE_SRC$(USE_AMALGAMATION)) - -testfixture$(TEXE): $(TESTFIXTURE_SRC) - $(LTLINK) -DSQLITE_NO_SYNC=1 $(TEMP_STORE) $(TESTFIXTURE_FLAGS) \ - -o $@ $(TESTFIXTURE_SRC) $(LIBTCL) $(TLIBS) - -coretestprogs: $(TESTPROGS) - -testprogs: coretestprogs srcck1$(BEXE) fuzzcheck$(TEXE) sessionfuzz$(TEXE) - -# A very detailed test running most or all test cases -fulltest: alltest fuzztest - -# Run most or all tcl test cases -alltest: $(TESTPROGS) - ./testfixture$(TEXE) $(TOP)/test/all.test $(TESTOPTS) - -# Really really long testing -soaktest: $(TESTPROGS) - ./testfixture$(TEXE) $(TOP)/test/all.test -soak=1 $(TESTOPTS) - -# Do extra testing but not everything. -fulltestonly: $(TESTPROGS) fuzztest - ./testfixture$(TEXE) $(TOP)/test/full.test - -# Fuzz testing -fuzztest: fuzzcheck$(TEXE) $(FUZZDATA) sessionfuzz$(TEXE) $(TOP)/test/sessionfuzz-data1.db - ./fuzzcheck$(TEXE) $(FUZZDATA) - ./sessionfuzz$(TEXE) run $(TOP)/test/sessionfuzz-data1.db - -valgrindfuzz: fuzzcheck$(TEXT) $(FUZZDATA) sessionfuzz$(TEXE) $(TOP)/test/sessionfuzz-data1.db - valgrind ./fuzzcheck$(TEXE) --cell-size-check --limit-mem 10M $(FUZZDATA) - valgrind ./sessionfuzz$(TEXE) run $(TOP)/test/sessionfuzz-data1.db - -# The veryquick.test TCL tests. +# Achtung: though _this_ makefile is POSIX-make compatible, the fiddle +# build requires GNU make. # -tcltest: ./testfixture$(TEXE) - ./testfixture$(TEXE) $(TOP)/test/veryquick.test $(TESTOPTS) +EMCC_WRAPPER = @EMCC_WRAPPER@ +fiddle: sqlite3.c shell.c + @if [ x = "x$(EMCC_WRAPPER)" ]; then \ + echo "Emscripten SDK not found by configure. Cannot build fiddle." 1&>2; \ + exit 1; \ + fi + $(MAKE) -C ext/wasm fiddle emcc_opt=-Os -# Minimal testing that runs in less than 3 minutes # -quicktest: ./testfixture$(TEXE) - ./testfixture$(TEXE) $(TOP)/test/extraquick.test $(TESTOPTS) - -# This is the common case. Run many tests that do not take too long, -# including fuzzcheck, sqlite3_analyzer, and sqldiff tests. 
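# Example (illustrative sketch): the TCL-driven test entry points referenced
# above are typically run as follows; exact timings vary by machine.
make tcltest                    # test/veryquick.test via testfixture
make quicktest                  # test/extraquick.test, a fast sanity pass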
+# Spell-checking for source comments +# The sources checked are either C sources or C source templates. +# Their comments are extracted and processed through aspell using +# a custom dictionary that contains scads of odd identifiers that +# find their way into the comments. # -test: fuzztest sourcetest $(TESTPROGS) tcltest - -# Run a test using valgrind. This can take a really long time -# because valgrind is so much slower than a native machine. -# -valgrindtest: $(TESTPROGS) valgrindfuzz - OMIT_MISUSE=1 valgrind -v ./testfixture$(TEXE) $(TOP)/test/permutations.test valgrind $(TESTOPTS) - -# A very fast test that checks basic sanity. The name comes from -# the 60s-era electronics testing: "Turn it on and see if smoke -# comes out." +# Currently, this target is setup to be "made" in-tree only. +# The output is ephemeral. Redirect it to guide spelling fixups, +# either to correct spelling or add words to tool/custom.txt. # -smoketest: $(TESTPROGS) fuzzcheck$(TEXE) - ./testfixture$(TEXE) $(TOP)/test/main.test $(TESTOPTS) - -shelltest: $(TESTPROGS) - ./testfixture$(TEXT) $(TOP)/test/permutations.test shell - -sqlite3_analyzer.c: sqlite3.c $(TOP)/src/tclsqlite.c $(TOP)/tool/spaceanal.tcl $(TOP)/tool/mkccode.tcl $(TOP)/tool/sqlite3_analyzer.c.in - $(TCLSH_CMD) $(TOP)/tool/mkccode.tcl $(TOP)/tool/sqlite3_analyzer.c.in >sqlite3_analyzer.c - -sqlite3_analyzer$(TEXE): sqlite3_analyzer.c - $(LTLINK) sqlite3_analyzer.c -o $@ $(LIBTCL) $(TLIBS) - -sqltclsh.c: sqlite3.c $(TOP)/src/tclsqlite.c $(TOP)/tool/sqltclsh.tcl $(TOP)/ext/misc/appendvfs.c $(TOP)/tool/mkccode.tcl $(TOP)/tool/sqltclsh.c.in - $(TCLSH_CMD) $(TOP)/tool/mkccode.tcl $(TOP)/tool/sqltclsh.c.in >sqltclsh.c - -sqltclsh$(TEXE): sqltclsh.c - $(LTLINK) sqltclsh.c -o $@ $(LIBTCL) $(TLIBS) - -sqlite3_expert$(TEXE): $(TOP)/ext/expert/sqlite3expert.h $(TOP)/ext/expert/sqlite3expert.c $(TOP)/ext/expert/expert.c sqlite3.c - $(LTLINK) $(TOP)/ext/expert/sqlite3expert.h $(TOP)/ext/expert/sqlite3expert.c $(TOP)/ext/expert/expert.c sqlite3.c -o sqlite3_expert $(TLIBS) - -CHECKER_DEPS =\ - $(TOP)/tool/mkccode.tcl \ - sqlite3.c \ - $(TOP)/src/tclsqlite.c \ - $(TOP)/ext/repair/sqlite3_checker.tcl \ - $(TOP)/ext/repair/checkindex.c \ - $(TOP)/ext/repair/checkfreelist.c \ - $(TOP)/ext/misc/btreeinfo.c \ - $(TOP)/ext/repair/sqlite3_checker.c.in - -sqlite3_checker.c: $(CHECKER_DEPS) - $(TCLSH_CMD) $(TOP)/tool/mkccode.tcl $(TOP)/ext/repair/sqlite3_checker.c.in >$@ - -sqlite3_checker$(TEXE): sqlite3_checker.c - $(LTLINK) sqlite3_checker.c -o $@ $(LIBTCL) $(TLIBS) +./custom.rws: ./tool/custom.txt + @echo 'Updating custom dictionary from tool/custom.txt' + aspell --lang=en create master ./custom.rws < ./tool/custom.txt +misspell: ./custom.rws has_tclsh84 +# $(JIMSH) does not work with spellsift.tcl + $(TCLSH_CMD) ./tool/spellsift.tcl ./src/*.c ./src/*.h ./src/*.in -dbdump$(TEXE): $(TOP)/ext/misc/dbdump.c sqlite3.lo - $(LTLINK) -DDBDUMP_STANDALONE -o $@ \ - $(TOP)/ext/misc/dbdump.c sqlite3.lo $(TLIBS) - -dbtotxt$(TEXE): $(TOP)/tool/dbtotxt.c - $(LTLINK)-o $@ $(TOP)/tool/dbtotxt.c - -showdb$(TEXE): $(TOP)/tool/showdb.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/tool/showdb.c sqlite3.lo $(TLIBS) - -showstat4$(TEXE): $(TOP)/tool/showstat4.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/tool/showstat4.c sqlite3.lo $(TLIBS) - -showjournal$(TEXE): $(TOP)/tool/showjournal.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/tool/showjournal.c sqlite3.lo $(TLIBS) - -showwal$(TEXE): $(TOP)/tool/showwal.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/tool/showwal.c sqlite3.lo $(TLIBS) - -showshm$(TEXE): 
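# Example (illustrative sketch of the misspell target added above): requires
# aspell and a tclsh; the report file name is an arbitrary choice.
make misspell > misspell-report.txt 2>&1
# then either correct the flagged comment or add the word to tool/custom.txt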
$(TOP)/tool/showshm.c - $(LTLINK) -o $@ $(TOP)/tool/showshm.c - -index_usage$(TEXE): $(TOP)/tool/index_usage.c sqlite3.lo - $(LTLINK) $(SHELL_OPT) -o $@ $(TOP)/tool/index_usage.c sqlite3.lo $(TLIBS) - -changeset$(TEXE): $(TOP)/ext/session/changeset.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/ext/session/changeset.c sqlite3.lo $(TLIBS) - -changesetfuzz$(TEXE): $(TOP)/ext/session/changesetfuzz.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/ext/session/changesetfuzz.c sqlite3.lo $(TLIBS) - -rollback-test$(TEXE): $(TOP)/tool/rollback-test.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/tool/rollback-test.c sqlite3.lo $(TLIBS) - -atrc$(TEXX): $(TOP)/test/atrc.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/test/atrc.c sqlite3.lo $(TLIBS) - -LogEst$(TEXE): $(TOP)/tool/logest.c sqlite3.h - $(LTLINK) -I. -o $@ $(TOP)/tool/logest.c - -wordcount$(TEXE): $(TOP)/test/wordcount.c sqlite3.lo - $(LTLINK) -o $@ $(TOP)/test/wordcount.c sqlite3.lo $(TLIBS) - -speedtest1$(TEXE): $(TOP)/test/speedtest1.c sqlite3.c - $(LTLINK) $(ST_OPT) -o $@ $(TOP)/test/speedtest1.c sqlite3.c $(TLIBS) - -startup$(TEXE): $(TOP)/test/startup.c sqlite3.c - $(CC) -Os -g -DSQLITE_THREADSAFE=0 -o $@ $(TOP)/test/startup.c sqlite3.c $(TLIBS) - -KV_OPT += -DSQLITE_DIRECT_OVERFLOW_READ - -kvtest$(TEXE): $(TOP)/test/kvtest.c sqlite3.c - $(LTLINK) $(KV_OPT) -o $@ $(TOP)/test/kvtest.c sqlite3.c $(TLIBS) - -rbu$(EXE): $(TOP)/ext/rbu/rbu.c $(TOP)/ext/rbu/sqlite3rbu.c sqlite3.lo - $(LTLINK) -I. -o $@ $(TOP)/ext/rbu/rbu.c sqlite3.lo $(TLIBS) - -loadfts$(EXE): $(TOP)/tool/loadfts.c libsqlite3.la - $(LTLINK) $(TOP)/tool/loadfts.c libsqlite3.la -o $@ $(TLIBS) - -# This target will fail if the SQLite amalgamation contains any exported -# symbols that do not begin with "sqlite3_". It is run as part of the -# releasetest.tcl script. -# -VALIDIDS=' sqlite3(changeset|changegroup|session)?_' -checksymbols: sqlite3.o - nm -g --defined-only sqlite3.o - nm -g --defined-only sqlite3.o | egrep -v $(VALIDIDS); test $$? -ne 0 - echo '0 errors out of 1 tests' - -# Build the amalgamation-autoconf package. The amalamgation-tarball target builds -# a tarball named for the version number. Ex: sqlite-autoconf-3110000.tar.gz. -# The snapshot-tarball target builds a tarball named by the SHA1 hash # -amalgamation-tarball: sqlite3.c sqlite3rc.h - TOP=$(TOP) sh $(TOP)/tool/mkautoconfamal.sh --normal - -snapshot-tarball: sqlite3.c sqlite3rc.h - TOP=$(TOP) sh $(TOP)/tool/mkautoconfamal.sh --snapshot - -# The next two rules are used to support the "threadtest" target. Building -# threadtest runs a few thread-safety tests that are implemented in C. This -# target is invoked by the releasetest.tcl script. -# -THREADTEST3_SRC = $(TOP)/test/threadtest3.c \ - $(TOP)/test/tt3_checkpoint.c \ - $(TOP)/test/tt3_index.c \ - $(TOP)/test/tt3_vacuum.c \ - $(TOP)/test/tt3_stress.c \ - $(TOP)/test/tt3_lookaside1.c - -threadtest3$(TEXE): sqlite3.lo $(THREADTEST3_SRC) - $(LTLINK) $(TOP)/test/threadtest3.c $(TOP)/src/test_multiplex.c sqlite3.lo -o $@ $(TLIBS) - -threadtest: threadtest3$(TEXE) - ./threadtest3$(TEXE) - -threadtest5: sqlite3.c $(TOP)/test/threadtest5.c - $(LTLINK) $(TOP)/test/threadtest5.c sqlite3.c -o $@ $(TLIBS) - -releasetest: - $(TCLSH_CMD) $(TOP)/test/releasetest.tcl - -# Standard install and cleanup targets +# clean/distclean are mostly defined in main.mk. In this makefile we +# perform cleanup known to be relevant to (only) the autosetup-driven +# build. 
# -lib_install: libsqlcipher.la - $(INSTALL) -d $(DESTDIR)$(libdir) - $(LTINSTALL) libsqlcipher.la $(DESTDIR)$(libdir) - -install: sqlcipher$(TEXE) lib_install sqlite3.h sqlcipher.pc ${HAVE_TCL:1=tcl_install} - $(INSTALL) -d $(DESTDIR)$(bindir) - $(LTINSTALL) sqlcipher$(TEXE) $(DESTDIR)$(bindir) - $(INSTALL) -d $(DESTDIR)$(includedir) - $(INSTALL) -m 0644 sqlite3.h $(DESTDIR)$(includedir) - $(INSTALL) -m 0644 $(TOP)/src/sqlite3ext.h $(DESTDIR)$(includedir) - $(INSTALL) -d $(DESTDIR)$(pkgconfigdir) - $(INSTALL) -m 0644 sqlcipher.pc $(DESTDIR)$(pkgconfigdir) - -pkgIndex.tcl: - echo 'package ifneeded sqlite3 $(RELEASE) [list load [file join $$dir libtclsqlite3[info sharedlibextension]] sqlite3]' > $@ -tcl_install: lib_install libtclsqlite3.la pkgIndex.tcl - $(INSTALL) -d $(DESTDIR)$(TCLLIBDIR) - $(LTINSTALL) libtclsqlite3.la $(DESTDIR)$(TCLLIBDIR) - rm -f $(DESTDIR)$(TCLLIBDIR)/libtclsqlite3.la $(DESTDIR)$(TCLLIBDIR)/libtclsqlite3.a - $(INSTALL) -m 0644 pkgIndex.tcl $(DESTDIR)$(TCLLIBDIR) - -clean: - rm -f *.lo *.la *.o sqlcipher$(TEXE) libsqlcipher.la - rm -f sqlite3.h opcodes.* - rm -rf .libs .deps - rm -f lemon$(BEXE) lempar.c parse.* sqlite*.tar.gz - rm -f mkkeywordhash$(BEXE) keywordhash.h - rm -f *.da *.bb *.bbg gmon.out - rm -rf tsrc .target_source - rm -f tclsqlcipher$(TEXE) - rm -f testfixture$(TEXE) test.db - rm -f LogEst$(TEXE) fts3view$(TEXE) rollback-test$(TEXE) showdb$(TEXE) - rm -f showjournal$(TEXE) showstat4$(TEXE) showwal$(TEXE) speedtest1$(TEXE) - rm -f wordcount$(TEXE) changeset$(TEXE) - rm -f sqlite3.dll sqlite3.lib sqlite3.exp sqlite3.def - rm -f sqlite3.c - rm -f sqlite3rc.h - rm -f shell.c sqlite3ext.h - rm -f sqlite3_analyzer$(TEXE) sqlite3_analyzer.c - rm -f sqlite-*-output.vsix - rm -f mptester mptester.exe - rm -f rbu rbu.exe - rm -f srcck1 srcck1.exe - rm -f fuzzershell fuzzershell.exe - rm -f fuzzcheck fuzzcheck.exe - rm -f sqldiff sqldiff.exe - rm -f dbhash dbhash.exe - rm -f fts5.* fts5parse.* - rm -f threadtest5 - -distclean: clean - rm -f config.h config.log config.status libtool Makefile sqlcipher.pc +distclean-autosetup: + rm -f sqlite_cfg.h config.log config.status config.defines.* Makefile sqlite3.pc + rm -f $(TOP)/tool/emcc.sh + rm -f libsqlite3*$(T.dll) + rm -f jimsh0* +distclean: distclean-autosetup # -# Windows section +# tool/version-info: a utility for emitting sqlite3 version info +# in various forms. It's used by ext/wasm/. # -dll: sqlite3.dll - -REAL_LIBOBJ = $(LIBOBJ:%.lo=.libs/%.o) - -$(REAL_LIBOBJ): $(LIBOBJ) - -sqlite3.def: $(REAL_LIBOBJ) - echo 'EXPORTS' >sqlite3.def - nm $(REAL_LIBOBJ) | grep ' T ' | grep ' _sqlite3_' \ - | sed 's/^.* _//' >>sqlite3.def +version-info$(T.exe): $(TOP)/tool/version-info.c Makefile sqlite3.h + $(T.link) $(ST_OPT) -o $@ $(TOP)/tool/version-info.c -sqlite3.dll: $(REAL_LIBOBJ) sqlite3.def - $(TCC) -shared -o $@ sqlite3.def \ - -Wl,"--strip-all" $(REAL_LIBOBJ) +include $(TOP)/main.mk diff --git a/Makefile.linux-gcc b/Makefile.linux-gcc deleted file mode 100644 index ad5d4dd093..0000000000 --- a/Makefile.linux-gcc +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/make -# -# Makefile for SQLITE -# -# This is a template makefile for SQLite. Most people prefer to -# use the autoconf generated "configure" script to generate the -# makefile automatically. But that does not work for everybody -# and in every situation. If you are having problems with the -# "configure" script, you might want to try this makefile as an -# alternative. Create a copy of this file, edit the parameters -# below and type "make". 
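# Example (illustrative sketch): cleanup flow under the autosetup build, where
# distclean chains through the distclean-autosetup target defined above and the
# remaining clean rules live in main.mk.
make clean                      # build outputs
make distclean                  # also sqlite_cfg.h, config.log, Makefile, sqlite3.pc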
-# - -#### The toplevel directory of the source tree. This is the directory -# that contains this "Makefile.in" and the "configure.in" script. -# -TOP = ../sqlite - -#### C Compiler and options for use in building executables that -# will run on the platform that is doing the build. -# -BCC = gcc -g -O0 -#BCC = /opt/ancic/bin/c89 -0 - -#### If the target operating system supports the "usleep()" system -# call, then define the HAVE_USLEEP macro for all C modules. -# -#USLEEP = -USLEEP = -DHAVE_USLEEP=1 - -#### If you want the SQLite library to be safe for use within a -# multi-threaded program, then define the following macro -# appropriately: -# -#THREADSAFE = -DTHREADSAFE=1 -THREADSAFE = -DTHREADSAFE=0 - -#### Specify any extra linker options needed to make the library -# thread safe -# -THREADLIB = -lpthread -lm -ldl -#THREADLIB = - -#### Specify any extra libraries needed to access required functions. -# -#TLIBS = -lrt # fdatasync on Solaris 8 -TLIBS = - -#### Leave SQLITE_DEBUG undefined for maximum speed. Use SQLITE_DEBUG=1 -# to check for memory leaks. Use SQLITE_DEBUG=2 to print a log of all -# malloc()s and free()s in order to track down memory leaks. -# -# SQLite uses some expensive assert() statements in the inner loop. -# You can make the library go almost twice as fast if you compile -# with -DNDEBUG=1 -# -OPTS += -DSQLITE_DEBUG=1 -OPTS += -DSQLITE_ENABLE_WHERETRACE -OPTS += -DSQLITE_ENABLE_SELECTTRACE - -#### The suffix to add to executable files. ".exe" for windows. -# Nothing for unix. -# -#EXE = .exe -EXE = - -#### C Compile and options for use in building executables that -# will run on the target platform. This is usually the same -# as BCC, unless you are cross-compiling. -# -TCC = gcc -O0 -#TCC = gcc -g -O0 -Wall -#TCC = gcc -g -O0 -Wall -fprofile-arcs -ftest-coverage -#TCC = /opt/mingw/bin/i386-mingw32-gcc -O6 -#TCC = /opt/ansic/bin/c89 -O +z -Wl,-a,archive - -#### Tools used to build a static library. -# -AR = ar cr -#AR = /opt/mingw/bin/i386-mingw32-ar cr -RANLIB = ranlib -#RANLIB = /opt/mingw/bin/i386-mingw32-ranlib - -MKSHLIB = gcc -shared -SO = so -SHPREFIX = lib -# SO = dll -# SHPREFIX = - -#### Extra compiler options needed for programs that use the TCL library. -# -TCL_FLAGS = -I/home/drh/tcl/include/tcl8.6 - -#### Linker options needed to link against the TCL library. -# -#LIBTCL = -ltcl -lm -ldl -LIBTCL = /home/drh/tcl/lib/libtcl8.6.a -lm -lpthread -ldl -lz - -#### Additional objects for SQLite library when TCL support is enabled. -#TCLOBJ = -TCLOBJ = tclsqlite.o - -#### Compiler options needed for programs that use the readline() library. -# -READLINE_FLAGS = -#READLINE_FLAGS = -DHAVE_READLINE=1 -I/usr/include/readline - -#### Linker options needed by programs using readline() must link against. -# -LIBREADLINE = -#LIBREADLINE = -static -lreadline -ltermcap - -# You should not have to change anything below this line -############################################################################### -include $(TOP)/main.mk diff --git a/Makefile.linux-generic b/Makefile.linux-generic new file mode 100644 index 0000000000..c7441fa517 --- /dev/null +++ b/Makefile.linux-generic @@ -0,0 +1,64 @@ +#!/usr/make +all: +# +# Makefile for SQLITE +# +# This is a template makefile for SQLite. Most people prefer to +# use the autoconf generated "configure" script to generate the +# makefile automatically. But that does not work for everybody +# and in every situation. 
If you are having problems with the +# "configure" script, you might want to try this makefile as an +# alternative. Create a copy of this file, edit the parameters +# below and type "make". +# +# Maintenance note: because this is the template for Linux systems, it +# is assumed that the platform has GNU make and this file takes +# advantage of that. +# +#### +# +# $(TOP) = The toplevel directory of the source tree. This is the +# directory that contains "Makefile.in" and "auto.def". +# +TOP ?= $(realpath $(dir $(lastword $(MAKEFILE_LIST)))) + +# +# $(CFLAGS) will be used when compiling the library and most +# utilities. It must normally contain -fPIC on Linux systems, +# but overriding CFLAGS is an easy way for users to inadvertently +# remove -fPIC from their builds, so we generally expect to see +# -fPIC in $(CFLAGS.core), which main.mk will integrate with +# the CFLAGS where needed. +# +CFLAGS = +CFLAGS.core = -fPIC + +# +# $(SHELL_OPT) contains CFLAGS for building the sqlite3 CLI shell. +# See main.mk for other potentially-relevant vars which may need +# tweaking, like $(LDFLAGS_READLINE). +# +SHELL_OPT += -DHAVE_READLINE=1 +SHELL_OPT += -DSQLITE_HAVE_ZLIB=1 +LDFLAGS.readline = -lreadline # may need -lcurses etc, depending on the system +CFLAGS.readline = # needs -I... if readline.h is in an unusual place. +LDFLAGS.zlib = -lz + +# +# Library's version number. +# +PACKAGE_VERSION ?= $(shell cat $(TOP)/VERSION 2>/dev/null) + +# sqlite_cfg.h is typically created by the configure script. It's +# commonly not needed but main.mk does not know that so we have to +# create a dummy if we don't already have one. +sqlite_cfg.h: + touch $@ +distclean-.: + rm -f sqlite_cfg.h + +# +# With the above in place, we can now import the rules make use of +# it... +# +include $(TOP)/main.mk diff --git a/Makefile.msc b/Makefile.msc index 49e7daa7fa..aa52221357 100644 --- a/Makefile.msc +++ b/Makefile.msc @@ -18,6 +18,15 @@ USE_AMALGAMATION = 1 !ENDIF # <> +# Optionally set EXTRA_SRC to a list of C files to append to +# the generated sqlite3.c. Any sqlite3 extensions added this +# way may require manual editing, as described in +# https://sqlite.org/forum/forumpost/903f721f3e7c0d25 +# +!IFNDEF EXTRA_SRC +EXTRA_SRC = +!ENDIF + # Set this non-0 to enable full warnings (-W4, etc) when compiling. # !IFNDEF USE_FULLWARN @@ -52,6 +61,21 @@ MINIMAL_AMALGAMATION = 0 USE_STDCALL = 0 !ENDIF +# Use the USE_SEH=0 option on the nmake command line to omit structured +# exception handling (SEH) support. SEH is on by default. +# +!IFNDEF USE_SEH +USE_SEH = 1 +!ENDIF + +# Use STATICALLY_LINK_TCL=1 to statically link against TCL +# +!IFNDEF STATICALLY_LINK_TCL +STATICALLY_LINK_TCL = 0 +!ELSEIF $(STATICALLY_LINK_TCL)!=0 +CCOPTS = $(CCOPTS) -DSTATIC_BUILD +!ENDIF + # Set this non-0 to have the shell executable link against the core dynamic # link library. # @@ -218,6 +242,12 @@ WIN32HEAP = 0 OSTRACE = 0 !ENDIF +# enable address sanitizer using ASAN=1 on the command-line. +# +!IFNDEF ASAN +ASAN = 0 +!ENDIF + # Set this to one of the following values to enable various debugging # features. Each level includes the debugging options from the previous # levels. Currently, the recognized values for DEBUG are: @@ -264,6 +294,12 @@ SESSION = 0 RBU = 0 !ENDIF +# Set this to non-0 to enable support for blocking locks. +# +!IFNDEF SETLK_TIMEOUT +SETLK_TIMEOUT = 0 +!ENDIF + # Set the source code file to be used by executables and libraries when # they need the amalgamation. 
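# Example (illustrative sketch, assuming GNU make and a hypothetical checkout
# path): using the Makefile.linux-generic template introduced above without
# configure; TOP is derived from the template's own location, so an
# out-of-tree build directory works.
mkdir -p build && cd build
make -f ../sqlite/Makefile.linux-generic sqlite3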
# @@ -359,19 +395,29 @@ SQLITE_TCL_DEP = # the Windows platform. # !IFNDEF OPT_FEATURE_FLAGS +OPT_FEATURE_FLAGS = $(OPT_XTRA) !IF $(MINIMAL_AMALGAMATION)==0 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_FTS3=1 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_FTS5=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_RTREE=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_GEOPOLY=1 -OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_JSON1=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_STMTVTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_DBPAGE_VTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_DBSTAT_VTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_BYTECODE_VTAB=1 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_CARRAY=1 !ENDIF OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_COLUMN_METADATA=1 !ENDIF +# Additional feature-options above and beyond what are normally used can be +# be added using OPTIONS=.... on the command-line. These values are +# appended to the OPT_FEATURE_FLAGS variable. +# +!IFDEF OPTIONS +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) $(OPTIONS) +!ENDIF + # Should the session extension be enabled? If so, add compilation options # to enable it. # @@ -382,6 +428,7 @@ OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_PREUPDATE_HOOK=1 # Always enable math functions on Windows OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_MATH_FUNCTIONS +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_PERCENTILE # Should the rbu extension be enabled? If so, add compilation options # to enable it. @@ -390,6 +437,14 @@ OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_MATH_FUNCTIONS OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_RBU=1 !ENDIF +# Should structured exception handling (SEH) be enabled for WAL mode in +# the core library? It is on by default. Only omit it if the +# USE_SEH=0 option is provided on the nmake command-line. +# +!IF $(USE_SEH)==0 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_OMIT_SEH=1 +!ENDIF + # These are the "extended" SQLite compilation options used when compiling for # the Windows 10 platform. # @@ -403,6 +458,10 @@ EXT_FEATURE_FLAGS = !ENDIF !ENDIF +!IF $(SETLK_TIMEOUT)!=0 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_SETLK_TIMEOUT +!ENDIF + ############################################################################### ############################### END OF OPTIONS ################################ ############################################################################### @@ -580,17 +639,17 @@ RCC = $(RC) -DSQLITE_OS_WIN=1 -I. 
-I$(TOP) -I$(TOP)\src $(RCOPTS) $(RCCOPTS) # !IF $(USE_STDCALL)!=0 || $(FOR_WIN10)!=0 !IF "$(PLATFORM)"=="x86" -CORE_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -SHELL_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +CORE_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +SHELL_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall # <> -TEST_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -DINCLUDE_SQLITE_TCL_H=1 -DSQLITE_TCLAPI=__cdecl +TEST_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -DINCLUDE_SQLITE_TCL_H=1 -DSQLITE_TCLAPI=__cdecl # <> !ELSE !IFNDEF PLATFORM -CORE_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -SHELL_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +CORE_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +SHELL_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall # <> -TEST_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -DINCLUDE_SQLITE_TCL_H=1 -DSQLITE_TCLAPI=__cdecl +TEST_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -DINCLUDE_SQLITE_TCL_H=1 -DSQLITE_TCLAPI=__cdecl # <> !ELSE CORE_CCONV_OPTS = @@ -827,7 +886,7 @@ RCC = $(RCC) -DSQLITE_ENABLE_API_ARMOR=1 !ENDIF !IF $(DEBUG)>2 -TCC = $(TCC) -DSQLITE_DEBUG=1 +TCC = $(TCC) -DSQLITE_DEBUG=1 -DSQLITE_USE_W32_FOR_CONSOLE_IO RCC = $(RCC) -DSQLITE_DEBUG=1 !IF $(DYNAMIC_SHELL)==0 TCC = $(TCC) -DSQLITE_ENABLE_WHERETRACE -DSQLITE_ENABLE_SELECTTRACE @@ -878,6 +937,13 @@ RCC = $(RCC) -DSQLITE_WIN32_MALLOC_VALIDATE=1 !ENDIF !ENDIF + +# Address sanitizer if ASAN=1 +# +!IF $(ASAN)>0 +TCC = $(TCC) /fsanitize=address +!ENDIF + # <> # The locations of the Tcl header and library files. Also, the library that # non-stubs enabled programs using Tcl must link against. These variables @@ -885,16 +951,28 @@ RCC = $(RCC) -DSQLITE_WIN32_MALLOC_VALIDATE=1 # prior to running nmake in order to match the actual installed location and # version on this machine. 
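# Example (illustrative nmake invocation combining options introduced above):
# ASAN=1 enables /fsanitize=address and OPTIONS= appends extra feature flags;
# the specific -D flag is an example only.
nmake /f Makefile.msc ASAN=1 OPTIONS="-DSQLITE_OMIT_DEPRECATED=1" shell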
# -!IFNDEF TCLVERSION -TCLVERSION = 86 +!IF $(STATICALLY_LINK_TCL)!=0 +TCLSUFFIX = s !ENDIF - !IFNDEF TCLSUFFIX TCLSUFFIX = !ENDIF !IFNDEF TCLDIR -TCLDIR = $(TOP)\compat\tcl +TCLDIR = C:\Tcl +!ENDIF + +!IFNDEF TCLVERSION +!IF EXISTS("$(TCLDIR)\lib\tcl90$(TCLSUFFIX).lib") +TCLVERSION = 90 +!ELSEIF EXISTS("$(TCLDIR)\lib\tcl86$(TCLSUFFIX).lib") +TCLVERSION = 86 +!ELSEIF EXISTS("$(TCLDIR)\lib\tcl86t.lib") +TCLSUFFIX = t +TCLVERSION = 86 +!ELSE +TCLVERSION = 90 +!ENDIF !ENDIF !IFNDEF TCLINCDIR @@ -909,8 +987,24 @@ TCLLIBDIR = $(TCLDIR)\lib LIBTCL = tcl$(TCLVERSION)$(TCLSUFFIX).lib !ENDIF +!IFNDEF TCLLIBS +!IF $(STATICALLY_LINK_TCL)!=0 +TCLLIBS = /NODEFAULTLIB:libucrt.lib netapi32.lib user32.lib ucrt.lib +!ELSE +TCLLIBS = +!ENDIF +!ENDIF + !IFNDEF LIBTCLSTUB +!IF EXISTS("$(TCLLIBDIR)\tclstub$(TCLVERSION)$(TCLSUFFIX).lib") LIBTCLSTUB = tclstub$(TCLVERSION)$(TCLSUFFIX).lib +!ELSEIF EXISTS("$(TCLLIBDIR)\tclstub$(TCLSUFFIX).lib") +LIBTCLSTUB = tclstub$(TCLSUFFIX).lib +!ELSEIF EXISTS("$(TCLLIBDIR)\tclstub$(TCLVERSION).lib") +LIBTCLSTUB = tclstub$(TCLVERSION).lib +!ELSE +LIBTCLSTUB = tclstub.lib +!ENDIF !ENDIF !IFNDEF LIBTCLPATH @@ -969,12 +1063,32 @@ LIBICU = icuuc.lib icuin.lib # specific Tcl shell to use. # !IFNDEF TCLSH_CMD -!IF $(USE_TCLSH_IN_PATH)!=0 || !EXIST("$(TCLDIR)\bin\tclsh.exe") -TCLSH_CMD = tclsh -!ELSE +!IF EXISTS("$(TCLDIR)\bin\tclsh$(TCLVERSION).exe") +TCLSH_CMD = $(TCLDIR)\bin\tclsh$(TCLVERSION).exe +!ELSEIF EXISTS("$(TCLDIR)\bin\tclsh90.exe") +TCLSH_CMD = $(TCLDIR)\bin\tclsh90.exe +!ELSEIF EXISTS("$(TCLDIR)\bin\tclsh86.exe") +TCLSH_CMD = $(TCLDIR)\bin\tclsh86.exe +!ELSEIF EXISTS("$(TCLDIR)\bin\tclsh86t.exe") +TCLSH_CMD = $(TCLDIR)\bin\tclsh86t.exe +!ELSEIF EXISTS("$(TCLDIR)\bin\tclsh.exe") TCLSH_CMD = $(TCLDIR)\bin\tclsh.exe +!ELSE +TCLSH_CMD = tclsh !ENDIF !ENDIF + +# A light-weight TCLSH replacement that can be used for code generation +# but which is not adequate for testing. This is "jimsh0" by default, +# with source code in the repository. To force the whole build to use +# the full, official tclsh, add WITHOUT_JIMSH=1 to the nmake command line. +# +!IFDEF WITHOUT_JIMSH +JIM_TCLSH = $(TCLSH_CMD) +!ENDIF +!IFNDEF JIM_TCLSH +JIM_TCLSH = jimsh0.exe +!ENDIF # <> # Compiler options needed for programs that use the readline() library. @@ -1005,15 +1119,6 @@ RCC = $(RCC) -DSQLITE_THREAD_OVERRIDE_LOCK=-1 TLIBS = !ENDIF -# Flags controlling use of the in memory btree implementation -# -# SQLITE_TEMP_STORE is 0 to force temporary tables to be in a file, 1 to -# default to file, 2 to default to memory, and 3 to force temporary -# tables to always be in memory. -# -TCC = $(TCC) -DSQLITE_TEMP_STORE=1 -RCC = $(RCC) -DSQLITE_TEMP_STORE=1 - # Enable/disable loadable extensions, and other optional features # based on configuration. (-DSQLITE_OMIT*, -DSQLITE_ENABLE*). 
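# Example (illustrative sketch of the Tcl handling above): code generation
# defaults to the bundled jimsh0, while Tcl-dependent targets probe TCLDIR for
# tcl90/tcl86 libraries; the directory shown is hypothetical.
nmake /f Makefile.msc sqlite3.c                        # builds and uses jimsh0.exe
nmake /f Makefile.msc TCLDIR=C:\Tcl86 WITHOUT_JIMSH=1 tclextension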
# The same set of OMIT and ENABLE flags should be passed to the @@ -1240,7 +1345,7 @@ LTLIBS = $(LTLIBS) $(LIBICU) # LIBOBJS0 = vdbe.lo parse.lo alter.lo analyze.lo attach.lo auth.lo \ backup.lo bitvec.lo btmutex.lo btree.lo build.lo \ - callback.lo complete.lo ctime.lo \ + callback.lo carray.lo complete.lo ctime.lo \ date.lo dbpage.lo dbstat.lo delete.lo \ expr.lo fault.lo fkey.lo \ fts3.lo fts3_aux.lo fts3_expr.lo fts3_hash.lo fts3_icu.lo \ @@ -1248,11 +1353,11 @@ LIBOBJS0 = vdbe.lo parse.lo alter.lo analyze.lo attach.lo auth.lo \ fts3_tokenize_vtab.lo fts3_unicode.lo fts3_unicode2.lo fts3_write.lo \ fts5.lo \ func.lo global.lo hash.lo \ - icu.lo insert.lo json1.lo legacy.lo loadext.lo \ + icu.lo insert.lo json.lo legacy.lo loadext.lo \ main.lo malloc.lo mem0.lo mem1.lo mem2.lo mem3.lo mem5.lo \ memdb.lo memjournal.lo \ mutex.lo mutex_noop.lo mutex_unix.lo mutex_w32.lo \ - notify.lo opcodes.lo os.lo os_unix.lo os_win.lo \ + notify.lo opcodes.lo os.lo os_kv.lo os_unix.lo os_win.lo \ pager.lo pcache.lo pcache1.lo pragma.lo prepare.lo printf.lo \ random.lo resolve.lo rowset.lo rtree.lo \ sqlite3session.lo select.lo sqlite3rbu.lo status.lo stmt.lo \ @@ -1292,13 +1397,9 @@ LIBRESOBJS = # Core source code files, part 1. # SRC00 = \ - $(TOP)\src\crypto.c \ + $(TOP)\src\sqlcipher.c \ $(TOP)\src\crypto_cc.c \ - $(TOP)\src\crypto_impl.c \ - $(TOP)\src\crypto_libtomcrypt.c \ - $(TOP)\src\crypto_nss.c \ $(TOP)\src\crypto_openssl.c \ - $(TOP)\src\crypto.h \ $(TOP)\src\sqlcipher.h \ $(TOP)\src\alter.c \ $(TOP)\src\analyze.c \ @@ -1310,8 +1411,9 @@ SRC00 = \ $(TOP)\src\btree.c \ $(TOP)\src\build.c \ $(TOP)\src\callback.c \ + $(TOP)\src\carray.c \ $(TOP)\src\complete.c \ - $(TOP)\src\ctime.c \ + ctime.c \ $(TOP)\src\date.c \ $(TOP)\src\dbpage.c \ $(TOP)\src\dbstat.c \ @@ -1323,6 +1425,7 @@ SRC00 = \ $(TOP)\src\global.c \ $(TOP)\src\hash.c \ $(TOP)\src\insert.c \ + $(TOP)\src\json.c \ $(TOP)\src\legacy.c \ $(TOP)\src\loadext.c \ $(TOP)\src\main.c \ @@ -1340,6 +1443,7 @@ SRC00 = \ $(TOP)\src\mutex_w32.c \ $(TOP)\src\notify.c \ $(TOP)\src\os.c \ + $(TOP)\src\os_kv.c \ $(TOP)\src\os_unix.c \ $(TOP)\src\os_win.c @@ -1408,7 +1512,7 @@ SRC04 = \ SRC05 = \ $(TOP)\src\pager.h \ $(TOP)\src\pcache.h \ - $(TOP)\src\pragma.h \ + pragma.h \ $(TOP)\src\sqlite.h.in \ $(TOP)\src\sqlite3ext.h \ $(TOP)\src\sqliteInt.h \ @@ -1419,20 +1523,6 @@ SRC05 = \ $(TOP)\src\wal.h \ $(TOP)\src\whereInt.h -# Extension source code files, part 1. -# -SRC06 = \ - $(TOP)\ext\fts1\fts1.c \ - $(TOP)\ext\fts1\fts1_hash.c \ - $(TOP)\ext\fts1\fts1_porter.c \ - $(TOP)\ext\fts1\fts1_tokenizer1.c \ - $(TOP)\ext\fts2\fts2.c \ - $(TOP)\ext\fts2\fts2_hash.c \ - $(TOP)\ext\fts2\fts2_icu.c \ - $(TOP)\ext\fts2\fts2_porter.c \ - $(TOP)\ext\fts2\fts2_tokenizer.c \ - $(TOP)\ext\fts2\fts2_tokenizer1.c - # Extension source code files, part 2. # SRC07 = \ @@ -1453,19 +1543,8 @@ SRC07 = \ $(TOP)\ext\rtree\rtree.c \ $(TOP)\ext\session\sqlite3session.c \ $(TOP)\ext\rbu\sqlite3rbu.c \ - $(TOP)\ext\misc\json1.c \ $(TOP)\ext\misc\stmt.c -# Extension header files, part 1. -# -SRC08 = \ - $(TOP)\ext\fts1\fts1.h \ - $(TOP)\ext\fts1\fts1_hash.h \ - $(TOP)\ext\fts1\fts1_tokenizer.h \ - $(TOP)\ext\fts2\fts2.h \ - $(TOP)\ext\fts2\fts2_hash.h \ - $(TOP)\ext\fts2\fts2_tokenizer.h - # Extension header files, part 2. # SRC09 = \ @@ -1506,7 +1585,7 @@ SRC12 = # All source code files. 
# -SRC = $(SRC00) $(SRC01) $(SRC03) $(SRC04) $(SRC05) $(SRC06) $(SRC07) $(SRC08) $(SRC09) $(SRC10) $(SRC11) $(SRC12) +SRC = $(SRC00) $(SRC01) $(SRC03) $(SRC04) $(SRC05) $(SRC07) $(SRC09) $(SRC10) $(SRC11) $(SRC12) # Source code to the test files. # @@ -1517,11 +1596,9 @@ TESTSRC = \ $(TOP)\src\test4.c \ $(TOP)\src\test5.c \ $(TOP)\src\test6.c \ - $(TOP)\src\test7.c \ $(TOP)\src\test8.c \ $(TOP)\src\test9.c \ $(TOP)\src\test_autoext.c \ - $(TOP)\src\test_async.c \ $(TOP)\src\test_backup.c \ $(TOP)\src\test_bestindex.c \ $(TOP)\src\test_blob.c \ @@ -1546,7 +1623,6 @@ TESTSRC = \ $(TOP)\src\test_quota.c \ $(TOP)\src\test_rtree.c \ $(TOP)\src\test_schema.c \ - $(TOP)\src\test_server.c \ $(TOP)\src\test_superlock.c \ $(TOP)\src\test_syscall.c \ $(TOP)\src\test_tclsh.c \ @@ -1554,13 +1630,12 @@ TESTSRC = \ $(TOP)\src\test_thread.c \ $(TOP)\src\test_vdbecov.c \ $(TOP)\src\test_vfs.c \ - $(TOP)\src\test_windirent.c \ $(TOP)\src\test_window.c \ $(TOP)\src\test_wsd.c \ $(TOP)\ext\fts3\fts3_term.c \ $(TOP)\ext\fts3\fts3_test.c \ $(TOP)\ext\rbu\test_rbu.c \ - $(TOP)\ext\session\test_session.c + $(TOP)\ext\session\test_session.c # Statically linked extensions. # @@ -1569,7 +1644,7 @@ TESTEXT = \ $(TOP)\ext\expert\test_expert.c \ $(TOP)\ext\misc\amatch.c \ $(TOP)\ext\misc\appendvfs.c \ - $(TOP)\ext\misc\carray.c \ + $(TOP)\ext\misc\basexx.c \ $(TOP)\ext\misc\cksumvfs.c \ $(TOP)\ext\misc\closure.c \ $(TOP)\ext\misc\csv.c \ @@ -1585,17 +1660,23 @@ TESTEXT = \ $(TOP)\ext\misc\mmapwarm.c \ $(TOP)\ext\misc\nextchar.c \ $(TOP)\ext\misc\normalize.c \ - $(TOP)\ext\misc\percentile.c \ $(TOP)\ext\misc\prefixes.c \ + $(TOP)\ext\misc\qpvtab.c \ + $(TOP)\ext\misc\randomjson.c \ $(TOP)\ext\misc\regexp.c \ $(TOP)\ext\misc\remember.c \ $(TOP)\ext\misc\series.c \ $(TOP)\ext\misc\spellfix.c \ + $(TOP)\ext\misc\stmtrand.c \ $(TOP)\ext\misc\totype.c \ $(TOP)\ext\misc\unionvtab.c \ $(TOP)\ext\misc\wholenumber.c \ $(TOP)\ext\rtree\test_rtreedoc.c \ - fts5.c + $(TOP)\ext\recover\sqlite3recover.c \ + $(TOP)\ext\recover\test_recover.c \ + $(TOP)\ext\intck\test_intck.c \ + $(TOP)\ext\intck\sqlite3intck.c \ + $(TOP)\ext\recover\dbdata.c # If use of zlib is enabled, add the "zipfile.c" source file. # @@ -1609,10 +1690,9 @@ TESTEXT = $(TESTEXT) $(TOP)\ext\misc\zipfile.c TESTSRC2 = \ $(SRC00) \ $(SRC01) \ - $(SRC06) \ $(SRC07) \ $(SRC10) \ - $(TOP)\ext\async\sqlite3async.c + fts5.c # Header files used by all library source files. # @@ -1632,7 +1712,7 @@ HDR = \ $(TOP)\src\pager.h \ $(TOP)\src\pcache.h \ parse.h \ - $(TOP)\src\pragma.h \ + pragma.h \ $(SQLITE3H) \ sqlite3ext.h \ $(TOP)\src\sqliteInt.h \ @@ -1644,14 +1724,6 @@ HDR = \ # Header files used by extensions # -EXTHDR = $(EXTHDR) \ - $(TOP)\ext\fts1\fts1.h \ - $(TOP)\ext\fts1\fts1_hash.h \ - $(TOP)\ext\fts1\fts1_tokenizer.h -EXTHDR = $(EXTHDR) \ - $(TOP)\ext\fts2\fts2.h \ - $(TOP)\ext\fts2\fts2_hash.h \ - $(TOP)\ext\fts2\fts2_tokenizer.h EXTHDR = $(EXTHDR) \ $(TOP)\ext\fts3\fts3.h \ $(TOP)\ext\fts3\fts3Int.h \ @@ -1695,25 +1767,72 @@ FUZZDATA = \ # when the shell is not being dynamically linked. 
# !IF $(DYNAMIC_SHELL)==0 && $(FOR_WIN10)==0 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_DQS=0 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_FTS4=1 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS=1 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_OFFSET_SQL_FUNC=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_PERCENTILE=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_STMT_SCANSTATUS=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_STRICT_SUBTYPE=1 !ENDIF # <> # Extra compiler options for various test tools. # -MPTESTER_COMPILE_OPTS = -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5 -FUZZERSHELL_COMPILE_OPTS = -DSQLITE_ENABLE_JSON1 -FUZZCHECK_OPTS = -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_MEMSYS5 -DSQLITE_OSS_FUZZ -DSQLITE_MAX_MEMORY=50000000 -DSQLITE_PRINTF_PRECISION_LIMIT=1000 +MPTESTER_COMPILE_OPTS = -DSQLITE_ENABLE_FTS5 +FUZZERSHELL_COMPILE_OPTS = +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -I$(TOP)\test -I$(TOP)\ext\recover +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_MEMSYS5 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OSS_FUZZ +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_BYTECODE_VTAB +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_CARRAY +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_DBPAGE_VTAB +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_DBSTAT_VTAB +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_BYTECODE_VTAB +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_DESERIALIZE +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS3_PARENTHESIS +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS4 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS5 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_GEOPOLY +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_MATH_FUNCTIONS +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_MEMSYS5 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_NORMALIZE +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_OFFSET_SQL_FUNC +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_PERCENTILE +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_PREUPDATE_HOOK +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_RTREE +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_SESSION +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_STMTVTAB +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_STAT4 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_STMT_SCANSTATUS +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MEMORY=50000000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MMAP_SIZE=0 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OMIT_LOAD_EXTENSION +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRINTF_PRECISION_LIMIT=1000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRIVATE="" +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_STRICT_SUBTYPE=1 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_STATIC_RANDOMJSON + +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_MAX_MEMORY=50000000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_PRINTF_PRECISION_LIMIT=1000 +FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_OMIT_LOAD_EXTENSION FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS4 FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_FTS5 FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_RTREE FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_GEOPOLY 
FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_DBSTAT_VTAB FUZZCHECK_OPTS = $(FUZZCHECK_OPTS) -DSQLITE_ENABLE_BYTECODE_VTAB +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\fuzzcheck.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\ossfuzz.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\fuzzinvariants.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\test\vt02.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\dbdata.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\recover\sqlite3recover.c +FUZZCHECK_SRC = $(FUZZCHECK_SRC) $(TOP)\ext\misc\randomjson.c -FUZZCHECK_SRC = $(TOP)\test\fuzzcheck.c $(TOP)\test\ossfuzz.c OSSSHELL_SRC = $(TOP)\test\ossshell.c $(TOP)\test\ossfuzz.c DBFUZZ_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_OMIT_LOAD_EXTENSION KV_COMPILE_OPTS = -DSQLITE_THREADSAFE=0 -DSQLITE_DIRECT_OVERFLOW_READ @@ -1753,6 +1872,11 @@ dll: $(SQLITE3DLL) # shell: $(SQLITE3EXE) +# jimsh0 - replacement for tclsh +# +jimsh0.exe: $(TOP)\autosetup\jimsh0.c + cl -DHAVE__FULLPATH=1 $(TOP)\autosetup\jimsh0.c + # <> libsqlite3.lib: $(LIBOBJ) $(LTLIB) $(LTLIBOPTS) /OUT:$@ $(LIBOBJ) $(TLIBS) @@ -1768,22 +1892,38 @@ tclsqlite3.def: tclsqlite.lo pkgIndex.tcl: $(TOP)\VERSION for /F %%V in ('type "$(TOP)\VERSION"') do ( \ - echo package ifneeded sqlite3 @version@ [list load [file join $$dir $(SQLITE3TCLDLL)] sqlite3] \ + echo package ifneeded sqlite3 @version@ [list load [file join $$dir $(SQLITE3TCLDLL)] Sqlite3] \ | $(TCLSH_CMD) $(TOP)\tool\replace.tcl exact @version@ %%V > pkgIndex.tcl \ ) $(SQLITE3TCLDLL): libtclsqlite3.lib $(LIBRESOBJS) tclsqlite3.def pkgIndex.tcl $(LD) $(LDFLAGS) $(LTLINKOPTS) $(LTLIBPATHS) /DLL /DEF:tclsqlite3.def /OUT:$@ libtclsqlite3.lib $(LIBRESOBJS) $(LTLIBS) $(TLIBS) + +tclextension: $(SQLITE3TCLDLL) + +tclextension-install: $(SQLITE3TCLDLL) + $(TCLSH_CMD) $(TOP)\tool\buildtclext.tcl --install-only + +tclextension-uninstall: + $(TCLSH_CMD) $(TOP)\tool\buildtclext.tcl --uninstall + +tclextension-list: + @ $(TCLSH_CMD) $(TOP)\tool\buildtclext.tcl --info + +tclextension-verify: sqlite3.h + @ $(TCLSH_CMD) $(TOP)\tool\buildtclext.tcl --version-check + + # <> $(SQLITE3DLL): $(LIBOBJ) $(LIBRESOBJS) $(CORE_LINK_DEP) $(LD) $(LDFLAGS) $(LTLINKOPTS) $(LTLIBPATHS) /DLL $(CORE_LINK_OPTS) /OUT:$@ $(LIBOBJ) $(LIBRESOBJS) $(LTLIBS) $(TLIBS) # <> -sqlite3.def: libsqlite3.lib +sqlite3.def: libsqlite3.lib $(JIM_TCLSH) echo EXPORTS > sqlite3.def dumpbin /all libsqlite3.lib \ - | $(TCLSH_CMD) $(TOP)\tool\replace.tcl include "^\s+1 _?(sqlite3(?:session|changeset|changegroup|rebaser|rbu)?_[^@]*)(?:@\d+)?$$" \1 \ + | $(JIM_TCLSH) $(TOP)\tool\replace.tcl include "^\s+1 _?(sqlite3(?:session|changeset|changegroup|rebaser|rbu)?_[^@]*)(?:@\d+)?$$" \1 \ | sort >> sqlite3.def # <> @@ -1792,12 +1932,25 @@ $(SQLITE3EXE): shell.c $(SHELL_CORE_DEP) $(LIBRESOBJS) $(SHELL_CORE_SRC) $(SQLIT /link $(SQLITE3EXEPDB) $(LDFLAGS) $(LTLINKOPTS) $(SHELL_LINK_OPTS) $(LTLIBPATHS) $(LIBRESOBJS) $(LIBREADLINE) $(LTLIBS) $(TLIBS) # <> -sqldiff.exe: $(TOP)\tool\sqldiff.c $(SQLITE3C) $(SQLITE3H) - $(LTLINK) $(NO_WARN) $(TOP)\tool\sqldiff.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) +sqldiff.exe: $(TOP)\tool\sqldiff.c $(TOP)\ext\misc\sqlite3_stdio.h $(TOP)\ext\misc\sqlite3_stdio.c $(SQLITE3C) $(SQLITE3H) $(LIBRESOBJS) + $(LTLINK) $(NO_WARN) -I$(TOP)\ext\misc $(TOP)\tool\sqldiff.c $(TOP)\ext\misc\sqlite3_stdio.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) $(LIBRESOBJS) dbhash.exe: $(TOP)\tool\dbhash.c $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) $(TOP)\tool\dbhash.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) +RSYNC_SRC = \ + 
$(TOP)\tool\sqlite3_rsync.c \ + $(SQLITE3C) + +RSYNC_OPT = \ + -DSQLITE_ENABLE_DBPAGE_VTAB \ + -DSQLITE_THREADSAFE=0 \ + -DSQLITE_OMIT_LOAD_EXTENSION \ + -DSQLITE_OMIT_DEPRECATED + +sqlite3_rsync.exe: $(RSYNC_SRC) $(LIBRESOBJS) + $(LTLINK) $(RSYNC_OPT) $(NO_WARN) $(RSYNC_SRC) /link $(LDFLAGS) $(LTLINKOPTS) $(LIBRESOBJS) + scrub.exe: $(TOP)\ext\misc\scrub.c $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) -DSCRUB_STANDALONE=1 $(TOP)\ext\misc\scrub.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) @@ -1807,6 +1960,12 @@ srcck1.exe: $(TOP)\tool\srcck1.c sourcetest: srcck1.exe $(SQLITE3C) srcck1.exe $(SQLITE3C) +src-verify.exe: $(TOP)\tool\src-verify.c + $(LTLINK) $(NO_WARN) $(TOP)\tool\src-verify.c + +verify-source: src-verify.exe + src-verify.exe $(TOP) + fuzzershell.exe: $(TOP)\tool\fuzzershell.c $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) $(FUZZERSHELL_COMPILE_OPTS) $(TOP)\tool\fuzzershell.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) @@ -1814,7 +1973,10 @@ dbfuzz.exe: $(TOP)\test\dbfuzz.c $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) $(DBFUZZ_COMPILE_OPTS) $(TOP)\test\dbfuzz.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) fuzzcheck.exe: $(FUZZCHECK_SRC) $(SQLITE3C) $(SQLITE3H) - $(LTLINK) $(NO_WARN) $(FUZZCHECK_OPTS) $(FUZZCHECK_SRC) $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) + $(LTLINK) /F 8388608 $(NO_WARN) $(FUZZCHECK_OPTS) $(FUZZCHECK_SRC) $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) + +fuzzcheck-asan.exe: $(FUZZCHECK_SRC) $(SQLITE3C) $(SQLITE3H) + $(LTLINK) $(NO_WARN) /fsanitize=address $(FUZZCHECK_OPTS) $(FUZZCHECK_SRC) $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) ossshell.exe: $(OSSSHELL_SRC) $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) $(FUZZCHECK_OPTS) $(OSSSHELL_SRC) $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) @@ -1845,35 +2007,22 @@ mptest: mptester.exe # files are automatically generated. This target takes care of # all that automatic generation. 
# -.target_source: $(SRC) $(TOP)\tool\vdbe-compress.tcl fts5.c $(SQLITE_TCL_DEP) +.target_source: $(SRC) $(TOP)\tool\vdbe-compress.tcl fts5.c $(SQLITE_TCL_DEP) $(JIM_TCLSH) -rmdir /Q/S tsrc 2>NUL -mkdir tsrc - for %i in ($(SRC00)) do copy /Y %i tsrc - for %i in ($(SRC01)) do copy /Y %i tsrc - for %i in ($(SRC03)) do copy /Y %i tsrc - for %i in ($(SRC04)) do copy /Y %i tsrc - for %i in ($(SRC05)) do copy /Y %i tsrc - for %i in ($(SRC06)) do copy /Y %i tsrc - for %i in ($(SRC07)) do copy /Y %i tsrc - for %i in ($(SRC08)) do copy /Y %i tsrc - for %i in ($(SRC09)) do copy /Y %i tsrc - for %i in ($(SRC10)) do copy /Y %i tsrc - for %i in ($(SRC11)) do copy /Y %i tsrc - for %i in ($(SRC12)) do copy /Y %i tsrc - copy /Y fts5.c tsrc + $(JIM_TCLSH) $(TOP)\tool\cp.tcl $(SRC00) $(SRC01) $(SRC03) $(SRC04) $(SRC05) $(SRC07) $(SRC09) $(SRC10) $(SRC11) $(SRC12) fts5.c fts5.h tsrc copy /B tsrc\fts5.c +,, - copy /Y fts5.h tsrc copy /B tsrc\fts5.h +,, del /Q tsrc\sqlite.h.in tsrc\parse.y 2>NUL - $(TCLSH_CMD) $(TOP)\tool\vdbe-compress.tcl $(OPTS) < tsrc\vdbe.c > vdbe.new + $(JIM_TCLSH) $(TOP)\tool\vdbe-compress.tcl $(OPTS) < tsrc\vdbe.c > vdbe.new move vdbe.new tsrc\vdbe.c echo > .target_source -sqlite3.c: .target_source sqlite3ext.h sqlite3session.h $(MKSQLITE3C_TOOL) - $(TCLSH_CMD) $(MKSQLITE3C_TOOL) $(MKSQLITE3C_ARGS) +sqlite3.c: .target_source sqlite3ext.h sqlite3session.h $(MKSQLITE3C_TOOL) src-verify.exe $(JIM_TCLSH) + $(JIM_TCLSH) $(MKSQLITE3C_TOOL) $(MKSQLITE3C_ARGS) $(EXTRA_SRC) -sqlite3-all.c: sqlite3.c $(TOP)\tool\split-sqlite3c.tcl - $(TCLSH_CMD) $(TOP)\tool\split-sqlite3c.tcl +sqlite3-all.c: sqlite3.c $(TOP)\tool\split-sqlite3c.tcl $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\split-sqlite3c.tcl # <> # Rule to build the amalgamation @@ -1915,11 +2064,11 @@ opcodes.lo: opcodes.c # !IF $(USE_RC)!=0 # <> -$(LIBRESOBJS): $(TOP)\src\sqlite3.rc $(SQLITE3H) $(TOP)\VERSION +$(LIBRESOBJS): $(TOP)\src\sqlite3.rc $(SQLITE3H) $(TOP)\VERSION $(JIM_TCLSH) echo #ifndef SQLITE_RESOURCE_VERSION > sqlite3rc.h for /F %%V in ('type "$(TOP)\VERSION"') do ( \ echo #define SQLITE_RESOURCE_VERSION %%V \ - | $(TCLSH_CMD) $(TOP)\tool\replace.tcl exact . ^, >> sqlite3rc.h \ + | $(JIM_TCLSH) $(TOP)\tool\replace.tcl exact . 
^, >> sqlite3rc.h \ ) echo #endif >> sqlite3rc.h $(LTRCOMPILE) -fo $(LIBRESOBJS) $(TOP)\src\sqlite3.rc @@ -1959,11 +2108,17 @@ build.lo: $(TOP)\src\build.c $(HDR) callback.lo: $(TOP)\src\callback.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\callback.c +carray.lo: $(TOP)\src\carray.c $(HDR) + $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\carray.c + complete.lo: $(TOP)\src\complete.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\complete.c -ctime.lo: $(TOP)\src\ctime.c $(HDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\ctime.c +ctime.c: $(TOP)\tool\mkctimec.tcl $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\mkctimec.tcl + +ctime.lo: ctime.c $(HDR) + $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c ctime.c date.lo: $(TOP)\src\date.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\date.c @@ -1998,6 +2153,9 @@ hash.lo: $(TOP)\src\hash.c $(HDR) insert.lo: $(TOP)\src\insert.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\insert.c +json.lo: $(TOP)\src\json.c $(HDR) + $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\json.c + legacy.lo: $(TOP)\src\legacy.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\legacy.c @@ -2058,6 +2216,9 @@ pcache1.lo: $(TOP)\src\pcache1.c $(HDR) $(TOP)\src\pcache.h os.lo: $(TOP)\src\os.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os.c +os_kv.lo: $(TOP)\src\os_kv.c $(HDR) + $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os_kv.c + os_unix.lo: $(TOP)\src\os_unix.c $(HDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) -c $(TOP)\src\os_unix.c @@ -2174,11 +2335,11 @@ tclsqlite3.exe: tclsqlite-shell.lo $(SQLITE3C) $(SQLITE3H) $(LIBRESOBJS) # Rules to build opcodes.c and opcodes.h # -opcodes.c: opcodes.h $(TOP)\tool\mkopcodec.tcl - $(TCLSH_CMD) $(TOP)\tool\mkopcodec.tcl opcodes.h > opcodes.c +opcodes.c: opcodes.h $(TOP)\tool\mkopcodec.tcl $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\mkopcodec.tcl opcodes.h > opcodes.c -opcodes.h: parse.h $(TOP)\src\vdbe.c $(TOP)\tool\mkopcodeh.tcl - type parse.h $(TOP)\src\vdbe.c | $(TCLSH_CMD) $(TOP)\tool\mkopcodeh.tcl > opcodes.h +opcodes.h: parse.h $(TOP)\src\vdbe.c $(TOP)\tool\mkopcodeh.tcl $(JIM_TCLSH) + type parse.h $(TOP)\src\vdbe.c | $(JIM_TCLSH) $(TOP)\tool\mkopcodeh.tcl > opcodes.h # Rules to build parse.c and parse.h - the outputs of lemon. 
# @@ -2190,8 +2351,11 @@ parse.c: $(TOP)\src\parse.y lemon.exe copy /B parse.y +,, .\lemon.exe $(REQ_FEATURE_FLAGS) $(OPT_FEATURE_FLAGS) $(EXT_FEATURE_FLAGS) $(OPTS) -S parse.y -$(SQLITE3H): $(TOP)\src\sqlite.h.in $(TOP)\manifest mksourceid.exe $(TOP)\VERSION - $(TCLSH_CMD) $(TOP)\tool\mksqlite3h.tcl $(TOP:\=/) > $(SQLITE3H) $(MKSQLITE3H_ARGS) +pragma.h: $(TOP)\tool\mkpragmatab.tcl $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\mkpragmatab.tcl + +$(SQLITE3H): $(TOP)\src\sqlite.h.in $(TOP)\manifest mksourceid.exe $(TOP)\VERSION $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\mksqlite3h.tcl "$(TOP:\=/)" -o $(SQLITE3H) $(MKSQLITE3H_ARGS) sqlite3ext.h: .target_source !IF $(USE_STDCALL)!=0 || $(FOR_WIN10)!=0 @@ -2214,32 +2378,46 @@ mkkeywordhash.exe: $(TOP)\tool\mkkeywordhash.c keywordhash.h: $(TOP)\tool\mkkeywordhash.c mkkeywordhash.exe .\mkkeywordhash.exe > keywordhash.h -# Source files that go into making shell.c -SHELL_SRC = \ - $(TOP)\src\shell.c.in \ - $(TOP)\ext\misc\appendvfs.c \ - $(TOP)\ext\misc\completion.c \ - $(TOP)\ext\misc\decimal.c \ - $(TOP)\ext\misc\fileio.c \ - $(TOP)\ext\misc\ieee754.c \ - $(TOP)\ext\misc\regexp.c \ - $(TOP)\ext\misc\series.c \ - $(TOP)\ext\misc\shathree.c \ - $(TOP)\ext\misc\uint.c \ - $(TOP)\ext\expert\sqlite3expert.c \ - $(TOP)\ext\expert\sqlite3expert.h \ - $(TOP)\ext\misc\memtrace.c \ - $(TOP)\src\test_windirent.c +# Source and header files that shell.c depends on +SHELL_DEP = \ + $(TOP)\src\shell.c.in \ + $(TOP)\ext\expert\sqlite3expert.c \ + $(TOP)\ext\expert\sqlite3expert.h \ + $(TOP)\ext\intck\sqlite3intck.c \ + $(TOP)\ext\intck\sqlite3intck.h \ + $(TOP)\ext\misc\appendvfs.c \ + $(TOP)\ext\misc\base64.c \ + $(TOP)\ext\misc\base85.c \ + $(TOP)\ext\misc\completion.c \ + $(TOP)\ext\misc\decimal.c \ + $(TOP)\ext\misc\fileio.c \ + $(TOP)\ext\misc\ieee754.c \ + $(TOP)\ext\misc\memtrace.c \ + $(TOP)\ext\misc\pcachetrace.c \ + $(TOP)\ext\misc\regexp.c \ + $(TOP)\ext\misc\series.c \ + $(TOP)\ext\misc\sha1.c \ + $(TOP)\ext\misc\shathree.c \ + $(TOP)\ext\misc\sqlar.c \ + $(TOP)\ext\misc\sqlite3_stdio.c \ + $(TOP)\ext\misc\sqlite3_stdio.h \ + $(TOP)\ext\misc\uint.c \ + $(TOP)\ext\misc\vfstrace.c \ + $(TOP)\ext\misc\windirent.h \ + $(TOP)\ext\misc\zipfile.c \ + $(TOP)\ext\recover\dbdata.c \ + $(TOP)\ext\recover\sqlite3recover.c \ + $(TOP)\ext\recover\sqlite3recover.h # If use of zlib is enabled, add the "zipfile.c" source file. 
# !IF $(USE_ZLIB)!=0 -SHELL_SRC = $(SHELL_SRC) $(TOP)\ext\misc\sqlar.c -SHELL_SRC = $(SHELL_SRC) $(TOP)\ext\misc\zipfile.c +SHELL_DEP = $(SHELL_DEP) $(TOP)\ext\misc\sqlar.c +SHELL_DEP = $(SHELL_DEP) $(TOP)\ext\misc\zipfile.c !ENDIF -shell.c: $(SHELL_SRC) $(TOP)\tool\mkshellc.tcl - $(TCLSH_CMD) $(TOP)\tool\mkshellc.tcl > shell.c +shell.c: $(SHELL_DEP) $(TOP)\tool\mkshellc.tcl $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\tool\mkshellc.tcl shell.c zlib: pushd $(ZLIBDIR) && $(MAKE) /f win32\Makefile.msc clean $(ZLIBLIB) && popd @@ -2249,24 +2427,6 @@ zlib: icu.lo: $(TOP)\ext\icu\icu.c $(HDR) $(EXTHDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\icu\icu.c -fts2.lo: $(TOP)\ext\fts2\fts2.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2.c - -fts2_hash.lo: $(TOP)\ext\fts2\fts2_hash.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2_hash.c - -fts2_icu.lo: $(TOP)\ext\fts2\fts2_icu.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2_icu.c - -fts2_porter.lo: $(TOP)\ext\fts2\fts2_porter.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2_porter.c - -fts2_tokenizer.lo: $(TOP)\ext\fts2\fts2_tokenizer.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2_tokenizer.c - -fts2_tokenizer1.lo: $(TOP)\ext\fts2\fts2_tokenizer1.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts2\fts2_tokenizer1.c - fts3.lo: $(TOP)\ext\fts3\fts3.c $(HDR) $(EXTHDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts3\fts3.c @@ -2306,9 +2466,6 @@ fts3_unicode2.lo: $(TOP)\ext\fts3\fts3_unicode2.c $(HDR) $(EXTHDR) fts3_write.lo: $(TOP)\ext\fts3\fts3_write.c $(HDR) $(EXTHDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\fts3\fts3_write.c -json1.lo: $(TOP)\ext\misc\json1.c $(HDR) $(EXTHDR) - $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\misc\json1.c - stmt.lo: $(TOP)\ext\misc\stmt.c $(HDR) $(EXTHDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c $(TOP)\ext\misc\stmt.c @@ -2337,24 +2494,6 @@ FTS5_SRC = \ $(TOP)\ext\fts5\fts5_varint.c \ $(TOP)\ext\fts5\fts5_vocab.c -LSM1_SRC = \ - $(TOP)\ext\lsm1\lsm.h \ - $(TOP)\ext\lsm1\lsmInt.h \ - $(TOP)\ext\lsm1\lsm_ckpt.c \ - $(TOP)\ext\lsm1\lsm_file.c \ - $(TOP)\ext\lsm1\lsm_log.c \ - $(TOP)\ext\lsm1\lsm_main.c \ - $(TOP)\ext\lsm1\lsm_mem.c \ - $(TOP)\ext\lsm1\lsm_mutex.c \ - $(TOP)\ext\lsm1\lsm_shared.c \ - $(TOP)\ext\lsm1\lsm_sorted.c \ - $(TOP)\ext\lsm1\lsm_str.c \ - $(TOP)\ext\lsm1\lsm_tree.c \ - $(TOP)\ext\lsm1\lsm_unix.c \ - $(TOP)\ext\lsm1\lsm_varint.c \ - $(TOP)\ext\lsm1\lsm_vtab.c \ - $(TOP)\ext\lsm1\lsm_win32.c - fts5parse.c: $(TOP)\ext\fts5\fts5parse.y lemon.exe copy /Y $(TOP)\ext\fts5\fts5parse.y . copy /B fts5parse.y +,, @@ -2363,16 +2502,11 @@ fts5parse.c: $(TOP)\ext\fts5\fts5parse.y lemon.exe fts5parse.h: fts5parse.c -fts5.c: $(FTS5_SRC) - $(TCLSH_CMD) $(TOP)\ext\fts5\tool\mkfts5c.tcl +fts5.c: $(FTS5_SRC) $(JIM_TCLSH) + $(JIM_TCLSH) $(TOP)\ext\fts5\tool\mkfts5c.tcl copy /Y $(TOP)\ext\fts5\fts5.h . copy /B fts5.h +,, -lsm1.c: $(LSM1_SRC) - $(TCLSH_CMD) $(TOP)\ext\lsm1\tool\mklsm1c.tcl - copy /Y $(TOP)\ext\lsm1\lsm.h . 
- copy /B lsm.h +,, - fts5.lo: fts5.c $(HDR) $(EXTHDR) $(LTCOMPILE) $(CORE_COMPILE_OPTS) $(NO_WARN) -DSQLITE_CORE -c fts5.c @@ -2400,9 +2534,12 @@ TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_DEFAULT_PAGE_SIZE=1024 TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_STMTVTAB=1 TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_DBPAGE_VTAB=1 TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_BYTECODE_VTAB=1 -TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_JSON1=1 +TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_CARRAY=1 +TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_ENABLE_PERCENTILE=1 TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_CKSUMVFS_STATIC=1 TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) $(TEST_CCONV_OPTS) +TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_STATIC_RANDOMJSON +TESTFIXTURE_FLAGS = $(TESTFIXTURE_FLAGS) -DSQLITE_STRICT_SUBTYPE=1 TESTFIXTURE_SRC0 = $(TESTEXT) $(TESTSRC2) TESTFIXTURE_SRC1 = $(TESTEXT) $(SQLITE3C) @@ -2434,17 +2571,28 @@ sqlite_tcl.h: testfixture.exe: $(TESTFIXTURE_SRC) $(TESTFIXTURE_DEP) $(SQLITE3H) $(LIBRESOBJS) $(HDR) $(SQLITE_TCL_DEP) $(LTLINK) -DSQLITE_NO_SYNC=1 $(TESTFIXTURE_FLAGS) \ - -DBUILD_sqlite -I$(TCLINCDIR) \ + -DBUILD_sqlite -I$(TCLINCDIR) -I$(TOP)\ext\misc \ $(TESTFIXTURE_SRC) \ /link $(LDFLAGS) $(LTLINKOPTS) $(TCLLIBPATHS) $(LTLIBPATHS) $(LIBRESOBJS) $(TCLLIBS) $(LTLIBS) $(TLIBS) +# A small helper for manually running individual tests +tf.bat: testfixture.exe Makefile.msc + echo @set PATH=$(LIBTCLPATH);%PATH% > $@ + echo .\testfixture.exe %* >> $@ + extensiontest: testfixture.exe testloadext.dll @set PATH=$(LIBTCLPATH);$(PATH) .\testfixture.exe $(TOP)\test\loadext.test $(TESTOPTS) -coretestprogs: $(TESTPROGS) +tool-zip: testfixture.exe sqlite3.exe sqldiff.exe sqlite3_analyzer.exe sqlite3_rsync.exe sqlite3.dll $(TOP)\tool\mktoolzip.tcl + .\testfixture.exe $(TOP)\tool\mktoolzip.tcl + +snapshot-zip: testfixture.exe sqlite3.exe sqldiff.exe sqlite3_analyzer.exe sqlite3_rsync.exe sqlite3.dll $(TOP)\tool\mktoolzip.tcl + .\testfixture.exe $(TOP)\tool\mktoolzip.tcl --snapshot + +coretestprogs: testfixture.exe sqlite3.exe -testprogs: coretestprogs srcck1.exe fuzzcheck.exe sessionfuzz.exe +testprogs: $(TESTPROGS) srcck1.exe fuzzcheck.exe sessionfuzz.exe fulltest: alltest fuzztest @@ -2467,6 +2615,13 @@ queryplantest: testfixture.exe shell fuzztest: fuzzcheck.exe .\fuzzcheck.exe $(FUZZDATA) +# Legacy testing target for third-party integrators. The SQLite +# developers seldom use this target themselves. Instead +# they use "nmake /f Makefile.msc devtest" which runs tests on +# a standard set of options +# +test: $(TESTPROGS) sourcetest fuzztest tcltest + # Minimal testing that runs in less than 3 minutes (on a fast machine) # quicktest: testfixture.exe sourcetest @@ -2476,7 +2631,6 @@ quicktest: testfixture.exe sourcetest # This is the common case. Run many tests that do not take too long, # including fuzzcheck, sqlite3_analyzer, and sqldiff tests. # -test: $(TESTPROGS) sourcetest fuzztest tcltest # The veryquick.test TCL tests. # @@ -2484,6 +2638,42 @@ tcltest: testfixture.exe @set PATH=$(LIBTCLPATH);$(PATH) .\testfixture.exe $(TOP)\test\veryquick.test $(TESTOPTS) +# Runs all the same tests cases as the "tcltest" target but uses +# the testrunner.tcl script to run them in multiple cores +# concurrently. +testrunner: testfixture.exe + .\testfixture.exe $(TOP)\test\testrunner.tcl + +# This is the testing target preferred by the core SQLite developers. +# It runs tests under a standard configuration. 
The devs run +# "nmake /f Makefile.msc devtest" prior to each check-in, at a minimum. +# Probably other tests too, but at least this one. +# +devtest: srctree-check sourcetest + $(TCLSH_CMD) $(TOP)\test\testrunner.tcl mdevtest + +mdevtest: + $(TCLSH_CMD) $(TOP)\test\testrunner.tcl mdevtest + +# Validate that various generated files in the source tree +# are up-to-date. +# +srctree-check: $(TOP)\tool\srctree-check.tcl + $(TCLSH_CMD) $(TOP)\tool\srctree-check.tcl + +# Testing for a release +# +releasetest: verify-source + $(TCLSH_CMD) $(TOP)\test\testrunner.tcl release + +# xdevtest is like releasetest, except that it skips the +# dependency on verify-source so that xdevtest can be run from +# a modified source tree. +# +xdevtest: + $(TCLSH_CMD) $(TOP)\test\testrunner.tcl release + + smoketest: $(TESTPROGS) @set PATH=$(LIBTCLPATH);$(PATH) .\testfixture.exe $(TOP)\test\main.test $(TESTOPTS) @@ -2491,8 +2681,8 @@ smoketest: $(TESTPROGS) shelltest: $(TESTPROGS) .\testfixture.exe $(TOP)\test\permutations.test shell -sqlite3_analyzer.c: $(SQLITE3C) $(SQLITE3H) $(TOP)\src\tclsqlite.c $(TOP)\tool\spaceanal.tcl $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in $(SQLITE_TCL_DEP) - $(TCLSH_CMD) $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in > $@ +sqlite3_analyzer.c: $(SQLITE3C) $(SQLITE3H) $(TOP)\src\tclsqlite.c $(TOP)\tool\spaceanal.tcl $(TOP)\tool\mkccode.tcl $(TOP)\tool\sqlite3_analyzer.c.in $(TOP)\ext\misc\sqlite3_stdio.h $(TOP)\ext\misc\sqlite3_stdio.c $(SQLITE_TCL_DEP) + $(TCLSH_CMD) $(TOP)\tool\mkccode.tcl -DINCLUDE_SQLITE3_C $(TOP)\tool\sqlite3_analyzer.c.in > $@ sqlite3_analyzer.exe: sqlite3_analyzer.c $(LIBRESOBJS) $(LTLINK) $(NO_WARN) -DBUILD_sqlite -I$(TCLINCDIR) sqlite3_analyzer.c \ @@ -2509,14 +2699,14 @@ sqlite3_expert.exe: $(SQLITE3C) $(TOP)\ext\expert\sqlite3expert.h $(TOP)\ext\exp $(LTLINK) $(NO_WARN) $(TOP)\ext\expert\sqlite3expert.c $(TOP)\ext\expert\expert.c $(SQLITE3C) $(TLIBS) CHECKER_DEPS =\ - $(TOP)/tool/mkccode.tcl \ + $(TOP)\tool\mkccode.tcl \ sqlite3.c \ - $(TOP)/src/tclsqlite.c \ - $(TOP)/ext/repair/sqlite3_checker.tcl \ - $(TOP)/ext/repair/checkindex.c \ - $(TOP)/ext/repair/checkfreelist.c \ - $(TOP)/ext/misc/btreeinfo.c \ - $(TOP)/ext/repair/sqlite3_checker.c.in + $(TOP)\src\tclsqlite.c \ + $(TOP)\ext\repair\sqlite3_checker.tcl \ + $(TOP)\ext\repair\checkindex.c \ + $(TOP)\ext\repair\checkfreelist.c \ + $(TOP)\ext\misc\btreeinfo.c \ + $(TOP)\ext\repair\sqlite3_checker.c.in sqlite3_checker.c: $(CHECKER_DEPS) $(TCLSH_CMD) $(TOP)\tool\mkccode.tcl $(TOP)\ext\repair\sqlite3_checker.c.in > $@ @@ -2613,8 +2803,17 @@ THREADTEST3_SRC = \ threadtest3.exe: $(THREADTEST3_SRC) $(TOP)\src\test_multiplex.c $(SQLITE3C) $(SQLITE3H) $(LTLINK) $(NO_WARN) $(TOP)\test\threadtest3.c $(TOP)\src\test_multiplex.c $(SQLITE3C) /link $(LDFLAGS) $(LTLINKOPTS) -LSMDIR=$(TOP)\ext\lsm1 -!INCLUDE $(LSMDIR)\Makefile.msc +# Display key variables that control which version of TCL is to be used. 
+# +tcl-env: + @echo TCLDIR = $(TCLDIR) + @echo TCLVERSION = $(TCLVERSION) + @echo TCLSUFFIX = $(TCLSUFFIX) + @echo LIBTCL = $(LIBTCL) + @echo LIBTCLSTUB = $(LIBTCLSTUB) + @echo TCLSH_CMD = $(TCLSH_CMD) + @echo JIM_TCLSH = $(JIM_TCLSH) + @echo VISUALSTUDIOVERSION = $(VISUALSTUDIOVERSION) moreclean: clean del /Q $(SQLITE3C) $(SQLITE3H) 2>NUL @@ -2622,7 +2821,8 @@ moreclean: clean clean: del /Q *.exp *.lo *.ilk *.lib *.obj *.ncb *.pdb *.sdf *.suo 2>NUL - del /Q *.bsc *.def *.cod *.da *.bb *.bbg *.vc gmon.out 2>NUL + del /Q *.bsc *.cod *.da *.bb *.bbg *.vc gmon.out 2>NUL + del /Q sqlite3.def tclsqlite3.def ctime.c pragma.h 2>NUL del /Q $(SQLITE3EXE) $(SQLITE3DLL) Replace.exe 2>NUL # <> del /Q $(SQLITE3TCLDLL) pkgIndex.tcl 2>NUL @@ -2638,7 +2838,9 @@ clean: del /Q lsm.dll lsmtest.exe 2>NUL del /Q atrc.exe changesetfuzz.exe dbtotxt.exe index_usage.exe 2>NUL del /Q testloadext.dll 2>NUL - del /Q testfixture.exe test.db 2>NUL + del /Q testfixture.exe test.db tf.bat 2>NUL + del /Q /S testdir 2>/NUL + -rmdir /Q /S testdir 2>NUL del /Q LogEst.exe fts3view.exe rollback-test.exe showdb.exe dbdump.exe 2>NUL del /Q changeset.exe 2>NUL del /Q showjournal.exe showstat4.exe showwal.exe speedtest1.exe 2>NUL @@ -2646,7 +2848,7 @@ clean: del /Q sqlite3.c sqlite3-*.c sqlite3.h 2>NUL del /Q sqlite3rc.h 2>NUL del /Q shell.c sqlite3ext.h sqlite3session.h 2>NUL - del /Q sqlite3_analyzer.exe sqlite3_analyzer.c 2>NUL + del /Q sqlite3_analyzer.exe sqlite3_analyzer.c sqlite3_rsync.exe 2>NUL del /Q sqlite-*-output.vsix 2>NUL del /Q fuzzershell.exe fuzzcheck.exe sqldiff.exe dbhash.exe 2>NUL del /Q sqltclsh.* 2>NUL @@ -2654,5 +2856,6 @@ clean: del /Q kvtest.exe ossshell.exe scrub.exe 2>NUL del /Q showshm.exe sqlite3_checker.* sqlite3_expert.exe 2>NUL del /Q fts5.* fts5parse.* 2>NUL - del /Q lsm.h lsm1.c 2>NUL + del /q src-verify.exe 2>NUL + del /q jimsh.exe jimsh0.exe 2>NUL # <> diff --git a/README.md b/README.md index daff9d9954..b18f572325 100644 --- a/README.md +++ b/README.md @@ -17,8 +17,7 @@ SQLCipher is maintained by Zetetic, LLC, and additional information and document - 100% of data in the database file is encrypted - Good security practices (CBC mode, HMAC, key derivation) - Zero-configuration and application level cryptography -- Algorithms provided by the peer reviewed OpenSSL crypto library. -- Configurable crypto providers +- Support for multiple cryptographic providers ## Compatibility @@ -32,26 +31,18 @@ The SQLCipher team welcomes contributions to the core library. All contributions ## Compiling -Building SQLCipher is similar to compiling a regular version of SQLite from source, with a couple of small exceptions: +Building SQLCipher is similar to compiling a regular version of SQLite from source, with a few small exceptions. You must: - 1. You *must* define `SQLITE_HAS_CODEC` and either `SQLITE_TEMP_STORE=2` or `SQLITE_TEMP_STORE=3` - 2. You will need to link against a support cryptographic provider (OpenSSL, LibTomCrypt, CommonCrypto/Security.framework, or NSS) + 1. define `SQLITE_HAS_CODEC` + 2. define `SQLITE_TEMP_STORE=2` or `SQLITE_TEMP_STORE=3` (or use `configure`'s --with-tempstore=yes option) + 3. define `SQLITE_EXTRA_INIT=sqlcipher_extra_init` and `SQLITE_EXTRA_SHUTDOWN=sqlcipher_extra_shutdown` + 4. define `SQLITE_THREADSAFE` to `1` or `2` (enabled automatically by `configure`) + 2. 
compile and link with a supported cryptographic provider (OpenSSL, LibTomCrypt, CommonCrypto/Security.framework, or NSS) -The following examples demonstrate linking against OpenSSL, which is a readily available provider on most Unix-like systems. +The following example demonstrates use of OpenSSL, which is a readily available provider on most Unix-like systems. Note that, in this example, `--with-tempstore=yes` is setting `SQLITE_TEMP_STORE=2` for the build, and `SQLITE_THREADSAFE` has a default value of `1`. -Example 1. Static linking (replace /opt/local/lib with the path to libcrypto.a). Note in this -example, `--enable-tempstore=yes` is setting `SQLITE_TEMP_STORE=2` for the build. - -``` -$ ./configure --enable-tempstore=yes CFLAGS="-DSQLITE_HAS_CODEC" \ - LDFLAGS="/opt/local/lib/libcrypto.a" -$ make ``` - -Example 2. Dynamic linking - -``` -$ ./configure --enable-tempstore=yes CFLAGS="-DSQLITE_HAS_CODEC" \ +$ ./configure --with-tempstore=yes CFLAGS="-DSQLITE_HAS_CODEC -DSQLITE_EXTRA_INIT=sqlcipher_extra_init -DSQLITE_EXTRA_SHUTDOWN=sqlcipher_extra_shutdown" \ LDFLAGS="-lcrypto" $ make ``` @@ -65,7 +56,7 @@ As a result, the SQLCipher package includes its own independent tests that exer To run SQLCipher specific tests, configure as described here and run the following to execute the tests and receive a report of the results: ``` -$ ./configure --enable-tempstore=yes --enable-fts5 CFLAGS="-DSQLITE_HAS_CODEC -DSQLCIPHER_TEST" \ +$ ./configure --with-tempstore=yes --enable-fts5 CFLAGS="-DSQLITE_HAS_CODEC -DSQLITE_EXTRA_INIT=sqlcipher_extra_init -DSQLITE_EXTRA_SHUTDOWN=sqlcipher_extra_shutdown -DSQLCIPHER_TEST" \ LDFLAGS="-lcrypto" $ make testfixture $ ./testfixture test/sqlcipher.test @@ -133,7 +124,7 @@ support@zetetic.net! ## Community Edition Open Source License -Copyright (c) 2020, ZETETIC LLC +Copyright (c) 2025, ZETETIC LLC All rights reserved. Redistribution and use in source and binary forms, with or without @@ -162,22 +153,27 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
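As a rough sketch of how an application uses a library built with the options above, the C fragment below opens a database handle, supplies the key with `PRAGMA key` before any other data access, and then reads `sqlite_master` to confirm the pages can actually be decrypted. The file name and passphrase are placeholders and error handling is minimal; this is an illustration only, not part of the build instructions above.

```
/* Minimal sketch: keying an SQLCipher database from C.
** Assumes a libsqlite3 built with SQLITE_HAS_CODEC as described above.
** "encrypted.db" and the passphrase are placeholders. */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db = 0;
  char *errmsg = 0;
  int rc = sqlite3_open("encrypted.db", &db);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    return 1;
  }

  /* The key must be provided before any other statement touches the data. */
  rc = sqlite3_exec(db, "PRAGMA key = 'correct horse battery staple';",
                    0, 0, &errmsg);

  /* For an existing encrypted database, reading sqlite_master forces page
  ** decryption, so this step fails if the key is wrong or the codec is
  ** missing from the build. */
  if( rc==SQLITE_OK ){
    rc = sqlite3_exec(db, "SELECT count(*) FROM sqlite_master;",
                      0, 0, &errmsg);
  }
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "keying failed: %s\n", errmsg ? errmsg : "unknown error");
    sqlite3_free(errmsg);
    sqlite3_close(db);
    return 1;
  }

  sqlite3_close(db);
  return 0;
}
```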

SQLite Source Repository

-This repository contains the complete source code for the -[SQLite database engine](https://sqlite.org/). Some test scripts -are also included. However, many other test scripts -and most of the documentation are managed separately. +This repository contains the complete source code for the +[SQLite database engine](https://sqlite.org/), including +many tests. Additional tests and most documentation +are managed separately. + +See the [on-line documentation](https://sqlite.org/) for more information +about what SQLite is and how it works from a user's perspective. This +README file is about the source code that goes into building SQLite, +not about how SQLite is used. ## Version Control -SQLite sources are managed using the -[Fossil](https://www.fossil-scm.org/), a distributed version control system +SQLite sources are managed using +[Fossil](https://fossil-scm.org/), a distributed version control system that was specifically designed and written to support SQLite development. The [Fossil repository](https://sqlite.org/src/timeline) contains the urtext. If you are reading this on GitHub or some other Git repository or service, then you are looking at a mirror. The names of check-ins and other artifacts in a Git mirror are different from the official -names for those objects. The offical names for check-ins are +names for those objects. The official names for check-ins are found in a footer on the check-in comment for authorized mirrors. The official check-in name can also be seen in the `manifest.uuid` file in the root of the tree. Always use the official name, not the @@ -187,46 +183,62 @@ If you pulled your SQLite source code from a secondary source and want to verify its integrity, there are hints on how to do that in the [Verifying Code Authenticity](#vauth) section below. -## Obtaining The Code +## Contacting The SQLite Developers + +The preferred way to ask questions or make comments about SQLite or to +report bugs against SQLite is to visit the +[SQLite Forum](https://sqlite.org/forum) at . +Anonymous postings are permitted. + +If you think you have found a bug that has security implications and +you do not want to report it on the public forum, you can send a private +email to drh at sqlite dot org. + +## Public Domain + +The SQLite source code is in the public domain. See + for details. + +Because SQLite is in the public domain, we do not normally accept pull +requests, because if we did take a pull request, the changes in that +pull request might carry a copyright and the SQLite source code would +then no longer be fully in the public domain. + +## Obtaining The SQLite Source Code -If you do not want to use Fossil, you can download tarballs or ZIP -archives or [SQLite archives](https://sqlite.org/cli.html#sqlar) as follows: +Source code tarballs or ZIP archives are available at: - * Lastest trunk check-in as - [Tarball](https://www.sqlite.org/src/tarball/sqlite.tar.gz), - [ZIP-archive](https://www.sqlite.org/src/zip/sqlite.zip), or - [SQLite-archive](https://www.sqlite.org/src/sqlar/sqlite.sqlar). 
+ * [Latest trunk check-in](https://sqlite.org/src/rchvdwnld/trunk). - * Latest release as - [Tarball](https://www.sqlite.org/src/tarball/sqlite.tar.gz?r=release), - [ZIP-archive](https://www.sqlite.org/src/zip/sqlite.zip?r=release), or - [SQLite-archive](https://www.sqlite.org/src/sqlar/sqlite.sqlar?r=release). + * [Latest release](https://sqlite.org/src/rchvdwnld/release) - * For other check-ins, substitute an appropriate branch name or - tag or hash prefix in place of "release" in the URLs of the previous - bullet. Or browse the [timeline](https://www.sqlite.org/src/timeline) - to locate the check-in desired, click on its information page link, - then click on the "Tarball" or "ZIP Archive" links on the information - page. + * For other check-ins, browse the + [project timeline](https://sqlite.org/src/timeline?y=ci) and + click on the check-in hash of the check-in you want to download. + On the resulting "info" page, click one of the options to the + right of the "**Downloads:**" label in the "**Overview**" section + near the top. -If you do want to use Fossil to check out the source tree, +To access sources directly using [Fossil](https://fossil-scm.org/home), first install Fossil version 2.0 or later. -(Source tarballs and precompiled binaries available -[here](https://www.fossil-scm.org/fossil/uv/download.html). Fossil is -a stand-alone program. To install, simply download or build the single -executable file and put that file someplace on your $PATH.) +Source tarballs and precompiled binaries for Fossil are available at +. Fossil is +a stand-alone program. To install, simply download or build the single +executable file and put that file someplace on your $PATH or %PATH%. Then run commands like this: - mkdir -p ~/sqlite ~/Fossils + mkdir -p ~/sqlite cd ~/sqlite - fossil clone https://www.sqlite.org/src ~/Fossils/sqlite.fossil - fossil open ~/Fossils/sqlite.fossil + fossil open https://sqlite.org/src -After setting up a repository using the steps above, you can always -update to the lastest version using: +The initial "fossil open" command will take two or three minutes. Afterwards, +you can do fast, bandwidth-efficient updates to the whatever versions +of SQLite you like. Some examples: - fossil update trunk ;# latest trunk check-in - fossil update release ;# latest official release + fossil update trunk ;# latest trunk check-in + fossil update release ;# latest official release + fossil update trunk:2024-01-01 ;# First trunk check-in after 2024-01-01 + fossil update version-3.39.0 ;# Version 3.39.0 Or type "fossil ui" to get a web-based user interface. @@ -240,15 +252,42 @@ script found at the root of the source tree. Then run "make". For example: - tar xzf sqlite.tar.gz ;# Unpack the source tree into "sqlite" - mkdir bld ;# Build will occur in a sibling directory - cd bld ;# Change to the build directory - ../sqlite/configure ;# Run the configure script - make ;# Run the makefile. 
- make sqlite3.c ;# Build the "amalgamation" source file - make test ;# Run some tests (requires Tcl) + apt install gcc make tcl-dev ;# Make sure you have all the necessary build tools + tar xzf sqlite.tar.gz ;# Unpack the source tree into "sqlite" + mkdir bld ;# Build will occur in a sibling directory + cd bld ;# Change to the build directory + ../sqlite/configure ;# Run the configure script + make sqlite3 ;# Builds the "sqlite3" command-line tool + make sqlite3.c ;# Build the "amalgamation" source file + make sqldiff ;# Builds the "sqldiff" command-line tool + # Makefile targets below this point require tcl-dev + make tclextension-install ;# Build and install the SQLite TCL extension + make devtest ;# Run development tests + make releasetest ;# Run full release tests + make sqlite3_analyzer ;# Builds the "sqlite3_analyzer" tool -See the makefile for additional targets. +See the makefile for additional targets. For debugging builds, the +core developers typically run "configure" with options like this: + + ../sqlite/configure --enable-all --enable-debug CFLAGS='-O0 -g' + +For release builds, the core developers usually do: + + ../sqlite/configure --enable-all + +Almost all makefile targets require a "tclsh" TCL interpreter version 8.6 or +later. The "tclextension-install" target and the test targets that follow +all require TCL development libraries too. ("apt install tcl-dev"). It is +helpful, but is not required, to install the SQLite TCL extension (the +"tclextension-install" target) prior to running tests. The "releasetest" +target has additional requirements, such as "valgrind". + +On "make" command-lines, one can add "OPTIONS=..." to specify additional +compile-time options over and above those set by ./configure. For example, +to compile with the SQLITE_OMIT_DEPRECATED compile-time option, one could say: + + ./configure --enable-all + make OPTIONS=-DSQLITE_OMIT_DEPRECATED sqlite3 The configure script uses autoconf 2.61 and libtool. If the configure script does not work out for you, there is a generic makefile named @@ -256,60 +295,86 @@ script does not work out for you, there is a generic makefile named can copy and edit to suit your needs. Comments on the generic makefile show what changes are needed. -## Using MSVC for Windows systems - -On Windows, all applicable build products can be compiled with MSVC. -First open the command prompt window associated with the desired compiler -version (e.g. "Developer Command Prompt for VS2013"). Next, use NMAKE -with the provided "Makefile.msc" to build one of the supported targets. - -For example, from the parent directory of the source subtree named "sqlite": - - mkdir bld - cd bld - nmake /f ..\sqlite\Makefile.msc TOP=..\sqlite - nmake /f ..\sqlite\Makefile.msc sqlite3.c TOP=..\sqlite - nmake /f ..\sqlite\Makefile.msc sqlite3.dll TOP=..\sqlite - nmake /f ..\sqlite\Makefile.msc sqlite3.exe TOP=..\sqlite - nmake /f ..\sqlite\Makefile.msc test TOP=..\sqlite - -There are several build options that can be set via the NMAKE command -line. For example, to build for WinRT, simply add "FOR_WINRT=1" argument -to the "sqlite3.dll" command line above. When debugging into the SQLite -code, adding the "DEBUG=1" argument to one of the above command lines is -recommended. - -SQLite does not require [Tcl](http://www.tcl.tk/) to run, but a Tcl installation -is required by the makefiles (including those for MSVC). SQLite contains -a lot of generated code and Tcl is used to do much of that code generation. 
- -## Source Code Tour - -Most of the core source files are in the **src/** subdirectory. The -**src/** folder also contains files used to build the "testfixture" test -harness. The names of the source files used by "testfixture" all begin -with "test". -The **src/** also contains the "shell.c" file -which is the main program for the "sqlite3.exe" -[command-line shell](https://sqlite.org/cli.html) and -the "tclsqlite.c" file which implements the -[Tcl bindings](https://sqlite.org/tclsqlite.html) for SQLite. -(Historical note: SQLite began as a Tcl -extension and only later escaped to the wild as an independent library.) - -Test scripts and programs are found in the **test/** subdirectory. -Addtional test code is found in other source repositories. -See [How SQLite Is Tested](http://www.sqlite.org/testing.html) for -additional information. - -The **ext/** subdirectory contains code for extensions. The -Full-text search engine is in **ext/fts3**. The R-Tree engine is in -**ext/rtree**. The **ext/misc** subdirectory contains a number of -smaller, single-file extensions, such as a REGEXP operator. - -The **tool/** subdirectory contains various scripts and programs used -for building generated source code files or for testing or for generating -accessory programs such as "sqlite3_analyzer(.exe)". +## Compiling for Windows Using MSVC + +On Windows, everything can be compiled with MSVC. +You will also need a working installation of TCL if you want to run tests. +TCL is not required if you just want to build SQLite itself. +See the [compile-for-windows.md](doc/compile-for-windows.md) document for +additional information about how to install MSVC and TCL and configure your +build environment. + +If you want to run tests, you need to let SQLite know the location of your +TCL library, using a command like this: + + set TCLDIR=c:\Tcl + +SQLite uses "tclsh.exe" as part of the build process, and so that +program will need to be somewhere on your %PATH%. SQLite itself +does not contain any TCL code, but it does use TCL to run tests. +You may need to install TCL development +libraries in order to successfully complete some makefile targets. +It is helpful, but is not required, to install the SQLite TCL extension +(the "tclextension-install" target) prior to running tests. + +Build using Makefile.msc. Example: + + nmake /f Makefile.msc sqlite3.exe + nmake /f Makefile.msc sqlite3.c + nmake /f Makefile.msc sqldiff.exe + # Makefile targets below this point require TCL development libraries + nmake /f Makefile.msc tclextension-install + nmake /f Makefile.msc devtest + nmake /f Makefile.msc releasetest + nmake /f Makefile.msc sqlite3_analyzer.exe + +There are many other makefile targets. See comments in Makefile.msc for +details. + +As with the unix Makefile, the OPTIONS=... argument can be passed on the nmake +command-line to enable new compile-time options. For example: + + nmake /f Makefile.msc OPTIONS=-DSQLITE_OMIT_DEPRECATED sqlite3.exe + +## Source Tree Map + + * **src/** - This directory contains the primary source code for the + SQLite core. For historical reasons, C-code used for testing is + also found here. Source files intended for testing begin with "`test`". + The `tclsqlite3.c` and `tclsqlite3.h` files are the TCL interface + for SQLite and are also not part of the core. + + * **test/** - This directory and its subdirectories contains code used + for testing. 
Files that end in "`.test`" are TCL scripts that run + tests using an augmented TCL interpreter named "testfixture". Use + a command like "`make testfixture`" (unix) or + "`nmake /f Makefile.msc testfixture.exe`" (windows) to build that + augmented TCL interpreter, then run individual tests using commands like + "`testfixture test/main.test`". This test/ subdirectory also contains + additional C code modules and scripts for other kinds of testing. + + * **tool/** - This directory contains programs and scripts used to + build some of the machine-generated code that goes into the SQLite + core, as well as to build and run tests and perform diagnostics. + The source code to [the Lemon parser generator](./doc/lemon.html) is + found here. There are also TCL scripts used to build and/or transform + source code files. For example, the tool/mksqlite3h.tcl script reads + the src/sqlite.h.in file and uses it as a template to construct + the deliverable "sqlite3.h" file that defines the SQLite interface. + + * **ext/** - Various extensions to SQLite are found under this + directory. For example, the FTS5 subsystem is in "ext/fts5/". + Some of these extensions (ex: FTS3/4, FTS5, RTREE) might get built + into the SQLite amalgamation, but not all of them. The + "ext/misc/" subdirectory contains an assortment of one-file extensions, + many of which are omitted from the SQLite core, but which are included + in the [SQLite CLI](https://sqlite.org/cli.html). + + * **doc/** - Some documentation files about SQLite internals are found + here. Note, however, that the primary documentation designed for + application developers and users of SQLite is in a completely separate + repository. Note also that the primary API documentation is derived + from specially constructed comments in the src/sqlite.h.in file. ### Generated Source Code Files @@ -323,7 +388,7 @@ manually-edited files and automatically-generated files. The SQLite interface is defined by the **sqlite3.h** header file, which is generated from src/sqlite.h.in, ./manifest.uuid, and ./VERSION. The -[Tcl script](http://www.tcl.tk) at tool/mksqlite3h.tcl does the conversion. +[Tcl script](https://www.tcl.tk) at tool/mksqlite3h.tcl does the conversion. The manifest.uuid file contains the SHA3 hash of the particular check-in and is used to generate the SQLITE\_SOURCE\_ID macro. The VERSION file contains the current SQLite version number. The sqlite3.h header is really @@ -332,7 +397,7 @@ at just the right spots. Note that comment text in the sqlite3.h file is used to generate much of the SQLite API documentation. The Tcl scripts used to generate that documentation are in a separate source repository. -The SQL language parser is **parse.c** which is generate from a grammar in +The SQL language parser is **parse.c** which is generated from a grammar in the src/parse.y file. The conversion of "parse.y" into "parse.c" is done by the [lemon](./doc/lemon.html) LALR(1) parser generator. The source code for lemon is at tool/lemon.c. Lemon uses the tool/lempar.c file as a @@ -342,7 +407,7 @@ generates parse.c. The **opcodes.h** header file contains macros that define the numbers corresponding to opcodes in the "VDBE" virtual machine. The opcodes.h -file is generated by the scanning the src/vdbe.c source file. The +file is generated by scanning the src/vdbe.c source file. 
The Tcl script at ./mkopcodeh.tcl does this scan and generates opcodes.h. A second Tcl script, ./mkopcodec.tcl, then scans opcodes.h to generate the **opcodes.c** source file, which contains a reverse mapping from @@ -390,33 +455,39 @@ individual source file exceeds 32K lines in length. ## How It All Fits Together SQLite is modular in design. -See the [architectural description](http://www.sqlite.org/arch.html) +See the [architectural description](https://sqlite.org/arch.html) for details. Other documents that are useful in -(helping to understand how SQLite works include the -[file format](http://www.sqlite.org/fileformat2.html) description, -the [virtual machine](http://www.sqlite.org/opcode.html) that runs +helping to understand how SQLite works include the +[file format](https://sqlite.org/fileformat2.html) description, +the [virtual machine](https://sqlite.org/opcode.html) that runs prepared statements, the description of -[how transactions work](http://www.sqlite.org/atomiccommit.html), and -the [overview of the query planner](http://www.sqlite.org/optoverview.html). +[how transactions work](https://sqlite.org/atomiccommit.html), and +the [overview of the query planner](https://sqlite.org/optoverview.html). -Years of effort have gone into optimizating SQLite, both +Decades of effort have gone into optimizing SQLite, both for small size and high performance. And optimizations tend to result in complex code. So there is a lot of complexity in the current SQLite implementation. It will not be the easiest library in the world to hack. -Key files: +### Key source code files * **sqlite.h.in** - This file defines the public interface to the SQLite library. Readers will need to be familiar with this interface before - trying to understand how the library works internally. + trying to understand how the library works internally. This file is + really a template that is transformed into the "sqlite3.h" deliverable + using a script invoked by the makefile. * **sqliteInt.h** - this header file defines many of the data objects used internally by SQLite. In addition to "sqliteInt.h", some - subsystems have their own header files. + subsystems inside of SQLite have their own header files. These internal + interfaces are not for use by applications. They can and do change + from one release of SQLite to the next. * **parse.y** - This file describes the LALR(1) grammar that SQLite uses to parse SQL statements, and the actions that are taken at each step - in the parsing process. + in the parsing process. The file is processed by the + [Lemon Parser Generator](./doc/lemon.html) to produce the actual C code + used for parsing. * **vdbe.c** - This file implements the virtual machine that runs prepared statements. There are various helper files whose names @@ -452,15 +523,17 @@ Key files: is not part of the core SQLite library. But as most of the tests in this repository are written in Tcl, the Tcl language bindings are important.
- * **test*.c** - Files in the src/ folder that begin with "test" go into + * **test\*.c** - Files in the src/ folder that begin with "test" go into building the "testfixture.exe" program. The testfixture.exe program is an enhanced Tcl shell. The testfixture.exe program runs scripts in the test/ folder to validate the core SQLite code. The testfixture program - (and some other test programs too) is build and run when you type + (and some other test programs too) is built and run when you type "make test". - * **ext/misc/json1.c** - This file implements the various JSON functions - that are build into SQLite. + * **VERSION**, **manifest**, and **manifest.uuid** - These files define + the current SQLite version number. The "VERSION" file is human generated, + but the "manifest" and "manifest.uuid" files are automatically generated + by the [Fossil version control system](https://fossil-scm.org/). There are many other source files. Each has a succinct header comment that describes its purpose and role within the larger system. @@ -469,21 +542,38 @@ describes its purpose and role within the larger system. ## Verifying Code Authenticity The `manifest` file at the root directory of the source tree -contains either a SHA3-256 hash (for newer files) or a SHA1 hash (for -older files) for every source file in the repository. -The SHA3-256 hash of the `manifest` -file itself is the official name of the version of the source tree that you -have. The `manifest.uuid` file should contain the SHA3-256 hash of the +contains either a SHA3-256 hash or a SHA1 hash +for every source file in the repository. +The name of the version of the entire source tree is just the +SHA3-256 hash of the `manifest` file itself, possibly with the +last line of that file omitted if the last line begins with +"`# Remove this line`". +The `manifest.uuid` file should contain the SHA3-256 hash of the `manifest` file. If all of the above hash comparisons are correct, then you can be confident that your source tree is authentic and unadulterated. +Details on the format for the `manifest` files are available +[on the Fossil website](https://fossil-scm.org/home/doc/trunk/www/fileformat.wiki#manifest). + +The process of checking source code authenticity is automated by the +makefile: + +> make verify-source -The format of the `manifest` file should be mostly self-explanatory, but -if you want details, they are available -[here](https://fossil-scm.org/fossil/doc/trunk/www/fileformat.wiki#manifest). +Or on windows: + +> nmake /f Makefile.msc verify-source + +Using the makefile to verify source integrity is good for detecting +accidental changes to the source tree, but malicious changes could be +hidden by also modifying the makefiles. ## Contacts -The main SQLite website is [http://www.sqlite.org/](http://www.sqlite.org/) +The main SQLite website is [https://sqlite.org/](https://sqlite.org/) with geographically distributed backups at -[http://www2.sqlite.org/](http://www2.sqlite.org) and -[http://www3.sqlite.org/](http://www3.sqlite.org). 
+[https://www2.sqlite.org/](https://www2.sqlite.org) and +[https://www3.sqlite.org/](https://www3.sqlite.org). + +Contact the SQLite developers through the +[SQLite Forum](https://sqlite.org/forum/). In an emergency, you +can send private email to the lead developer at drh at sqlite dot org. diff --git a/SQLCipher.podspec.json b/SQLCipher.podspec.json deleted file mode 100644 index 91ce7bc068..0000000000 --- a/SQLCipher.podspec.json +++ /dev/null @@ -1,91 +0,0 @@ -{ - "authors": "Zetetic LLC", - "default_subspecs": "standard", - "description": "SQLCipher is an open source extension to SQLite that provides transparent 256-bit AES encryption of database files.", - "homepage": "https://www.zetetic.net/sqlcipher/", - "license": "BSD", - "name": "SQLCipher", - "platforms": { - "ios": "8.0", - "osx": "10.9", - "tvos": "9.0", - "watchos": "2.0" - }, - "prepare_command": "./configure --enable-tempstore=yes --with-crypto-lib=commoncrypto CFLAGS=\"-DSQLITE_HAS_CODEC -DSQLITE_TEMP_STORE=2 -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_STAT3 -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_LOAD_EXTENSION -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS4_UNICODE61 -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5 -DHAVE_USLEEP=1 -DSQLITE_MAX_VARIABLE_NUMBER=99999\"; make sqlite3.c", - "requires_arc": false, - "source": { - "git": "https://github.com/sqlcipher/sqlcipher.git", - "tag": "v4.5.1" - }, - "summary": "Full Database Encryption for SQLite.", - "version": "4.5.1", - "subspecs": [ - { - "compiler_flags": [ - "-DNDEBUG", - "-DSQLITE_HAS_CODEC", - "-DSQLITE_TEMP_STORE=2", - "-DSQLITE_SOUNDEX", - "-DSQLITE_THREADSAFE", - "-DSQLITE_ENABLE_RTREE", - "-DSQLITE_ENABLE_STAT3", - "-DSQLITE_ENABLE_STAT4", - "-DSQLITE_ENABLE_COLUMN_METADATA", - "-DSQLITE_ENABLE_MEMORY_MANAGEMENT", - "-DSQLITE_ENABLE_LOAD_EXTENSION", - "-DSQLITE_ENABLE_FTS4", - "-DSQLITE_ENABLE_FTS4_UNICODE61", - "-DSQLITE_ENABLE_FTS3_PARENTHESIS", - "-DSQLITE_ENABLE_UNLOCK_NOTIFY", - "-DSQLITE_ENABLE_JSON1", - "-DSQLITE_ENABLE_FTS5", - "-DSQLCIPHER_CRYPTO_CC", - "-DHAVE_USLEEP=1", - "-DSQLITE_MAX_VARIABLE_NUMBER=99999" - ], - "frameworks": [ - "Foundation", - "Security" - ], - "name": "common", - "source_files": "sqlite3.{h,c}", - "xcconfig": { - "HEADER_SEARCH_PATHS": "$(PODS_ROOT)/SQLCipher", - "GCC_PREPROCESSOR_DEFINITIONS": "$(inherited) SQLITE_HAS_CODEC=1", - "OTHER_CFLAGS": "$(inherited) -DSQLITE_HAS_CODEC -DSQLITE_TEMP_STORE=2 -DSQLITE_SOUNDEX -DSQLITE_THREADSAFE -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_STAT3 -DSQLITE_ENABLE_STAT4 -DSQLITE_ENABLE_COLUMN_METADATA -DSQLITE_ENABLE_MEMORY_MANAGEMENT -DSQLITE_ENABLE_LOAD_EXTENSION -DSQLITE_ENABLE_FTS4 -DSQLITE_ENABLE_FTS4_UNICODE61 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_UNLOCK_NOTIFY -DSQLITE_ENABLE_JSON1 -DSQLITE_ENABLE_FTS5 -DSQLCIPHER_CRYPTO_CC -DHAVE_USLEEP=1 -DSQLITE_MAX_VARIABLE_NUMBER=99999" - } - }, - { - "dependencies": { - "SQLCipher/common": [ - - ] - }, - "name": "standard" - }, - { - "compiler_flags": "", - "dependencies": { - "SQLCipher/common": [ - - ] - }, - "name": "fts", - "xcconfig": { - "OTHER_CFLAGS": "$(inherited)" - } - }, - { - 
"compiler_flags": "", - "dependencies": { - "SQLCipher/common": [ - - ] - }, - "name": "unlock_notify", - "xcconfig": { - "OTHER_CFLAGS": "$(inherited)" - } - } - ] -} diff --git a/SQLITE_LICENSE.md b/SQLITE_LICENSE.md new file mode 100644 index 0000000000..4029dc9e7f --- /dev/null +++ b/SQLITE_LICENSE.md @@ -0,0 +1,79 @@ +The SQLite source code, including all of the files in the directories +listed in the bullets below are +[Public Domain](https://sqlite.org/copyright.html). +The authors have submitted written affidavits releasing their work to +the public for any use. Every byte of the public-domain code can be +traced back to the original authors. The files of this repository +that are public domain include the following: + + * All of the primary SQLite source code files found in the + [src/ directory](https://sqlite.org/src/tree/src?type=tree&expand) + * All of the test cases and testing code in the + [test/ directory](https://sqlite.org/src/tree/test?type=tree&expand) + * All of the SQLite extension source code and test cases in the + [ext/ directory](https://sqlite.org/src/tree/ext?type=tree&expand) + * All code that ends up in the "sqlite3.c" and "sqlite3.h" build products + that actually implement the SQLite RDBMS. + * All of the code used to compile the + [command-line interface](https://sqlite.org/cli.html) + * All of the code used to build various utility programs such as + "sqldiff", "sqlite3_rsync", and "sqlite3_analyzer". + + +The public domain source files usually contain a header comment +similar to the following to make it clear that the software is +public domain. + +> The author disclaims copyright to this source code. In place of +> a legal notice, here is a blessing: +> +> * May you do good and not evil. +> * May you find forgiveness for yourself and forgive others. +> * May you share freely, never taking more than you give. + +Almost every file you find in this source repository will be +public domain. But there are a small number of exceptions: + +Non-Public-Domain Code Included With This Source Repository AS A Convenience +---------------------------------------------------------------------------- + +This repository contains a (relatively) small amount of non-public-domain +code used to help implement the configuration and build logic. In other +words, there are some non-public-domain files used to implement: + +> ./configure && make + +In all cases, the non-public-domain files included with this +repository have generous BSD-style licenses. So anyone is free to +use any of the code in this source repository for any purpose, though +attribution may be required to reuse or republish the configure and +build scripts. None of the non-public-domain code ever actually reaches +the build products, such as "sqlite3.c", however, so no attribution is +required to use SQLite itself. The non-public-domain code consists of +scripts used to help compile SQLite. The non-public-domain code is +technically not part of SQLite. The non-public-domain code is +included in this repository as a convenience to developers, so that those +who want to build SQLite do not need to go download a bunch of +third-party build scripts in order to compile SQLite. 
+ +Non-public-domain code included in this respository includes: + + * The ["autosetup"](http://msteveb.github.io/autosetup/) configuration + system that is contained (mostly) in the autosetup/ directory, but also + includes the "./configure" script at the top-level of this archive. + Autosetup has a separate BSD-style license. See the + [autosetup/LICENSE](http://msteveb.github.io/autosetup/license/) + for details. + + * There are BSD-style licenses on some of the configuration + software found in the legacy autoconf/ directory and its + subdirectories. + +The following unix shell command can be run from the top-level +of this source repository in order to remove all non-public-domain +code: + +> rm -rf configure autosetup autoconf + +If you unpack this source repository and then run the command above, what +is left will be 100% public domain. diff --git a/VERSION b/VERSION index 8587f05207..d1278a4677 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -3.37.2 +3.51.1 diff --git a/aclocal.m4 b/aclocal.m4 deleted file mode 100644 index 8ce4d37a7a..0000000000 --- a/aclocal.m4 +++ /dev/null @@ -1,9068 +0,0 @@ -# generated automatically by aclocal 1.16.1 -*- Autoconf -*- - -# Copyright (C) 1996-2018 Free Software Foundation, Inc. - -# This file is free software; the Free Software Foundation -# gives unlimited permission to copy and/or distribute it, -# with or without modifications, as long as this notice is preserved. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY, to the extent permitted by law; without -# even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. - -m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) -# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- -# -# Copyright (C) 1996-2001, 2003-2015 Free Software Foundation, Inc. -# Written by Gordon Matzigkeit, 1996 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -m4_define([_LT_COPYING], [dnl -# Copyright (C) 2014 Free Software Foundation, Inc. -# This is free software; see the source for copying conditions. There is NO -# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - -# GNU Libtool is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of of the License, or -# (at your option) any later version. -# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program or library that is built -# using GNU Libtool, you may include this file under the same -# distribution terms that you use for the rest of that program. -# -# GNU Libtool is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -]) - -# serial 58 LT_INIT - - -# LT_PREREQ(VERSION) -# ------------------ -# Complain and exit if this libtool version is less that VERSION. 
-m4_defun([LT_PREREQ], -[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, - [m4_default([$3], - [m4_fatal([Libtool version $1 or higher is required], - 63)])], - [$2])]) - - -# _LT_CHECK_BUILDDIR -# ------------------ -# Complain if the absolute build directory name contains unusual characters -m4_defun([_LT_CHECK_BUILDDIR], -[case `pwd` in - *\ * | *\ *) - AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; -esac -]) - - -# LT_INIT([OPTIONS]) -# ------------------ -AC_DEFUN([LT_INIT], -[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK -AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl -AC_BEFORE([$0], [LT_LANG])dnl -AC_BEFORE([$0], [LT_OUTPUT])dnl -AC_BEFORE([$0], [LTDL_INIT])dnl -m4_require([_LT_CHECK_BUILDDIR])dnl - -dnl Autoconf doesn't catch unexpanded LT_ macros by default: -m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl -m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl -dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 -dnl unless we require an AC_DEFUNed macro: -AC_REQUIRE([LTOPTIONS_VERSION])dnl -AC_REQUIRE([LTSUGAR_VERSION])dnl -AC_REQUIRE([LTVERSION_VERSION])dnl -AC_REQUIRE([LTOBSOLETE_VERSION])dnl -m4_require([_LT_PROG_LTMAIN])dnl - -_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) - -dnl Parse OPTIONS -_LT_SET_OPTIONS([$0], [$1]) - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS=$ltmain - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' -AC_SUBST(LIBTOOL)dnl - -_LT_SETUP - -# Only expand once: -m4_define([LT_INIT]) -])# LT_INIT - -# Old names: -AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) -AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_PROG_LIBTOOL], []) -dnl AC_DEFUN([AM_PROG_LIBTOOL], []) - - -# _LT_PREPARE_CC_BASENAME -# ----------------------- -m4_defun([_LT_PREPARE_CC_BASENAME], [ -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. -func_cc_basename () -{ - for cc_temp in @S|@*""; do - case $cc_temp in - compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; - distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; - \-*) ;; - *) break;; - esac - done - func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` -} -])# _LT_PREPARE_CC_BASENAME - - -# _LT_CC_BASENAME(CC) -# ------------------- -# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME, -# but that macro is also expanded into generated libtool script, which -# arranges for $SED and $ECHO to be set by different means. -m4_defun([_LT_CC_BASENAME], -[m4_require([_LT_PREPARE_CC_BASENAME])dnl -AC_REQUIRE([_LT_DECL_SED])dnl -AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl -func_cc_basename $1 -cc_basename=$func_cc_basename_result -]) - - -# _LT_FILEUTILS_DEFAULTS -# ---------------------- -# It is okay to use these file commands and assume they have been set -# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'. 
-m4_defun([_LT_FILEUTILS_DEFAULTS], -[: ${CP="cp -f"} -: ${MV="mv -f"} -: ${RM="rm -f"} -])# _LT_FILEUTILS_DEFAULTS - - -# _LT_SETUP -# --------- -m4_defun([_LT_SETUP], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl -AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl - -_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl -dnl -_LT_DECL([], [host_alias], [0], [The host system])dnl -_LT_DECL([], [host], [0])dnl -_LT_DECL([], [host_os], [0])dnl -dnl -_LT_DECL([], [build_alias], [0], [The build system])dnl -_LT_DECL([], [build], [0])dnl -_LT_DECL([], [build_os], [0])dnl -dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([LT_PATH_LD])dnl -AC_REQUIRE([LT_PATH_NM])dnl -dnl -AC_REQUIRE([AC_PROG_LN_S])dnl -test -z "$LN_S" && LN_S="ln -s" -_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl -dnl -AC_REQUIRE([LT_CMD_MAX_LEN])dnl -_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl -_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl -dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_CHECK_SHELL_FEATURES])dnl -m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl -m4_require([_LT_CMD_RELOAD])dnl -m4_require([_LT_CHECK_MAGIC_METHOD])dnl -m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl -m4_require([_LT_CMD_OLD_ARCHIVE])dnl -m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl -m4_require([_LT_WITH_SYSROOT])dnl -m4_require([_LT_CMD_TRUNCATE])dnl - -_LT_CONFIG_LIBTOOL_INIT([ -# See if we are running on zsh, and set the options that allow our -# commands through without removal of \ escapes INIT. -if test -n "\${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST -fi -]) -if test -n "${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST -fi - -_LT_CHECK_OBJDIR - -m4_require([_LT_TAG_COMPILER])dnl - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test set != "${COLLECT_NAMES+set}"; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac - -# Global variables: -ofile=libtool -can_build_shared=yes - -# All known linkers require a '.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a - -with_gnu_ld=$lt_cv_prog_gnu_ld - -old_CC=$CC -old_CFLAGS=$CFLAGS - -# Set sane defaults for various variables -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$LD" && LD=ld -test -z "$ac_objext" && ac_objext=o - -_LT_CC_BASENAME([$compiler]) - -# Only perform the check for file, if the check method requires it -test -z "$MAGIC_CMD" && MAGIC_CMD=file -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - _LT_PATH_MAGIC - fi - ;; -esac - -# Use C for the default configuration in the libtool script -LT_SUPPORTED_TAG([CC]) -_LT_LANG_C_CONFIG -_LT_LANG_DEFAULT_CONFIG -_LT_CONFIG_COMMANDS -])# _LT_SETUP - - -# _LT_PREPARE_SED_QUOTE_VARS -# -------------------------- -# Define a few sed substitution that help us do robust quoting. -m4_defun([_LT_PREPARE_SED_QUOTE_VARS], -[# Backslashify metacharacters that are still active within -# double-quoted strings. -sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' - -# Same as above, but do not quote variable references. -double_quote_subst='s/\([["`\\]]\)/\\\1/g' - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. 
-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to delay expansion of an escaped single quote. -delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' -]) - -# _LT_PROG_LTMAIN -# --------------- -# Note that this code is called both from 'configure', and 'config.status' -# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, -# 'config.status' has no value for ac_aux_dir unless we are using Automake, -# so we pass a copy along to make sure it has a sensible value anyway. -m4_defun([_LT_PROG_LTMAIN], -[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl -_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) -ltmain=$ac_aux_dir/ltmain.sh -])# _LT_PROG_LTMAIN - - - -# So that we can recreate a full libtool script including additional -# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS -# in macros and then make a single call at the end using the 'libtool' -# label. - - -# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) -# ---------------------------------------- -# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. -m4_define([_LT_CONFIG_LIBTOOL_INIT], -[m4_ifval([$1], - [m4_append([_LT_OUTPUT_LIBTOOL_INIT], - [$1 -])])]) - -# Initialize. -m4_define([_LT_OUTPUT_LIBTOOL_INIT]) - - -# _LT_CONFIG_LIBTOOL([COMMANDS]) -# ------------------------------ -# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. -m4_define([_LT_CONFIG_LIBTOOL], -[m4_ifval([$1], - [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], - [$1 -])])]) - -# Initialize. -m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) - - -# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) -# ----------------------------------------------------- -m4_defun([_LT_CONFIG_SAVE_COMMANDS], -[_LT_CONFIG_LIBTOOL([$1]) -_LT_CONFIG_LIBTOOL_INIT([$2]) -]) - - -# _LT_FORMAT_COMMENT([COMMENT]) -# ----------------------------- -# Add leading comment marks to the start of each line, and a trailing -# full-stop to the whole comment if one is not present already. -m4_define([_LT_FORMAT_COMMENT], -[m4_ifval([$1], [ -m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], - [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) -)]) - - - - - -# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) -# ------------------------------------------------------------------- -# CONFIGNAME is the name given to the value in the libtool script. -# VARNAME is the (base) name used in the configure script. -# VALUE may be 0, 1 or 2 for a computed quote escaped value based on -# VARNAME. Any other value will be used directly. 
-m4_define([_LT_DECL], -[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], - [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], - [m4_ifval([$1], [$1], [$2])]) - lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) - m4_ifval([$4], - [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) - lt_dict_add_subkey([lt_decl_dict], [$2], - [tagged?], [m4_ifval([$5], [yes], [no])])]) -]) - - -# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) -# -------------------------------------------------------- -m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) - - -# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) -# ------------------------------------------------ -m4_define([lt_decl_tag_varnames], -[_lt_decl_filter([tagged?], [yes], $@)]) - - -# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) -# --------------------------------------------------------- -m4_define([_lt_decl_filter], -[m4_case([$#], - [0], [m4_fatal([$0: too few arguments: $#])], - [1], [m4_fatal([$0: too few arguments: $#: $1])], - [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], - [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], - [lt_dict_filter([lt_decl_dict], $@)])[]dnl -]) - - -# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) -# -------------------------------------------------- -m4_define([lt_decl_quote_varnames], -[_lt_decl_filter([value], [1], $@)]) - - -# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) -# --------------------------------------------------- -m4_define([lt_decl_dquote_varnames], -[_lt_decl_filter([value], [2], $@)]) - - -# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) -# --------------------------------------------------- -m4_define([lt_decl_varnames_tagged], -[m4_assert([$# <= 2])dnl -_$0(m4_quote(m4_default([$1], [[, ]])), - m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), - m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) -m4_define([_lt_decl_varnames_tagged], -[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) - - -# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) -# ------------------------------------------------ -m4_define([lt_decl_all_varnames], -[_$0(m4_quote(m4_default([$1], [[, ]])), - m4_if([$2], [], - m4_quote(lt_decl_varnames), - m4_quote(m4_shift($@))))[]dnl -]) -m4_define([_lt_decl_all_varnames], -[lt_join($@, lt_decl_varnames_tagged([$1], - lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl -]) - - -# _LT_CONFIG_STATUS_DECLARE([VARNAME]) -# ------------------------------------ -# Quote a variable value, and forward it to 'config.status' so that its -# declaration there will have the same value as in 'configure'. VARNAME -# must have a single quote delimited value for this to work. -m4_define([_LT_CONFIG_STATUS_DECLARE], -[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) - - -# _LT_CONFIG_STATUS_DECLARATIONS -# ------------------------------ -# We delimit libtool config variables with single quotes, so when -# we write them to config.status, we have to be sure to quote all -# embedded single quotes properly. 
In configure, this macro expands -# each variable declared with _LT_DECL (and _LT_TAGDECL) into: -# -# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' -m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], -[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), - [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) - - -# _LT_LIBTOOL_TAGS -# ---------------- -# Output comment and list of tags supported by the script -m4_defun([_LT_LIBTOOL_TAGS], -[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl -available_tags='_LT_TAGS'dnl -]) - - -# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) -# ----------------------------------- -# Extract the dictionary values for VARNAME (optionally with TAG) and -# expand to a commented shell variable setting: -# -# # Some comment about what VAR is for. -# visible_name=$lt_internal_name -m4_define([_LT_LIBTOOL_DECLARE], -[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], - [description])))[]dnl -m4_pushdef([_libtool_name], - m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl -m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), - [0], [_libtool_name=[$]$1], - [1], [_libtool_name=$lt_[]$1], - [2], [_libtool_name=$lt_[]$1], - [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl -m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl -]) - - -# _LT_LIBTOOL_CONFIG_VARS -# ----------------------- -# Produce commented declarations of non-tagged libtool config variables -# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool' -# script. Tagged libtool config variables (even for the LIBTOOL CONFIG -# section) are produced by _LT_LIBTOOL_TAG_VARS. -m4_defun([_LT_LIBTOOL_CONFIG_VARS], -[m4_foreach([_lt_var], - m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), - [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) - - -# _LT_LIBTOOL_TAG_VARS(TAG) -# ------------------------- -m4_define([_LT_LIBTOOL_TAG_VARS], -[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), - [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) - - -# _LT_TAGVAR(VARNAME, [TAGNAME]) -# ------------------------------ -m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) - - -# _LT_CONFIG_COMMANDS -# ------------------- -# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of -# variables for single and double quote escaping we saved from calls -# to _LT_DECL, we can put quote escaped variables declarations -# into 'config.status', and then the shell code to quote escape them in -# for loops in 'config.status'. Finally, any additional code accumulated -# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. -m4_defun([_LT_CONFIG_COMMANDS], -[AC_PROVIDE_IFELSE([LT_OUTPUT], - dnl If the libtool generation code has been placed in $CONFIG_LT, - dnl instead of duplicating it all over again into config.status, - dnl then we will have config.status run $CONFIG_LT later, so it - dnl needs to know what name is stored there: - [AC_CONFIG_COMMANDS([libtool], - [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], - dnl If the libtool generation code is destined for config.status, - dnl expand the accumulated commands and init code now: - [AC_CONFIG_COMMANDS([libtool], - [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) -])#_LT_CONFIG_COMMANDS - - -# Initialize. -m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], -[ - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. 
-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -sed_quote_subst='$sed_quote_subst' -double_quote_subst='$double_quote_subst' -delay_variable_subst='$delay_variable_subst' -_LT_CONFIG_STATUS_DECLARATIONS -LTCC='$LTCC' -LTCFLAGS='$LTCFLAGS' -compiler='$compiler_DEFAULT' - -# A function that is used when there is no print builtin or printf. -func_fallback_echo () -{ - eval 'cat <<_LTECHO_EOF -\$[]1 -_LTECHO_EOF' -} - -# Quote evaled strings. -for var in lt_decl_all_varnames([[ \ -]], lt_decl_quote_varnames); do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[[\\\\\\\`\\"\\\$]]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -# Double-quote double-evaled strings. -for var in lt_decl_all_varnames([[ \ -]], lt_decl_dquote_varnames); do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[[\\\\\\\`\\"\\\$]]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -_LT_OUTPUT_LIBTOOL_INIT -]) - -# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) -# ------------------------------------ -# Generate a child script FILE with all initialization necessary to -# reuse the environment learned by the parent script, and make the -# file executable. If COMMENT is supplied, it is inserted after the -# '#!' sequence but before initialization text begins. After this -# macro, additional text can be appended to FILE to form the body of -# the child script. The macro ends with non-zero status if the -# file could not be fully written (such as if the disk is full). -m4_ifdef([AS_INIT_GENERATED], -[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], -[m4_defun([_LT_GENERATED_FILE_INIT], -[m4_require([AS_PREPARE])]dnl -[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl -[lt_write_fail=0 -cat >$1 <<_ASEOF || lt_write_fail=1 -#! $SHELL -# Generated by $as_me. -$2 -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$1 <<\_ASEOF || lt_write_fail=1 -AS_SHELL_SANITIZE -_AS_PREPARE -exec AS_MESSAGE_FD>&1 -_ASEOF -test 0 = "$lt_write_fail" && chmod +x $1[]dnl -m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT - -# LT_OUTPUT -# --------- -# This macro allows early generation of the libtool script (before -# AC_OUTPUT is called), incase it is used in configure for compilation -# tests. -AC_DEFUN([LT_OUTPUT], -[: ${CONFIG_LT=./config.lt} -AC_MSG_NOTICE([creating $CONFIG_LT]) -_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], -[# Run this file to recreate a libtool stub with the current configuration.]) - -cat >>"$CONFIG_LT" <<\_LTEOF -lt_cl_silent=false -exec AS_MESSAGE_LOG_FD>>config.log -{ - echo - AS_BOX([Running $as_me.]) -} >&AS_MESSAGE_LOG_FD - -lt_cl_help="\ -'$as_me' creates a local libtool stub from the current configuration, -for use in further configure time tests before the real libtool is -generated. - -Usage: $[0] [[OPTIONS]] - - -h, --help print this help, then exit - -V, --version print version number, then exit - -q, --quiet do not print progress messages - -d, --debug don't remove temporary files - -Report bugs to ." - -lt_cl_version="\ -m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl -m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) -configured by $[0], generated by m4_PACKAGE_STRING. 
- -Copyright (C) 2011 Free Software Foundation, Inc. -This config.lt script is free software; the Free Software Foundation -gives unlimited permision to copy, distribute and modify it." - -while test 0 != $[#] -do - case $[1] in - --version | --v* | -V ) - echo "$lt_cl_version"; exit 0 ;; - --help | --h* | -h ) - echo "$lt_cl_help"; exit 0 ;; - --debug | --d* | -d ) - debug=: ;; - --quiet | --q* | --silent | --s* | -q ) - lt_cl_silent=: ;; - - -*) AC_MSG_ERROR([unrecognized option: $[1] -Try '$[0] --help' for more information.]) ;; - - *) AC_MSG_ERROR([unrecognized argument: $[1] -Try '$[0] --help' for more information.]) ;; - esac - shift -done - -if $lt_cl_silent; then - exec AS_MESSAGE_FD>/dev/null -fi -_LTEOF - -cat >>"$CONFIG_LT" <<_LTEOF -_LT_OUTPUT_LIBTOOL_COMMANDS_INIT -_LTEOF - -cat >>"$CONFIG_LT" <<\_LTEOF -AC_MSG_NOTICE([creating $ofile]) -_LT_OUTPUT_LIBTOOL_COMMANDS -AS_EXIT(0) -_LTEOF -chmod +x "$CONFIG_LT" - -# configure is writing to config.log, but config.lt does its own redirection, -# appending to config.log, which fails on DOS, as config.log is still kept -# open by configure. Here we exec the FD to /dev/null, effectively closing -# config.log, so it can be properly (re)opened and appended to by config.lt. -lt_cl_success=: -test yes = "$silent" && - lt_config_lt_args="$lt_config_lt_args --quiet" -exec AS_MESSAGE_LOG_FD>/dev/null -$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false -exec AS_MESSAGE_LOG_FD>>config.log -$lt_cl_success || AS_EXIT(1) -])# LT_OUTPUT - - -# _LT_CONFIG(TAG) -# --------------- -# If TAG is the built-in tag, create an initial libtool script with a -# default configuration from the untagged config vars. Otherwise add code -# to config.status for appending the configuration named by TAG from the -# matching tagged config vars. -m4_defun([_LT_CONFIG], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -_LT_CONFIG_SAVE_COMMANDS([ - m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl - m4_if(_LT_TAG, [C], [ - # See if we are running on zsh, and set the options that allow our - # commands through without removal of \ escapes. - if test -n "${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST - fi - - cfgfile=${ofile}T - trap "$RM \"$cfgfile\"; exit 1" 1 2 15 - $RM "$cfgfile" - - cat <<_LT_EOF >> "$cfgfile" -#! $SHELL -# Generated automatically by $as_me ($PACKAGE) $VERSION -# NOTE: Changes made to this file will be lost: look at ltmain.sh. - -# Provide generalized library-building support services. -# Written by Gordon Matzigkeit, 1996 - -_LT_COPYING -_LT_LIBTOOL_TAGS - -# Configured defaults for sys_lib_dlsearch_path munging. -: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} - -# ### BEGIN LIBTOOL CONFIG -_LT_LIBTOOL_CONFIG_VARS -_LT_LIBTOOL_TAG_VARS -# ### END LIBTOOL CONFIG - -_LT_EOF - - cat <<'_LT_EOF' >> "$cfgfile" - -# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE - -_LT_PREPARE_MUNGE_PATH_LIST -_LT_PREPARE_CC_BASENAME - -# ### END FUNCTIONS SHARED WITH CONFIGURE - -_LT_EOF - - case $host_os in - aix3*) - cat <<\_LT_EOF >> "$cfgfile" -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test set != "${COLLECT_NAMES+set}"; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -_LT_EOF - ;; - esac - - _LT_PROG_LTMAIN - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. 
This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '$q' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" -], -[cat <<_LT_EOF >> "$ofile" - -dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded -dnl in a comment (ie after a #). -# ### BEGIN LIBTOOL TAG CONFIG: $1 -_LT_LIBTOOL_TAG_VARS(_LT_TAG) -# ### END LIBTOOL TAG CONFIG: $1 -_LT_EOF -])dnl /m4_if -], -[m4_if([$1], [], [ - PACKAGE='$PACKAGE' - VERSION='$VERSION' - RM='$RM' - ofile='$ofile'], []) -])dnl /_LT_CONFIG_SAVE_COMMANDS -])# _LT_CONFIG - - -# LT_SUPPORTED_TAG(TAG) -# --------------------- -# Trace this macro to discover what tags are supported by the libtool -# --tag option, using: -# autoconf --trace 'LT_SUPPORTED_TAG:$1' -AC_DEFUN([LT_SUPPORTED_TAG], []) - - -# C support is built-in for now -m4_define([_LT_LANG_C_enabled], []) -m4_define([_LT_TAGS], []) - - -# LT_LANG(LANG) -# ------------- -# Enable libtool support for the given language if not already enabled. -AC_DEFUN([LT_LANG], -[AC_BEFORE([$0], [LT_OUTPUT])dnl -m4_case([$1], - [C], [_LT_LANG(C)], - [C++], [_LT_LANG(CXX)], - [Go], [_LT_LANG(GO)], - [Java], [_LT_LANG(GCJ)], - [Fortran 77], [_LT_LANG(F77)], - [Fortran], [_LT_LANG(FC)], - [Windows Resource], [_LT_LANG(RC)], - [m4_ifdef([_LT_LANG_]$1[_CONFIG], - [_LT_LANG($1)], - [m4_fatal([$0: unsupported language: "$1"])])])dnl -])# LT_LANG - - -# _LT_LANG(LANGNAME) -# ------------------ -m4_defun([_LT_LANG], -[m4_ifdef([_LT_LANG_]$1[_enabled], [], - [LT_SUPPORTED_TAG([$1])dnl - m4_append([_LT_TAGS], [$1 ])dnl - m4_define([_LT_LANG_]$1[_enabled], [])dnl - _LT_LANG_$1_CONFIG($1)])dnl -])# _LT_LANG - - -m4_ifndef([AC_PROG_GO], [ -# NOTE: This macro has been submitted for inclusion into # -# GNU Autoconf as AC_PROG_GO. When it is available in # -# a released version of Autoconf we should remove this # -# macro and use it instead. # -m4_defun([AC_PROG_GO], -[AC_LANG_PUSH(Go)dnl -AC_ARG_VAR([GOC], [Go compiler command])dnl -AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl -_AC_ARG_VAR_LDFLAGS()dnl -AC_CHECK_TOOL(GOC, gccgo) -if test -z "$GOC"; then - if test -n "$ac_tool_prefix"; then - AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) - fi -fi -if test -z "$GOC"; then - AC_CHECK_PROG(GOC, gccgo, gccgo, false) -fi -])#m4_defun -])#m4_ifndef - - -# _LT_LANG_DEFAULT_CONFIG -# ----------------------- -m4_defun([_LT_LANG_DEFAULT_CONFIG], -[AC_PROVIDE_IFELSE([AC_PROG_CXX], - [LT_LANG(CXX)], - [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) - -AC_PROVIDE_IFELSE([AC_PROG_F77], - [LT_LANG(F77)], - [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) - -AC_PROVIDE_IFELSE([AC_PROG_FC], - [LT_LANG(FC)], - [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) - -dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal -dnl pulling things in needlessly. 
-AC_PROVIDE_IFELSE([AC_PROG_GCJ], - [LT_LANG(GCJ)], - [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], - [LT_LANG(GCJ)], - [AC_PROVIDE_IFELSE([LT_PROG_GCJ], - [LT_LANG(GCJ)], - [m4_ifdef([AC_PROG_GCJ], - [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) - m4_ifdef([A][M_PROG_GCJ], - [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) - m4_ifdef([LT_PROG_GCJ], - [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) - -AC_PROVIDE_IFELSE([AC_PROG_GO], - [LT_LANG(GO)], - [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) - -AC_PROVIDE_IFELSE([LT_PROG_RC], - [LT_LANG(RC)], - [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) -])# _LT_LANG_DEFAULT_CONFIG - -# Obsolete macros: -AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) -AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) -AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) -AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) -AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_CXX], []) -dnl AC_DEFUN([AC_LIBTOOL_F77], []) -dnl AC_DEFUN([AC_LIBTOOL_FC], []) -dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) -dnl AC_DEFUN([AC_LIBTOOL_RC], []) - - -# _LT_TAG_COMPILER -# ---------------- -m4_defun([_LT_TAG_COMPILER], -[AC_REQUIRE([AC_PROG_CC])dnl - -_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl -_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl -_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl -_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC -])# _LT_TAG_COMPILER - - -# _LT_COMPILER_BOILERPLATE -# ------------------------ -# Check for compiler boilerplate output or warnings with -# the simple compiler test code. -m4_defun([_LT_COMPILER_BOILERPLATE], -[m4_require([_LT_DECL_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$RM conftest* -])# _LT_COMPILER_BOILERPLATE - - -# _LT_LINKER_BOILERPLATE -# ---------------------- -# Check for linker boilerplate output or warnings with -# the simple link test code. 
-m4_defun([_LT_LINKER_BOILERPLATE], -[m4_require([_LT_DECL_SED])dnl -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$RM -r conftest* -])# _LT_LINKER_BOILERPLATE - -# _LT_REQUIRED_DARWIN_CHECKS -# ------------------------- -m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ - case $host_os in - rhapsody* | darwin*) - AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) - AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) - AC_CHECK_TOOL([LIPO], [lipo], [:]) - AC_CHECK_TOOL([OTOOL], [otool], [:]) - AC_CHECK_TOOL([OTOOL64], [otool64], [:]) - _LT_DECL([], [DSYMUTIL], [1], - [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) - _LT_DECL([], [NMEDIT], [1], - [Tool to change global to local symbols on Mac OS X]) - _LT_DECL([], [LIPO], [1], - [Tool to manipulate fat objects and archives on Mac OS X]) - _LT_DECL([], [OTOOL], [1], - [ldd/readelf like tool for Mach-O binaries on Mac OS X]) - _LT_DECL([], [OTOOL64], [1], - [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) - - AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], - [lt_cv_apple_cc_single_mod=no - if test -z "$LT_MULTI_MODULE"; then - # By default we will add the -single_module flag. You can override - # by either setting the environment variable LT_MULTI_MODULE - # non-empty at configure time, or by adding -multi_module to the - # link flags. - rm -rf libconftest.dylib* - echo "int foo(void){return 1;}" > conftest.c - echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ --dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD - $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ - -dynamiclib -Wl,-single_module conftest.c 2>conftest.err - _lt_result=$? - # If there is a non-empty error log, and "single_module" - # appears in it, assume the flag caused a linker warning - if test -s conftest.err && $GREP single_module conftest.err; then - cat conftest.err >&AS_MESSAGE_LOG_FD - # Otherwise, if the output was created with a 0 exit code from - # the compiler, it worked. - elif test -f libconftest.dylib && test 0 = "$_lt_result"; then - lt_cv_apple_cc_single_mod=yes - else - cat conftest.err >&AS_MESSAGE_LOG_FD - fi - rm -rf libconftest.dylib* - rm -f conftest.* - fi]) - - AC_CACHE_CHECK([for -exported_symbols_list linker flag], - [lt_cv_ld_exported_symbols_list], - [lt_cv_ld_exported_symbols_list=no - save_LDFLAGS=$LDFLAGS - echo "_main" > conftest.sym - LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" - AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], - [lt_cv_ld_exported_symbols_list=yes], - [lt_cv_ld_exported_symbols_list=no]) - LDFLAGS=$save_LDFLAGS - ]) - - AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], - [lt_cv_ld_force_load=no - cat > conftest.c << _LT_EOF -int forced_loaded() { return 2;} -_LT_EOF - echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD - echo "$AR cr libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD - $AR cr libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD - echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD - $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD - cat > conftest.c << _LT_EOF -int main() { return 0;} -_LT_EOF - echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD - $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err - _lt_result=$? 
- if test -s conftest.err && $GREP force_load conftest.err; then - cat conftest.err >&AS_MESSAGE_LOG_FD - elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then - lt_cv_ld_force_load=yes - else - cat conftest.err >&AS_MESSAGE_LOG_FD - fi - rm -f conftest.err libconftest.a conftest conftest.c - rm -rf conftest.dSYM - ]) - case $host_os in - rhapsody* | darwin1.[[012]]) - _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; - darwin1.*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - darwin*) # darwin 5.x on - # if running on 10.5 or later, the deployment target defaults - # to the OS version, if on x86, and 10.4, the deployment - # target defaults to 10.4. Don't you love it? - case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - 10.[[012]][[,.]]*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - 10.*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - esac - ;; - esac - if test yes = "$lt_cv_apple_cc_single_mod"; then - _lt_dar_single_mod='$single_module' - fi - if test yes = "$lt_cv_ld_exported_symbols_list"; then - _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' - else - _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' - fi - if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then - _lt_dsymutil='~$DSYMUTIL $lib || :' - else - _lt_dsymutil= - fi - ;; - esac -]) - - -# _LT_DARWIN_LINKER_FEATURES([TAG]) -# --------------------------------- -# Checks for linker and compiler features on darwin -m4_defun([_LT_DARWIN_LINKER_FEATURES], -[ - m4_require([_LT_REQUIRED_DARWIN_CHECKS]) - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_automatic, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - if test yes = "$lt_cv_ld_force_load"; then - _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' - m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], - [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) - else - _LT_TAGVAR(whole_archive_flag_spec, $1)='' - fi - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined - case $cc_basename in - ifort*|nagfor*) _lt_dar_can_shared=yes ;; - *) _lt_dar_can_shared=$GCC ;; - esac - if test yes = "$_lt_dar_can_shared"; then - output_verbose_link_cmd=func_echo_all - _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" - _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" - _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" - _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" - 
m4_if([$1], [CXX], -[ if test yes != "$lt_cv_apple_cc_single_mod"; then - _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" - _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" - fi -],[]) - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi -]) - -# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) -# ---------------------------------- -# Links a minimal program and checks the executable -# for the system default hardcoded library path. In most cases, -# this is /usr/lib:/lib, but when the MPI compilers are used -# the location of the communication and MPI libs are included too. -# If we don't find anything, use the default library path according -# to the aix ld manual. -# Store the results from the different compilers for each TAGNAME. -# Allow to override them for all tags through lt_cv_aix_libpath. -m4_defun([_LT_SYS_MODULE_PATH_AIX], -[m4_require([_LT_DECL_SED])dnl -if test set = "${lt_cv_aix_libpath+set}"; then - aix_libpath=$lt_cv_aix_libpath -else - AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], - [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ - lt_aix_libpath_sed='[ - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\([^ ]*\) *$/\1/ - p - } - }]' - _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - # Check for a 64-bit object if we didn't find anything. - if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then - _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - fi],[]) - if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then - _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib - fi - ]) - aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) -fi -])# _LT_SYS_MODULE_PATH_AIX - - -# _LT_SHELL_INIT(ARG) -# ------------------- -m4_define([_LT_SHELL_INIT], -[m4_divert_text([M4SH-INIT], [$1 -])])# _LT_SHELL_INIT - - - -# _LT_PROG_ECHO_BACKSLASH -# ----------------------- -# Find how we can fake an echo command that does not interpret backslash. -# In particular, with Autoconf 2.60 or later we add some code to the start -# of the generated configure script that will find a shell with a builtin -# printf (that we can use as an echo command). -m4_defun([_LT_PROG_ECHO_BACKSLASH], -[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - -AC_MSG_CHECKING([how to print strings]) -# Test print first, because it will be a builtin if present. -if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' -elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='printf %s\n' -else - # Use this function as a fallback that always works. - func_fallback_echo () - { - eval 'cat <<_LTECHO_EOF -$[]1 -_LTECHO_EOF' - } - ECHO='func_fallback_echo' -fi - -# func_echo_all arg... -# Invoke $ECHO with all args, space-separated. 
-func_echo_all () -{ - $ECHO "$*" -} - -case $ECHO in - printf*) AC_MSG_RESULT([printf]) ;; - print*) AC_MSG_RESULT([print -r]) ;; - *) AC_MSG_RESULT([cat]) ;; -esac - -m4_ifdef([_AS_DETECT_SUGGESTED], -[_AS_DETECT_SUGGESTED([ - test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( - ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' - ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO - ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - PATH=/empty FPATH=/empty; export PATH FPATH - test "X`printf %s $ECHO`" = "X$ECHO" \ - || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) - -_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) -_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) -])# _LT_PROG_ECHO_BACKSLASH - - -# _LT_WITH_SYSROOT -# ---------------- -AC_DEFUN([_LT_WITH_SYSROOT], -[AC_MSG_CHECKING([for sysroot]) -AC_ARG_WITH([sysroot], -[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@], - [Search for dependent libraries within DIR (or the compiler's sysroot - if not specified).])], -[], [with_sysroot=no]) - -dnl lt_sysroot will always be passed unquoted. We quote it here -dnl in case the user passed a directory name. -lt_sysroot= -case $with_sysroot in #( - yes) - if test yes = "$GCC"; then - lt_sysroot=`$CC --print-sysroot 2>/dev/null` - fi - ;; #( - /*) - lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` - ;; #( - no|'') - ;; #( - *) - AC_MSG_RESULT([$with_sysroot]) - AC_MSG_ERROR([The sysroot must be an absolute path.]) - ;; -esac - - AC_MSG_RESULT([${lt_sysroot:-no}]) -_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl -[dependent libraries, and where our libraries should be installed.])]) - -# _LT_ENABLE_LOCK -# --------------- -m4_defun([_LT_ENABLE_LOCK], -[AC_ARG_ENABLE([libtool-lock], - [AS_HELP_STRING([--disable-libtool-lock], - [avoid locking (might break parallel builds)])]) -test no = "$enable_libtool_lock" || enable_libtool_lock=yes - -# Some flags need to be propagated to the compiler or linker for good -# libtool support. -case $host in -ia64-*-hpux*) - # Find out what ABI is being produced by ac_compile, and set mode - # options accordingly. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.$ac_objext` in - *ELF-32*) - HPUX_IA64_MODE=32 - ;; - *ELF-64*) - HPUX_IA64_MODE=64 - ;; - esac - fi - rm -rf conftest* - ;; -*-*-irix6*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - if test yes = "$lt_cv_prog_gnu_ld"; then - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -melf32bsmip" - ;; - *N32*) - LD="${LD-ld} -melf32bmipn32" - ;; - *64-bit*) - LD="${LD-ld} -melf64bmip" - ;; - esac - else - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - LD="${LD-ld} -32" - ;; - *N32*) - LD="${LD-ld} -n32" - ;; - *64-bit*) - LD="${LD-ld} -64" - ;; - esac - fi - fi - rm -rf conftest* - ;; - -mips64*-*linux*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. 
- echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - emul=elf - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - emul="${emul}32" - ;; - *64-bit*) - emul="${emul}64" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *MSB*) - emul="${emul}btsmip" - ;; - *LSB*) - emul="${emul}ltsmip" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *N32*) - emul="${emul}n32" - ;; - esac - LD="${LD-ld} -m $emul" - fi - rm -rf conftest* - ;; - -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ -s390*-*linux*|s390*-*tpf*|sparc*-*linux*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. Note that the listed cases only cover the - # situations where additional linker options are needed (such as when - # doing 32-bit compilation for a host where ld defaults to 64-bit, or - # vice versa); the common cases where no linker options are needed do - # not appear in the list. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - case `/usr/bin/file conftest.o` in - *x86-64*) - LD="${LD-ld} -m elf32_x86_64" - ;; - *) - LD="${LD-ld} -m elf_i386" - ;; - esac - ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*|s390*-*tpf*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS=$CFLAGS - CFLAGS="$CFLAGS -belf" - AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, - [AC_LANG_PUSH(C) - AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) - AC_LANG_POP]) - if test yes != "$lt_cv_cc_needs_belf"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS=$SAVE_CFLAGS - fi - ;; -*-*solaris*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo 'int i;' > conftest.$ac_ext - if AC_TRY_EVAL(ac_compile); then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) - case $host in - i?86-*-solaris*|x86_64-*-solaris*) - LD="${LD-ld} -m elf_x86_64" - ;; - sparc*-*-solaris*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
- if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then - LD=${LD-ld}_sol2 - fi - ;; - *) - if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then - LD="${LD-ld} -64" - fi - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -esac - -need_locks=$enable_libtool_lock -])# _LT_ENABLE_LOCK - - -# _LT_PROG_AR -# ----------- -m4_defun([_LT_PROG_AR], -[AC_CHECK_TOOLS(AR, [ar], false) -: ${AR=ar} -: ${AR_FLAGS=cr} -_LT_DECL([], [AR], [1], [The archiver]) -_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) - -AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], - [lt_cv_ar_at_file=no - AC_COMPILE_IFELSE([AC_LANG_PROGRAM], - [echo conftest.$ac_objext > conftest.lst - lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' - AC_TRY_EVAL([lt_ar_try]) - if test 0 -eq "$ac_status"; then - # Ensure the archiver fails upon bogus file names. - rm -f conftest.$ac_objext libconftest.a - AC_TRY_EVAL([lt_ar_try]) - if test 0 -ne "$ac_status"; then - lt_cv_ar_at_file=@ - fi - fi - rm -f conftest.* libconftest.a - ]) - ]) - -if test no = "$lt_cv_ar_at_file"; then - archiver_list_spec= -else - archiver_list_spec=$lt_cv_ar_at_file -fi -_LT_DECL([], [archiver_list_spec], [1], - [How to feed a file listing to the archiver]) -])# _LT_PROG_AR - - -# _LT_CMD_OLD_ARCHIVE -# ------------------- -m4_defun([_LT_CMD_OLD_ARCHIVE], -[_LT_PROG_AR - -AC_CHECK_TOOL(STRIP, strip, :) -test -z "$STRIP" && STRIP=: -_LT_DECL([], [STRIP], [1], [A symbol stripping program]) - -AC_CHECK_TOOL(RANLIB, ranlib, :) -test -z "$RANLIB" && RANLIB=: -_LT_DECL([], [RANLIB], [1], - [Commands used to install an old-style archive]) - -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - bitrig* | openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" -fi - -case $host_os in - darwin*) - lock_old_archive_extraction=yes ;; - *) - lock_old_archive_extraction=no ;; -esac -_LT_DECL([], [old_postinstall_cmds], [2]) -_LT_DECL([], [old_postuninstall_cmds], [2]) -_LT_TAGDECL([], [old_archive_cmds], [2], - [Commands used to build an old-style archive]) -_LT_DECL([], [lock_old_archive_extraction], [0], - [Whether to use a lock for old archive extraction]) -])# _LT_CMD_OLD_ARCHIVE - - -# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------------------- -# Check whether the given compiler option works -AC_DEFUN([_LT_COMPILER_OPTION], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_SED])dnl -AC_CACHE_CHECK([$1], [$2], - [$2=no - m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$3" ## exclude from sc_useless_quotes_in_assignment - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. 
- lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - fi - $RM conftest* -]) - -if test yes = "[$]$2"; then - m4_if([$5], , :, [$5]) -else - m4_if([$6], , :, [$6]) -fi -])# _LT_COMPILER_OPTION - -# Old name: -AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) - - -# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, -# [ACTION-SUCCESS], [ACTION-FAILURE]) -# ---------------------------------------------------- -# Check whether the given linker option works -AC_DEFUN([_LT_LINKER_OPTION], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_SED])dnl -AC_CACHE_CHECK([$1], [$2], - [$2=no - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS $3" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&AS_MESSAGE_LOG_FD - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - $2=yes - fi - else - $2=yes - fi - fi - $RM -r conftest* - LDFLAGS=$save_LDFLAGS -]) - -if test yes = "[$]$2"; then - m4_if([$4], , :, [$4]) -else - m4_if([$5], , :, [$5]) -fi -])# _LT_LINKER_OPTION - -# Old name: -AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], []) - - -# LT_CMD_MAX_LEN -#--------------- -AC_DEFUN([LT_CMD_MAX_LEN], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -# find the maximum length of command line arguments -AC_MSG_CHECKING([the maximum length of command line arguments]) -AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl - i=0 - teststring=ABCD - - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. - # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - - cygwin* | mingw* | cegcc*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. 
- # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - - mint*) - # On MiNT this can take a long time and run out of memory. - lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - - os2*) - # The test takes a long time on OS/2. - lt_cv_sys_max_cmd_len=8192 - ;; - - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. - lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len" && \ - test undefined != "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - # Make teststring a little bigger before we do anything with it. - # a 1K string should be a reasonable start. - for i in 1 2 3 4 5 6 7 8; do - teststring=$teststring$teststring - done - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. - while { test X`env echo "$teststring$teststring" 2>/dev/null` \ - = "X$teststring$teststring"; } >/dev/null 2>&1 && - test 17 != "$i" # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - # Only check the string length outside the loop. - lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` - teststring= - # Add a significant safety factor because C++ compilers can tack on - # massive amounts of additional arguments before passing them to the - # linker. It appears as though 1/2 is a usable value. 
- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; - esac -]) -if test -n "$lt_cv_sys_max_cmd_len"; then - AC_MSG_RESULT($lt_cv_sys_max_cmd_len) -else - AC_MSG_RESULT(none) -fi -max_cmd_len=$lt_cv_sys_max_cmd_len -_LT_DECL([], [max_cmd_len], [0], - [What is the maximum length of a command?]) -])# LT_CMD_MAX_LEN - -# Old name: -AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], []) - - -# _LT_HEADER_DLFCN -# ---------------- -m4_defun([_LT_HEADER_DLFCN], -[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl -])# _LT_HEADER_DLFCN - - -# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE, -# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING) -# ---------------------------------------------------------------- -m4_defun([_LT_TRY_DLOPEN_SELF], -[m4_require([_LT_HEADER_DLFCN])dnl -if test yes = "$cross_compiling"; then : - [$4] -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -[#line $LINENO "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H -#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -/* When -fvisibility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -int fnord () __attribute__((visibility("default"))); -#endif - -int fnord () { return 42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -}] -_LT_EOF - if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then - (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) $1 ;; - x$lt_dlneed_uscore) $2 ;; - x$lt_dlunknown|x*) $3 ;; - esac - else : - # compilation failed - $3 - fi -fi -rm -fr conftest* -])# _LT_TRY_DLOPEN_SELF - - -# LT_SYS_DLOPEN_SELF -# ------------------ -AC_DEFUN([LT_SYS_DLOPEN_SELF], -[m4_require([_LT_HEADER_DLFCN])dnl -if test yes != "$enable_dlopen"; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in - beos*) - lt_cv_dlopen=load_add_on - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - - mingw* | pw32* | cegcc*) - lt_cv_dlopen=LoadLibrary - lt_cv_dlopen_libs= - ;; - - cygwin*) - lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= - ;; - - darwin*) - # if libdl is installed we need to link against it - AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[ - lt_cv_dlopen=dyld - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ]) - ;; - - tpf*) - # Don't try to run any link tests for TPF. We know it's impossible - # because TPF is a cross-compiler, and we know how we open DSOs. - lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= - lt_cv_dlopen_self=no - ;; - - *) - AC_CHECK_FUNC([shl_load], - [lt_cv_dlopen=shl_load], - [AC_CHECK_LIB([dld], [shl_load], - [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld], - [AC_CHECK_FUNC([dlopen], - [lt_cv_dlopen=dlopen], - [AC_CHECK_LIB([dl], [dlopen], - [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl], - [AC_CHECK_LIB([svld], [dlopen], - [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld], - [AC_CHECK_LIB([dld], [dld_link], - [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld]) - ]) - ]) - ]) - ]) - ]) - ;; - esac - - if test no = "$lt_cv_dlopen"; then - enable_dlopen=no - else - enable_dlopen=yes - fi - - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS=$CPPFLAGS - test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - - save_LDFLAGS=$LDFLAGS - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - - save_LIBS=$LIBS - LIBS="$lt_cv_dlopen_libs $LIBS" - - AC_CACHE_CHECK([whether a program can dlopen itself], - lt_cv_dlopen_self, [dnl - _LT_TRY_DLOPEN_SELF( - lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, - lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) - ]) - - if test yes = "$lt_cv_dlopen_self"; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - AC_CACHE_CHECK([whether a statically linked program can dlopen itself], - lt_cv_dlopen_self_static, [dnl - _LT_TRY_DLOPEN_SELF( - lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, - lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) - ]) - fi - - CPPFLAGS=$save_CPPFLAGS - LDFLAGS=$save_LDFLAGS - LIBS=$save_LIBS - ;; - esac - - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi -_LT_DECL([dlopen_support], [enable_dlopen], [0], - [Whether dlopen is supported]) -_LT_DECL([dlopen_self], [enable_dlopen_self], [0], - [Whether dlopen of programs is supported]) -_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], - [Whether dlopen of statically linked programs is supported]) -])# LT_SYS_DLOPEN_SELF - -# Old name: -AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) - - -# _LT_COMPILER_C_O([TAGNAME]) -# 
--------------------------- -# Check to see if options -c and -o are simultaneously supported by compiler. -# This macro does not hard code the compiler like AC_PROG_CC_C_O. -m4_defun([_LT_COMPILER_C_O], -[m4_require([_LT_DECL_SED])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_TAG_COMPILER])dnl -AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], - [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], - [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&AS_MESSAGE_LOG_FD - echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - fi - fi - chmod u+w . 2>&AS_MESSAGE_LOG_FD - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* -]) -_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], - [Does compiler simultaneously support -c and -o options?]) -])# _LT_COMPILER_C_O - - -# _LT_COMPILER_FILE_LOCKS([TAGNAME]) -# ---------------------------------- -# Check to see if we can do hard links to lock some files if needed -m4_defun([_LT_COMPILER_FILE_LOCKS], -[m4_require([_LT_ENABLE_LOCK])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -_LT_COMPILER_C_O([$1]) - -hard_links=nottested -if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then - # do not overwrite the value of need_locks provided by the user - AC_MSG_CHECKING([if we can lock with hard links]) - hard_links=yes - $RM conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - AC_MSG_RESULT([$hard_links]) - if test no = "$hard_links"; then - AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe]) - need_locks=warn - fi -else - need_locks=no -fi -_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) -])# _LT_COMPILER_FILE_LOCKS - - -# _LT_CHECK_OBJDIR -# ---------------- -m4_defun([_LT_CHECK_OBJDIR], -[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], -[rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. 
- lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null]) -objdir=$lt_cv_objdir -_LT_DECL([], [objdir], [0], - [The name of the directory that contains temporary libtool files])dnl -m4_pattern_allow([LT_OBJDIR])dnl -AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/", - [Define to the sub-directory where libtool stores uninstalled libraries.]) -])# _LT_CHECK_OBJDIR - - -# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) -# -------------------------------------- -# Check hardcoding attributes. -m4_defun([_LT_LINKER_HARDCODE_LIBPATH], -[AC_MSG_CHECKING([how to hardcode library paths into programs]) -_LT_TAGVAR(hardcode_action, $1)= -if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || - test -n "$_LT_TAGVAR(runpath_var, $1)" || - test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then - - # We can hardcode non-existent directories. - if test no != "$_LT_TAGVAR(hardcode_direct, $1)" && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" && - test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then - # Linking always hardcodes the temporary library directory. - _LT_TAGVAR(hardcode_action, $1)=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - _LT_TAGVAR(hardcode_action, $1)=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. - _LT_TAGVAR(hardcode_action, $1)=unsupported -fi -AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)]) - -if test relink = "$_LT_TAGVAR(hardcode_action, $1)" || - test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then - # Fast installation is not supported - enable_fast_install=no -elif test yes = "$shlibpath_overrides_runpath" || - test no = "$enable_shared"; then - # Fast installation is not necessary - enable_fast_install=needless -fi -_LT_TAGDECL([], [hardcode_action], [0], - [How to hardcode a shared library path into an executable]) -])# _LT_LINKER_HARDCODE_LIBPATH - - -# _LT_CMD_STRIPLIB -# ---------------- -m4_defun([_LT_CMD_STRIPLIB], -[m4_require([_LT_DECL_EGREP]) -striplib= -old_striplib= -AC_MSG_CHECKING([whether stripping libraries is possible]) -if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - AC_MSG_RESULT([yes]) -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP"; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - fi - ;; - *) - AC_MSG_RESULT([no]) - ;; - esac -fi -_LT_DECL([], [old_striplib], [1], [Commands to strip libraries]) -_LT_DECL([], [striplib], [1]) -])# _LT_CMD_STRIPLIB - - -# _LT_PREPARE_MUNGE_PATH_LIST -# --------------------------- -# Make sure func_munge_path_list() is defined correctly. 
-m4_defun([_LT_PREPARE_MUNGE_PATH_LIST], -[[# func_munge_path_list VARIABLE PATH -# ----------------------------------- -# VARIABLE is name of variable containing _space_ separated list of -# directories to be munged by the contents of PATH, which is string -# having a format: -# "DIR[:DIR]:" -# string "DIR[ DIR]" will be prepended to VARIABLE -# ":DIR[:DIR]" -# string "DIR[ DIR]" will be appended to VARIABLE -# "DIRP[:DIRP]::[DIRA:]DIRA" -# string "DIRP[ DIRP]" will be prepended to VARIABLE and string -# "DIRA[ DIRA]" will be appended to VARIABLE -# "DIR[:DIR]" -# VARIABLE will be replaced by "DIR[ DIR]" -func_munge_path_list () -{ - case x@S|@2 in - x) - ;; - *:) - eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\" - ;; - x:*) - eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\" - ;; - *::*) - eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" - eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\" - ;; - *) - eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\" - ;; - esac -} -]])# _LT_PREPARE_PATH_LIST - - -# _LT_SYS_DYNAMIC_LINKER([TAG]) -# ----------------------------- -# PORTME Fill in your ld.so characteristics -m4_defun([_LT_SYS_DYNAMIC_LINKER], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_OBJDUMP])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_CHECK_SHELL_FEATURES])dnl -m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl -AC_MSG_CHECKING([dynamic linker characteristics]) -m4_if([$1], - [], [ -if test yes = "$GCC"; then - case $host_os in - darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; - *) lt_awk_arg='/^libraries:/' ;; - esac - case $host_os in - mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;; - *) lt_sed_strip_eq='s|=/|/|g' ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` - case $lt_search_path_spec in - *\;*) - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` - ;; - *) - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` - ;; - esac - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary... - lt_tmp_lt_search_path_spec= - lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - # ...but if some path component already ends with the multilib dir we assume - # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
- case "$lt_multi_os_dir; $lt_search_path_spec " in - "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) - lt_multi_os_dir= - ;; - esac - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" - elif test -n "$lt_multi_os_dir"; then - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' -BEGIN {RS = " "; FS = "/|\n";} { - lt_foo = ""; - lt_count = 0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo = "/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[[lt_foo]]++; } - if (lt_freq[[lt_foo]] == 1) { print lt_foo; } -}'` - # AWK program above erroneously prepends '/' to C:/dos/paths - # for these hosts. - case $host_os in - mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ - $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;; - esac - sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi]) -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=.so -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - -AC_ARG_VAR([LT_SYS_LIBRARY_PATH], -[User-defined run-time library search path.]) - -case $host_os in -aix3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='$libname$release$shared_ext$major' - ;; - -aix[[4-9]]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test ia64 = "$host_cpu"; then - # AIX 5 supports IA64 - library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line '#! .'. This would cause the generated library to - # depend on '.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[[01]] | aix4.[[01]].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # Using Import Files as archive members, it is possible to support - # filename-based versioning of shared library archives on AIX. While - # this would work for both with and without runtime linking, it will - # prevent static linking of such archives. So we do filename-based - # shared library versioning with .so extension only, which is used - # when both runtime linking and shared linking is enabled. 
- # Unfortunately, runtime linking may impact performance, so we do - # not want this to be the default eventually. Also, we use the - # versioned .so libs for executables only if there is the -brtl - # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. - # To allow for filename-based versioning support, we need to create - # libNAME.so.V as an archive file, containing: - # *) an Import File, referring to the versioned filename of the - # archive as well as the shared archive member, telling the - # bitwidth (32 or 64) of that shared object, and providing the - # list of exported symbols of that shared object, eventually - # decorated with the 'weak' keyword - # *) the shared object with the F_LOADONLY flag set, to really avoid - # it being seen by the linker. - # At run time we better use the real file rather than another symlink, - # but for link time we create the symlink libNAME.so -> libNAME.so.V - - case $with_aix_soname,$aix_use_runtimelinking in - # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - aix,yes) # traditional libtool - dynamic_linker='AIX unversionable lib.so' - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - aix,no) # traditional AIX only - dynamic_linker='AIX lib.a[(]lib.so.V[)]' - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - ;; - svr4,*) # full svr4 only - dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # We do not specify a path in Import Files, so LIBPATH fires. - shlibpath_overrides_runpath=yes - ;; - *,yes) # both, prefer svr4 - dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # unpreferred sharedlib libNAME.a needs extra handling - postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' - postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' - # We do not specify a path in Import Files, so LIBPATH fires. 
- shlibpath_overrides_runpath=yes - ;; - *,no) # both, prefer aix - dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]" - library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling - postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' - postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' - ;; - esac - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - case $host_cpu in - powerpc) - # Since July 2007 AmigaOS4 officially supports .so libraries. - # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - m68k) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - esac - ;; - -beos*) - library_names_spec='$libname$shared_ext' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[[45]]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32* | cegcc*) - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no - - case $GCC,$cc_basename in - yes,*) - # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' -m4_if([$1], [],[ - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) - ;; - mingw* | cegcc*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' - ;; - esac - dynamic_linker='Win32 ld.exe' - ;; - - *,cl*) - # Native MSVC - libname_spec='$name' - soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext' - library_names_spec='$libname.dll.lib' - - case $build_os in - mingw*) - sys_lib_search_path_spec= - lt_save_ifs=$IFS - IFS=';' - for lt_path in $LIB - do - IFS=$lt_save_ifs - # Let DOS variable expansion print the short 8.3 style file name. - lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` - sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" - done - IFS=$lt_save_ifs - # Convert to MSYS style. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` - ;; - cygwin*) - # Convert to unix form, then to dos form, then back to unix form - # but this time dos style (no spaces!) so that the unix form looks - # like /cygdrive/c/PROGRA~1:/cygdr... - sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` - sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` - sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - ;; - *) - sys_lib_search_path_spec=$LIB - if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then - # It is most probably a Windows format PATH. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # FIXME: find the short name or the path components, as spaces are - # common. (e.g. "Program Files" -> "PROGRA~1") - ;; - esac - - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - dynamic_linker='Win32 link.exe' - ;; - - *) - # Assume MSVC wrapper - library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib' - dynamic_linker='Win32 ld.exe' - ;; - esac - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' - soname_spec='$libname$release$major$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' -m4_if([$1], [],[ - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[[23]].*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2.*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[[01]]* | freebsdelf3.[[01]]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ - freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; - -haiku*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - dynamic_linker="$host_os runtime_loader" - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=no - sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - if test 32 = "$HPUX_IA64_MODE"; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - sys_lib_dlsearch_path_spec=/usr/lib/hpux32 - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - sys_lib_dlsearch_path_spec=/usr/lib/hpux64 - fi - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555, ... - postinstall_cmds='chmod 555 $lib' - # or fails outright, so override atomically: - install_override_mode=555 - ;; - -interix[[3-9]]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test yes = "$lt_cv_prog_gnu_ld"; then - version_type=linux # correct to gnu/linux during the next big refactor - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" - sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -linux*android*) - version_type=none # Android doesn't support versioned libraries. 
-  need_lib_prefix=no
-  need_version=no
-  library_names_spec='$libname$release$shared_ext'
-  soname_spec='$libname$release$shared_ext'
-  finish_cmds=
-  shlibpath_var=LD_LIBRARY_PATH
-  shlibpath_overrides_runpath=yes
-
-  # This implies no fast_install, which is unacceptable.
-  # Some rework will be needed to allow for fast_install
-  # before this can be enabled.
-  hardcode_into_libs=yes
-
-  dynamic_linker='Android linker'
-  # Don't embed -rpath directories since the linker doesn't support them.
-  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
-  ;;
-
-# This must be glibc/ELF.
-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
-  version_type=linux # correct to gnu/linux during the next big refactor
-  need_lib_prefix=no
-  need_version=no
-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
-  soname_spec='$libname$release$shared_ext$major'
-  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
-  shlibpath_var=LD_LIBRARY_PATH
-  shlibpath_overrides_runpath=no
-
-  # Some binutils ld are patched to set DT_RUNPATH
-  AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
-    [lt_cv_shlibpath_overrides_runpath=no
-    save_LDFLAGS=$LDFLAGS
-    save_libdir=$libdir
-    eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
-         LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
-    AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
-      [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
-         [lt_cv_shlibpath_overrides_runpath=yes])])
-    LDFLAGS=$save_LDFLAGS
-    libdir=$save_libdir
-    ])
-  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
-
-  # This implies no fast_install, which is unacceptable.
-  # Some rework will be needed to allow for fast_install
-  # before this can be enabled.
-  hardcode_into_libs=yes
-
-  # Ideally, we could use ldconfig to report *all* directores which are
-  # searched for libraries, however this is still not possible. Aside from not
-  # being certain /sbin/ldconfig is available, command
-  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
-  # even though it is searched at run-time. Try to do the best guess by
-  # appending ld.so.conf contents (and includes) to the search path.
-  if test -f /etc/ld.so.conf; then
-    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
-    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
-  fi
-
-  # We used to test for /lib/ld.so.1 and disable shared libraries on
-  # powerpc, because MkLinux only supported shared libraries with the
-  # GNU dynamic linker. Since this was broken with cross compilers,
-  # most powerpc-linux boxes support dynamic linking these days and
-  # people can always --disable-shared, the test was removed, and we
-  # assume the GNU/Linux dynamic linker is in use.
- dynamic_linker='GNU/Linux ld.so' - ;; - -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -*nto* | *qnx*) - version_type=qnx - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='ldqnx.so' - ;; - -openbsd* | bitrig*) - version_type=sunos - sys_lib_dlsearch_path_spec=/usr/lib - need_lib_prefix=no - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - need_version=no - else - need_version=yes - fi - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -os2*) - libname_spec='$name' - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no - # OS/2 can only load a DLL with a base name of 8 characters or less. - soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; - v=$($ECHO $release$versuffix | tr -d .-); - n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); - $ECHO $n$v`$shared_ext' - library_names_spec='${libname}_dll.$libext' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=BEGINLIBPATH - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - -rdos*) - dynamic_linker=no - ;; - -solaris*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test yes = "$with_gnu_ld"; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec; then - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' - soname_spec='$libname$shared_ext.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=sco - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - if test yes = "$with_gnu_ld"; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; - -tpf*) - # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
- version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -uts4*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; -esac -AC_MSG_RESULT([$dynamic_linker]) -test no = "$dynamic_linker" && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test yes = "$GCC"; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then - sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec -fi - -if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then - sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec -fi - -# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... -configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec - -# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code -func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" - -# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool -configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH - -_LT_DECL([], [variables_saved_for_relink], [1], - [Variables whose values should be saved in libtool wrapper scripts and - restored at link time]) -_LT_DECL([], [need_lib_prefix], [0], - [Do we need the "lib" prefix for modules?]) -_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) -_LT_DECL([], [version_type], [0], [Library versioning type]) -_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) -_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) -_LT_DECL([], [shlibpath_overrides_runpath], [0], - [Is shlibpath searched before the hard-coded library search path?]) -_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) -_LT_DECL([], [library_names_spec], [1], - [[List of archive names. First name is the real one, the rest are links. 
- The last name is the one that the linker finds with -lNAME]]) -_LT_DECL([], [soname_spec], [1], - [[The coded name of the library, if different from the real name]]) -_LT_DECL([], [install_override_mode], [1], - [Permission mode override for installation of shared libraries]) -_LT_DECL([], [postinstall_cmds], [2], - [Command to use after installation of a shared archive]) -_LT_DECL([], [postuninstall_cmds], [2], - [Command to use after uninstallation of a shared archive]) -_LT_DECL([], [finish_cmds], [2], - [Commands used to finish a libtool library installation in a directory]) -_LT_DECL([], [finish_eval], [1], - [[As "finish_cmds", except a single script fragment to be evaled but - not shown]]) -_LT_DECL([], [hardcode_into_libs], [0], - [Whether we should hardcode library paths into libraries]) -_LT_DECL([], [sys_lib_search_path_spec], [2], - [Compile-time system search path for libraries]) -_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2], - [Detected run-time system search path for libraries]) -_LT_DECL([], [configure_time_lt_sys_library_path], [2], - [Explicit LT_SYS_LIBRARY_PATH set during ./configure time]) -])# _LT_SYS_DYNAMIC_LINKER - - -# _LT_PATH_TOOL_PREFIX(TOOL) -# -------------------------- -# find a file program that can recognize shared library -AC_DEFUN([_LT_PATH_TOOL_PREFIX], -[m4_require([_LT_DECL_EGREP])dnl -AC_MSG_CHECKING([for $1]) -AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, -[case $MAGIC_CMD in -[[\\/*] | ?:[\\/]*]) - lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD=$MAGIC_CMD - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR -dnl $ac_dummy forces splitting on constant user-supplied paths. -dnl POSIX.2 word splitting is done only on the output of word expansions, -dnl not every word. This closes a longstanding sh security hole. - ac_dummy="m4_if([$2], , $PATH, [$2])" - for ac_dir in $ac_dummy; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$1"; then - lt_cv_path_MAGIC_CMD=$ac_dir/"$1" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD=$lt_cv_path_MAGIC_CMD - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS=$lt_save_ifs - MAGIC_CMD=$lt_save_MAGIC_CMD - ;; -esac]) -MAGIC_CMD=$lt_cv_path_MAGIC_CMD -if test -n "$MAGIC_CMD"; then - AC_MSG_RESULT($MAGIC_CMD) -else - AC_MSG_RESULT(no) -fi -_LT_DECL([], [MAGIC_CMD], [0], - [Used to examine libraries when file_magic_cmd begins with "file"])dnl -])# _LT_PATH_TOOL_PREFIX - -# Old name: -AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], []) - - -# _LT_PATH_MAGIC -# -------------- -# find a file program that can recognize a shared library -m4_defun([_LT_PATH_MAGIC], -[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH) -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH) - else - MAGIC_CMD=: - fi -fi -])# _LT_PATH_MAGIC - - -# LT_PATH_LD -# ---------- -# find the pathname to the GNU or non-GNU linker -AC_DEFUN([LT_PATH_LD], -[AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_PROG_ECHO_BACKSLASH])dnl - -AC_ARG_WITH([gnu-ld], - [AS_HELP_STRING([--with-gnu-ld], - [assume the C compiler uses GNU ld @<:@default=no@:>@])], - [test no = "$withval" || with_gnu_ld=yes], - [with_gnu_ld=no])dnl - -ac_prog=ld -if test yes = "$GCC"; then - # Check if gcc -print-prog-name=ld gives a path. - AC_MSG_CHECKING([for ld used by $CC]) - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return, which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [[\\/]]* | ?:[[\\/]]*) - re_direlt='/[[^/]][[^/]]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` - while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do - ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD=$ac_prog - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test yes = "$with_gnu_ld"; then - AC_MSG_CHECKING([for GNU ld]) -else - AC_MSG_CHECKING([for non-GNU ld]) -fi -AC_CACHE_VAL(lt_cv_path_LD, -[if test -z "$LD"; then - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD=$ac_dir/$ac_prog - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. 
- case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i -cat conftest.i conftest.i >conftest2.i -: ${lt_DD:=$DD} -AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd], -[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then - cmp -s conftest.i conftest.out \ - && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: -fi]) -rm -f conftest.i conftest2.i conftest.out]) -])# _LT_PATH_DD - - -# _LT_CMD_TRUNCATE -# ---------------- -# find command to truncate a binary pipe -m4_defun([_LT_CMD_TRUNCATE], -[m4_require([_LT_PATH_DD]) -AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin], -[printf 0123456789abcdef0123456789abcdef >conftest.i -cat conftest.i conftest.i >conftest2.i -lt_cv_truncate_bin= -if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then - cmp -s conftest.i conftest.out \ - && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" -fi -rm -f conftest.i conftest2.i conftest.out -test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"]) -_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1], - [Command to truncate a binary pipe]) -])# _LT_CMD_TRUNCATE - - -# _LT_CHECK_MAGIC_METHOD -# ---------------------- -# how to check for library dependencies -# -- PORTME fill in with the dynamic library characteristics -m4_defun([_LT_CHECK_MAGIC_METHOD], -[m4_require([_LT_DECL_EGREP]) -m4_require([_LT_DECL_OBJDUMP]) -AC_CACHE_CHECK([how to recognize dependent libraries], -lt_cv_deplibs_check_method, -[lt_cv_file_magic_cmd='$MAGIC_CMD' -lt_cv_file_magic_test_file= -lt_cv_deplibs_check_method='unknown' -# Need to set the preceding variable on all platforms that support -# interlibrary dependencies. -# 'none' -- dependencies not supported. -# 'unknown' -- same as none, but documents that we really don't know. -# 'pass_all' -- all dependencies passed with no checks. -# 'test_compile' -- check by making test program. -# 'file_magic [[regex]]' -- check by looking for files in library path -# that responds to the $file_magic_cmd with a given extended regex. -# If you have 'file' or equivalent on your system and you're not sure -# whether 'pass_all' will *always* work, you probably want this one. - -case $host_os in -aix[[4-9]]*) - lt_cv_deplibs_check_method=pass_all - ;; - -beos*) - lt_cv_deplibs_check_method=pass_all - ;; - -bsdi[[45]]*) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)' - lt_cv_file_magic_cmd='/usr/bin/file -L' - lt_cv_file_magic_test_file=/shlib/libc.so - ;; - -cygwin*) - # func_win32_libid is a shell function defined in ltmain.sh - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - ;; - -mingw* | pw32*) - # Base MSYS/MinGW do not provide the 'file' command needed by - # func_win32_libid shell function, so use a weaker test based on 'objdump', - # unless we find 'file', for example because we are cross-compiling. - if ( file / ) >/dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else - # Keep this pattern in sync with the one in func_win32_libid. - lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; - -cegcc*) - # use the weaker test based on 'objdump'. See mingw*. - lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
- lt_cv_file_magic_cmd='$OBJDUMP -f' - ;; - -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; - -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. - lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -haiku*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -interix[[3-9]]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be glibc/ELF. -linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; - -*nto* | *qnx*) - lt_cv_deplibs_check_method=pass_all - ;; - -openbsd* | bitrig*) - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' - fi - ;; - -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; - -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; - -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" - 
lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - lt_cv_deplibs_check_method=pass_all - ;; - pc) - lt_cv_deplibs_check_method=pass_all - ;; - esac - ;; - -tpf*) - lt_cv_deplibs_check_method=pass_all - ;; -os2*) - lt_cv_deplibs_check_method=pass_all - ;; -esac -]) - -file_magic_glob= -want_nocaseglob=no -if test "$build" = "$host"; then - case $host_os in - mingw* | pw32*) - if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then - want_nocaseglob=yes - else - file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` - fi - ;; - esac -fi - -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown - -_LT_DECL([], [deplibs_check_method], [1], - [Method to check whether dependent libraries are shared objects]) -_LT_DECL([], [file_magic_cmd], [1], - [Command to use when deplibs_check_method = "file_magic"]) -_LT_DECL([], [file_magic_glob], [1], - [How to find potential files when deplibs_check_method = "file_magic"]) -_LT_DECL([], [want_nocaseglob], [1], - [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) -])# _LT_CHECK_MAGIC_METHOD - - -# LT_PATH_NM -# ---------- -# find the pathname to a BSD- or MS-compatible name lister -AC_DEFUN([LT_PATH_NM], -[AC_REQUIRE([AC_PROG_CC])dnl -AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, -[if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM=$NM -else - lt_nm_to_check=${ac_tool_prefix}nm - if test -n "$ac_tool_prefix" && test "$build" = "$host"; then - lt_nm_to_check="$lt_nm_to_check nm" - fi - for lt_tmp_nm in $lt_nm_to_check; do - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - tmp_nm=$ac_dir/$lt_tmp_nm - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then - # Check to see if the nm accepts a BSD-compat flag. - # Adding the 'sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty - case $build_os in - mingw*) lt_bad_file=conftest.nm/nofile ;; - *) lt_bad_file=/dev/null ;; - esac - case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in - *$lt_bad_file* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break 2 - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break 2 - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - ;; - esac - fi - done - IFS=$lt_save_ifs - done - : ${lt_cv_path_NM=no} -fi]) -if test no != "$lt_cv_path_NM"; then - NM=$lt_cv_path_NM -else - # Didn't find any BSD compatible name lister, look for dumpbin. - if test -n "$DUMPBIN"; then : - # Let the user override the test. 
- else - AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) - case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in - *COFF*) - DUMPBIN="$DUMPBIN -symbols -headers" - ;; - *) - DUMPBIN=: - ;; - esac - fi - AC_SUBST([DUMPBIN]) - if test : != "$DUMPBIN"; then - NM=$DUMPBIN - fi -fi -test -z "$NM" && NM=nm -AC_SUBST([NM]) -_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl - -AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], - [lt_cv_nm_interface="BSD nm" - echo "int some_variable = 0;" > conftest.$ac_ext - (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) - (eval "$ac_compile" 2>conftest.err) - cat conftest.err >&AS_MESSAGE_LOG_FD - (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) - (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) - cat conftest.err >&AS_MESSAGE_LOG_FD - (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) - cat conftest.out >&AS_MESSAGE_LOG_FD - if $GREP 'External.*some_variable' conftest.out > /dev/null; then - lt_cv_nm_interface="MS dumpbin" - fi - rm -f conftest*]) -])# LT_PATH_NM - -# Old names: -AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) -AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_PROG_NM], []) -dnl AC_DEFUN([AC_PROG_NM], []) - -# _LT_CHECK_SHAREDLIB_FROM_LINKLIB -# -------------------------------- -# how to determine the name of the shared library -# associated with a specific link library. -# -- PORTME fill in with the dynamic library characteristics -m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB], -[m4_require([_LT_DECL_EGREP]) -m4_require([_LT_DECL_OBJDUMP]) -m4_require([_LT_DECL_DLLTOOL]) -AC_CACHE_CHECK([how to associate runtime and link libraries], -lt_cv_sharedlib_from_linklib_cmd, -[lt_cv_sharedlib_from_linklib_cmd='unknown' - -case $host_os in -cygwin* | mingw* | pw32* | cegcc*) - # two different shell functions defined in ltmain.sh; - # decide which one to use based on capabilities of $DLLTOOL - case `$DLLTOOL --help 2>&1` in - *--identify-strict*) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib - ;; - *) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback - ;; - esac - ;; -*) - # fallback: assume linklib IS sharedlib - lt_cv_sharedlib_from_linklib_cmd=$ECHO - ;; -esac -]) -sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO - -_LT_DECL([], [sharedlib_from_linklib_cmd], [1], - [Command to associate shared and link libraries]) -])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB - - -# _LT_PATH_MANIFEST_TOOL -# ---------------------- -# locate the manifest tool -m4_defun([_LT_PATH_MANIFEST_TOOL], -[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :) -test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool], - [lt_cv_path_mainfest_tool=no - echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD - $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out - cat conftest.err >&AS_MESSAGE_LOG_FD - if $GREP 'Manifest Tool' conftest.out > /dev/null; then - lt_cv_path_mainfest_tool=yes - fi - rm -f conftest*]) -if test yes != "$lt_cv_path_mainfest_tool"; then - MANIFEST_TOOL=: -fi -_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl -])# _LT_PATH_MANIFEST_TOOL - - -# _LT_DLL_DEF_P([FILE]) -# --------------------- -# True iff FILE is a Windows DLL '.def' file. 
-# Keep in sync with func_dll_def_p in the libtool script -AC_DEFUN([_LT_DLL_DEF_P], -[dnl - test DEF = "`$SED -n dnl - -e '\''s/^[[ ]]*//'\'' dnl Strip leading whitespace - -e '\''/^\(;.*\)*$/d'\'' dnl Delete empty lines and comments - -e '\''s/^\(EXPORTS\|LIBRARY\)\([[ ]].*\)*$/DEF/p'\'' dnl - -e q dnl Only consider the first "real" line - $1`" dnl -])# _LT_DLL_DEF_P - - -# LT_LIB_M -# -------- -# check for math library -AC_DEFUN([LT_LIB_M], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -LIBM= -case $host in -*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*) - # These system don't have libm, or don't need it - ;; -*-ncr-sysv4.3*) - AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw) - AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm") - ;; -*) - AC_CHECK_LIB(m, cos, LIBM=-lm) - ;; -esac -AC_SUBST([LIBM]) -])# LT_LIB_M - -# Old name: -AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_CHECK_LIBM], []) - - -# _LT_COMPILER_NO_RTTI([TAGNAME]) -# ------------------------------- -m4_defun([_LT_COMPILER_NO_RTTI], -[m4_require([_LT_TAG_COMPILER])dnl - -_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= - -if test yes = "$GCC"; then - case $cc_basename in - nvcc*) - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;; - *) - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;; - esac - - _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions], - lt_cv_prog_compiler_rtti_exceptions, - [-fno-rtti -fno-exceptions], [], - [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"]) -fi -_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1], - [Compiler flag to turn off builtin functions]) -])# _LT_COMPILER_NO_RTTI - - -# _LT_CMD_GLOBAL_SYMBOLS -# ---------------------- -m4_defun([_LT_CMD_GLOBAL_SYMBOLS], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_PROG_CC])dnl -AC_REQUIRE([AC_PROG_AWK])dnl -AC_REQUIRE([LT_PATH_NM])dnl -AC_REQUIRE([LT_PATH_LD])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_TAG_COMPILER])dnl - -# Check for command to grab the raw symbol name followed by C symbol from nm. -AC_MSG_CHECKING([command to parse $NM output from $compiler object]) -AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], -[ -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[[BCDEGRST]]' - -# Regexp to match symbols that can be accessed directly from C. -sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' - -# Define system-specific variables. -case $host_os in -aix*) - symcode='[[BCDT]]' - ;; -cygwin* | mingw* | pw32* | cegcc*) - symcode='[[ABCDGISTW]]' - ;; -hpux*) - if test ia64 = "$host_cpu"; then - symcode='[[ABCDEGRST]]' - fi - ;; -irix* | nonstopux*) - symcode='[[BCDEGRST]]' - ;; -osf*) - symcode='[[BCDEGQRST]]' - ;; -solaris*) - symcode='[[BDRT]]' - ;; -sco3.2v5*) - symcode='[[DT]]' - ;; -sysv4.2uw2*) - symcode='[[DT]]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[[ABDT]]' - ;; -sysv4) - symcode='[[DFNSTU]]' - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[[ABCDGIRSTW]]' ;; -esac - -if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Gets list of data symbols to import. 
- lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" - # Adjust the below global symbol transforms to fixup imported variables. - lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" - lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" - lt_c_name_lib_hook="\ - -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ - -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" -else - # Disable hooks by default. - lt_cv_sys_global_symbol_to_import= - lt_cdecl_hook= - lt_c_name_hook= - lt_c_name_lib_hook= -fi - -# Transform an extracted symbol line into a proper C declaration. -# Some systems (esp. on ia64) link data and code symbols differently, -# so use this general approach. -lt_cv_sys_global_symbol_to_cdecl="sed -n"\ -$lt_cdecl_hook\ -" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ -$lt_c_name_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" - -# Transform an extracted symbol line into symbol name with lib prefix and -# symbol address. -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ -$lt_c_name_lib_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# Try without a prefix underscore, then with it. -for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - - # Write the raw and C identifiers. - if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Fake it for dumpbin and say T for any non-static function, - # D for any global variable and I for any imported variable. - # Also find C++ and __fastcall symbols from MSVC++, - # which start with @ or ?. - lt_cv_sys_global_symbol_pipe="$AWK ['"\ -" {last_section=section; section=\$ 3};"\ -" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ -" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ -" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ -" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ -" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ -" \$ 0!~/External *\|/{next};"\ -" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ -" {if(hide[section]) next};"\ -" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ -" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ -" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ -" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ -" ' prfx=^$ac_symprfx]" - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi - lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. 
- pipe_works=no - - rm -f conftest* - cat > conftest.$ac_ext <<_LT_EOF -#ifdef __cplusplus -extern "C" { -#endif -char nm_test_var; -void nm_test_func(void); -void nm_test_func(void){} -#ifdef __cplusplus -} -#endif -int main(){nm_test_var='a';nm_test_func();return(0);} -_LT_EOF - - if AC_TRY_EVAL(ac_compile); then - # Now try to grab the symbols. - nlist=conftest.nm - $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&AS_MESSAGE_LOG_FD - if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&AS_MESSAGE_LOG_FD && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE -/* DATA imports from DLLs on WIN32 can't be const, because runtime - relocations are performed -- see ld's documentation on pseudo-relocs. */ -# define LT@&t@_DLSYM_CONST -#elif defined __osf__ -/* This system does not cope well with relocations in const data. */ -# define LT@&t@_DLSYM_CONST -#else -# define LT@&t@_DLSYM_CONST const -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -_LT_EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - - cat <<_LT_EOF >> conftest.$ac_ext - -/* The mapping between symbol names and symbols. */ -LT@&t@_DLSYM_CONST struct { - const char *name; - void *address; -} -lt__PROGRAM__LTX_preloaded_symbols[[]] = -{ - { "@PROGRAM@", (void *) 0 }, -_LT_EOF - $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext - cat <<\_LT_EOF >> conftest.$ac_ext - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt__PROGRAM__LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif -_LT_EOF - # Now try linking the two files. - mv conftest.$ac_objext conftstm.$ac_objext - lt_globsym_save_LIBS=$LIBS - lt_globsym_save_CFLAGS=$CFLAGS - LIBS=conftstm.$ac_objext - CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)" - if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then - pipe_works=yes - fi - LIBS=$lt_globsym_save_LIBS - CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD - fi - else - echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD - cat conftest.$ac_ext >&5 - fi - rm -rf conftest* conftst* - - # Do not use the global_symbol_pipe unless it works. - if test yes = "$pipe_works"; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done -]) -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - AC_MSG_RESULT(failed) -else - AC_MSG_RESULT(ok) -fi - -# Response file support. 
-if test "$lt_cv_nm_interface" = "MS dumpbin"; then - nm_file_list_spec='@' -elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then - nm_file_list_spec='@' -fi - -_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], - [Take the output of nm and produce a listing of raw symbols and C names]) -_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], - [Transform the output of nm in a proper C declaration]) -_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1], - [Transform the output of nm into a list of symbols to manually relocate]) -_LT_DECL([global_symbol_to_c_name_address], - [lt_cv_sys_global_symbol_to_c_name_address], [1], - [Transform the output of nm in a C name address pair]) -_LT_DECL([global_symbol_to_c_name_address_lib_prefix], - [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], - [Transform the output of nm in a C name address pair when lib prefix is needed]) -_LT_DECL([nm_interface], [lt_cv_nm_interface], [1], - [The name lister interface]) -_LT_DECL([], [nm_file_list_spec], [1], - [Specify filename containing input files for $NM]) -]) # _LT_CMD_GLOBAL_SYMBOLS - - -# _LT_COMPILER_PIC([TAGNAME]) -# --------------------------- -m4_defun([_LT_COMPILER_PIC], -[m4_require([_LT_TAG_COMPILER])dnl -_LT_TAGVAR(lt_prog_compiler_wl, $1)= -_LT_TAGVAR(lt_prog_compiler_pic, $1)= -_LT_TAGVAR(lt_prog_compiler_static, $1)= - -m4_if([$1], [CXX], [ - # C++ specific cases for pic, static, wl, etc. - if test yes = "$GXX"; then - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the '-m68020' flag to GCC prevents building anything better, - # like '-m68040'. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - mingw* | cygwin* | os2* | pw32* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - case $host_os in - os2*) - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' - ;; - esac - ;; - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - *djgpp*) - # DJGPP does not support shared libraries at all - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - haiku*) - # PIC is the default for Haiku. - # The "-static" flag exists, but is broken. - _LT_TAGVAR(lt_prog_compiler_static, $1)= - ;; - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. 
- ;; - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - *qnx* | *nto*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - else - case $host_os in - aix[[4-9]]*) - # All AIX code is PIC. - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - chorus*) - case $cc_basename in - cxch68*) - # Green Hills C++ Compiler - # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" - ;; - esac - ;; - mingw* | cygwin* | os2* | pw32* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - ;; - dgux*) - case $cc_basename in - ec++*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - ghcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - freebsd* | dragonfly*) - # FreeBSD uses GNU C++ - ;; - hpux9* | hpux10* | hpux11*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' - if test ia64 != "$host_cpu"; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - fi - ;; - aCC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - ;; - *) - ;; - esac - ;; - interix*) - # This is c89, which is MS Visual C++ (no shared libs) - # Anyone wants to do a port? - ;; - irix5* | irix6* | nonstopux*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - # CC pic flag -KPIC is the default. - ;; - *) - ;; - esac - ;; - linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - case $cc_basename in - KCC*) - # KAI C++ Compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - ecpc* ) - # old Intel C++ for x86_64, which still supported -KPIC. - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - icpc* ) - # Intel C++, used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgCC* | pgcpp*) - # Portland Group C++ compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - cxx*) - # Compaq C++ - # Make sure the PIC flag is empty. 
It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) - # IBM XL 8.0, 9.0 on PPC and BlueGene - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - esac - ;; - esac - ;; - lynxos*) - ;; - m88k*) - ;; - mvs*) - case $cc_basename in - cxx*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' - ;; - *) - ;; - esac - ;; - netbsd* | netbsdelf*-gnu) - ;; - *qnx* | *nto*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' - ;; - RCC*) - # Rational C++ 2.4.1 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - cxx*) - # Digital/Compaq C++ - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # Make sure the PIC flag is empty. It appears that all Alpha - # Linux and Compaq Tru64 Unix objects are PIC. - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - *) - ;; - esac - ;; - psos*) - ;; - solaris*) - case $cc_basename in - CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - ;; - *) - ;; - esac - ;; - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - lcc*) - # Lucid - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - ;; - *) - ;; - esac - ;; - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - case $cc_basename in - CC*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - esac - ;; - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - ;; - *) - ;; - esac - ;; - vxworks*) - ;; - *) - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -], -[ - if test yes = "$GCC"; then - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the '-m68020' flag to GCC prevents building anything better, - # like '-m68040'. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. 
- ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - case $host_os in - os2*) - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' - ;; - esac - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - ;; - - haiku*) - # PIC is the default for Haiku. - # The "-static" flag exists, but is broken. - _LT_TAGVAR(lt_prog_compiler_static, $1)= - ;; - - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - ;; - - interix[[3-9]]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - enable_shared=no - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic - fi - ;; - - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - ;; - esac - - case $cc_basename in - nvcc*) # Cuda Compiler Driver 2.2 - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' - if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" - fi - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. - case $host_os in - aix*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - else - _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' - fi - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' - case $cc_basename in - nagfor*) - # NAG Fortran compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - esac - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - m4_if([$1], [GCJ], [], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) - case $host_os in - os2*) - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static' - ;; - esac - ;; - - hpux9* | hpux10* | hpux11*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. 
- case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # PIC (with -KPIC) is the default. - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - case $cc_basename in - # old Intel for x86_64, which still supported -KPIC. - ecc*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - # flang / f18. f95 an alias for gfortran or flang on Debian - flang* | f18* | f95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - # icc used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - icc* | ifort*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - # Lahey Fortran 8.1. - lf95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' - _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' - ;; - nagfor*) - # NAG Fortran compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - ccc*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All Alpha code is PIC. 
- _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - xl* | bgxl* | bgf* | mpixl*) - # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='' - ;; - *Sun\ F* | *Sun*Fortran*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - ;; - *Sun\ C*) - # Sun C 5.9 - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - ;; - *Intel*\ [[CF]]*Compiler*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' - ;; - *Portland\ Group*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - esac - ;; - esac - ;; - - newsos6) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' - ;; - - osf3* | osf4* | osf5*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - # All OSF/1 code is PIC. - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - rdos*) - _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' - ;; - - solaris*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - case $cc_basename in - f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; - *) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; - esac - ;; - - sunos4*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - unicos*) - _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - - uts4*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' - _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' - ;; - - *) - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no - ;; - esac - fi -]) -case $host_os in - # For platforms that do not support PIC, -DPIC is meaningless: - *djgpp*) - _LT_TAGVAR(lt_prog_compiler_pic, $1)= - ;; - *) - _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" - ;; -esac - -AC_CACHE_CHECK([for 
$compiler option to produce PIC], - [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], - [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) -_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) - -# -# Check to make sure the PIC flag actually works. -# -if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then - _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], - [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], - [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], - [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in - "" | " "*) ;; - *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; - esac], - [_LT_TAGVAR(lt_prog_compiler_pic, $1)= - _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) -fi -_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], - [Additional compiler flags for building library objects]) - -_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], - [How to pass a linker flag through the compiler]) -# -# Check to make sure the static flag actually works. -# -wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" -_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], - _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), - $lt_tmp_static_flag, - [], - [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) -_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], - [Compiler flag to prevent dynamic linking]) -])# _LT_COMPILER_PIC - - -# _LT_LINKER_SHLIBS([TAGNAME]) -# ---------------------------- -# See if the linker supports building shared libraries. -m4_defun([_LT_LINKER_SHLIBS], -[AC_REQUIRE([LT_PATH_LD])dnl -AC_REQUIRE([LT_PATH_NM])dnl -m4_require([_LT_PATH_MANIFEST_TOOL])dnl -m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_DECL_SED])dnl -m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl -m4_require([_LT_TAG_COMPILER])dnl -AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) -m4_if([$1], [CXX], [ - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] - case $host_os in - aix[[4-9]]*) - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to GNU nm, but means don't demangle to AIX nm. - # Without the "-l" option, or with the "-B" option, AIX nm treats - # weak defined symbols like other global defined symbols, whereas - # GNU nm marks them as "W". - # While the 'weak' keyword is ignored in the Export File, we need - # it in the Import File for the 'aix-soname' feature, so we have - # to replace the "-B" option with "-P" for AIX nm. 
- if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' - else - _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' - fi - ;; - pw32*) - _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds - ;; - cygwin* | mingw* | cegcc*) - case $cc_basename in - cl*) - _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' - ;; - *) - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' - _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] - ;; - esac - ;; - linux* | k*bsd*-gnu | gnu*) - _LT_TAGVAR(link_all_deplibs, $1)=no - ;; - *) - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - ;; - esac -], [ - runpath_var= - _LT_TAGVAR(allow_undefined_flag, $1)= - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(archive_cmds, $1)= - _LT_TAGVAR(archive_expsym_cmds, $1)= - _LT_TAGVAR(compiler_needs_object, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - _LT_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - _LT_TAGVAR(hardcode_automatic, $1)=no - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(hardcode_libdir_separator, $1)= - _LT_TAGVAR(hardcode_minus_L, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported - _LT_TAGVAR(inherit_rpath, $1)=no - _LT_TAGVAR(link_all_deplibs, $1)=unknown - _LT_TAGVAR(module_cmds, $1)= - _LT_TAGVAR(module_expsym_cmds, $1)= - _LT_TAGVAR(old_archive_from_new_cmds, $1)= - _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= - _LT_TAGVAR(thread_safe_flag_spec, $1)= - _LT_TAGVAR(whole_archive_flag_spec, $1)= - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - _LT_TAGVAR(include_expsyms, $1)= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ' (' and ')$', so one must not match beginning or - # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', - # as well as any symbol that contains 'd'. - _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. 
- # Exclude shared library initialization/finalization symbols. -dnl Note also adjust exclude_expsyms for C++ above. - extract_expsyms_cmds= - - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test yes != "$GCC"; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd* | bitrig*) - with_gnu_ld=no - ;; - linux* | k*bsd*-gnu | gnu*) - _LT_TAGVAR(link_all_deplibs, $1)=no - ;; - esac - - _LT_TAGVAR(ld_shlibs, $1)=yes - - # On some targets, GNU ld is compatible enough with the native linker - # that we're better off using the native interface for both. - lt_use_gnu_ld_interface=no - if test yes = "$with_gnu_ld"; then - case $host_os in - aix*) - # The AIX port of GNU ld has always aspired to compatibility - # with the native linker. However, as the warning in the GNU ld - # block says, versions before 2.19.5* couldn't really create working - # shared libraries, regardless of the interface used. - case `$LD -v 2>&1` in - *\ \(GNU\ Binutils\)\ 2.19.5*) ;; - *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; - *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - fi - - if test yes = "$lt_use_gnu_ld_interface"; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='$wl' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then - _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - supports_anon_versioning=no - case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in - *GNU\ gold*) supports_anon_versioning=yes ;; - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix[[3-9]]*) - # On AIX/PPC, the GNU linker is very broken - if test ia64 != "$host_cpu"; then - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: the GNU linker, at least up to release 2.19, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to install binutils -*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. -*** You will then need to restart the configuration process. 
- -_LT_EOF - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='' - ;; - m68k) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' - _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file, use it as - # is; otherwise, prepend EXPORTS... 
- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - haiku*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - os2*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - shrext_cmds=.dll - _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - - interix[[3-9]]*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
- _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - - gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) - tmp_diet=no - if test linux-dietlibc = "$host_os"; then - case $cc_basename in - diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) - esac - fi - if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ - && test no = "$tmp_diet" - then - tmp_addflag=' $pic_flag' - tmp_sharedflag='-shared' - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group f77 and f90 compilers - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - lf95*) # Lahey Fortran 8.1 - _LT_TAGVAR(whole_archive_flag_spec, $1)= - tmp_sharedflag='--shared' ;; - nagfor*) # NAGFOR 5.3 - tmp_sharedflag='-Wl,-shared' ;; - xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) - tmp_sharedflag='-qmkshrobj' - tmp_addflag= ;; - nvcc*) # Cuda Compiler Driver 2.2 - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - _LT_TAGVAR(compiler_needs_object, $1)=yes - ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - _LT_TAGVAR(compiler_needs_object, $1)=yes - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - esac - _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - - if test yes = "$supports_anon_versioning"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' - fi - - case $cc_basename in - tcc*) - _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic' - ;; - xlf* | bgf* | bgxlf* | mpixlf*) - # 
IBM XL Fortran 10.1 on PPC cannot create shared libs itself - _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test yes = "$supports_anon_versioning"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris*) - if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) - _LT_TAGVAR(ld_shlibs, $1)=no - cat <<_LT_EOF 1>&2 - -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - ;; - *) - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. 
- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - sunos4*) - _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - - if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then - runpath_var= - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= - _LT_TAGVAR(export_dynamic_flag_spec, $1)= - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=yes - _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - _LT_TAGVAR(hardcode_minus_L, $1)=yes - if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - _LT_TAGVAR(hardcode_direct, $1)=unsupported - fi - ;; - - aix[[4-9]]*) - if test ia64 = "$host_cpu"; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag= - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to GNU nm, but means don't demangle to AIX nm. - # Without the "-l" option, or with the "-B" option, AIX nm treats - # weak defined symbols like other global defined symbols, whereas - # GNU nm marks them as "W". - # While the 'weak' keyword is ignored in the Export File, we need - # it in the Import File for the 'aix-soname' feature, so we have - # to replace the "-B" option with "-P" for AIX nm. 
- if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' - else - _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # have runtime linking enabled, and use it for executables. - # For shared libraries, we enable/disable runtime linking - # depending on the kind of the shared library created - - # when "with_aix_soname,aix_use_runtimelinking" is: - # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables - # "aix,yes" lib.so shared, rtl:yes, for executables - # lib.a static archive - # "both,no" lib.so.V(shr.o) shared, rtl:yes - # lib.a(lib.so.V) shared, rtl:no, for executables - # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a(lib.so.V) shared, rtl:no - # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a static archive - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) - for ld_flag in $LDFLAGS; do - if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then - aix_use_runtimelinking=yes - break - fi - done - if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then - # With aix-soname=svr4, we create the lib.so.V shared archives only, - # so we don't have lib.a shared libs to link our executables. - # We have to force runtime linking in this case. - aix_use_runtimelinking=yes - LDFLAGS="$LDFLAGS -Wl,-brtl" - fi - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_TAGVAR(archive_cmds, $1)='' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='$wl-f,' - case $with_aix_soname,$aix_use_runtimelinking in - aix,*) ;; # traditional, no import file - svr4,* | *,yes) # use import file - # The Import File defines what to hardcode. 
- _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=no - ;; - esac - - if test yes = "$GCC"; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`$CC -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)= - fi - ;; - esac - shared_flag='-shared' - if test yes = "$aix_use_runtimelinking"; then - shared_flag="$shared_flag "'$wl-G' - fi - # Need to ensure runtime linking is disabled for the traditional - # shared library, or the linker may eventually find shared libraries - # /with/ Import File - we do not want to mix them. - shared_flag_aix='-shared' - shared_flag_svr4='-shared $wl-G' - else - # not using gcc - if test ia64 = "$host_cpu"; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test yes = "$aix_use_runtimelinking"; then - shared_flag='$wl-G' - else - shared_flag='$wl-bM:SRE' - fi - shared_flag_aix='$wl-bM:SRE' - shared_flag_svr4='$wl-G' - fi - fi - - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - _LT_TAGVAR(always_export_symbols, $1)=yes - if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - _LT_TAGVAR(allow_undefined_flag, $1)='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag - else - if test ia64 = "$host_cpu"; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' - _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' - if test yes = "$with_gnu_ld"; then - # We only use this code for GNU lds that support --whole-archive. 
- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' - else - # Exported symbols can be pulled into shared objects from archives - _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' - # -brtl affects multiple linker settings, -berok does not and is overridden later - compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' - if test svr4 != "$with_aix_soname"; then - # This is similar to how AIX traditionally builds its shared libraries. - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' - fi - if test aix != "$with_aix_soname"; then - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' - else - # used by -dlpreopen to get the symbols - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' - fi - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' - fi - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='' - ;; - m68k) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - ;; - - bsdi[[45]]*) - _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - case $cc_basename in - cl*) - # Native MSVC - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='@' - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. 
- shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. - _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' - _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then - cp "$export_symbols" "$output_objdir/$soname.def"; - echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; - else - $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; - fi~ - $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ - linknames=' - # The linker will not automatically build a static lib if we build a DLL. - # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' - _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' - # Don't use ranlib - _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' - _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ - lt_tool_outputfile="@TOOL_OUTPUT@"~ - case $lt_outputfile in - *.exe|*.EXE) ;; - *) - lt_outputfile=$lt_outputfile.exe - lt_tool_outputfile=$lt_tool_outputfile.exe - ;; - esac~ - if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then - $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; - $RM "$lt_outputfile.manifest"; - fi' - ;; - *) - # Assume MSVC wrapper - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. - _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' - # FIXME: Should let the user specify the lib program. - _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - esac - ;; - - darwin* | rhapsody*) - _LT_DARWIN_LINKER_FEATURES($1) - ;; - - dgux*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. 
- freebsd2.*) - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - hpux9*) - if test yes = "$GCC"; then - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - else - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_direct, $1)=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - ;; - - hpux10*) - if test yes,no = "$GCC,$with_gnu_ld"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test no = "$with_gnu_ld"; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- _LT_TAGVAR(hardcode_minus_L, $1)=yes - fi - ;; - - hpux11*) - if test yes,no = "$GCC,$with_gnu_ld"; then - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - m4_if($1, [], [ - # Older versions of the 11.00 compiler do not understand -b yet - # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) - _LT_LINKER_OPTION([if $CC understands -b], - _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], - [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], - [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], - [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) - ;; - esac - fi - if test no = "$with_gnu_ld"; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - case $host_cpu in - hppa*64*|ia64*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - _LT_TAGVAR(hardcode_minus_L, $1)=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test yes = "$GCC"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. - # This should be the same for all languages, so no per-tag cache variable. 
- AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], - [lt_cv_irix_exported_symbol], - [save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" - AC_LINK_IFELSE( - [AC_LANG_SOURCE( - [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], - [C++], [[int foo (void) { return 0; }]], - [Fortran 77], [[ - subroutine foo - end]], - [Fortran], [[ - subroutine foo - end]])])], - [lt_cv_irix_exported_symbol=yes], - [lt_cv_irix_exported_symbol=no]) - LDFLAGS=$save_LDFLAGS]) - if test yes = "$lt_cv_irix_exported_symbol"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' - fi - _LT_TAGVAR(link_all_deplibs, $1)=no - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(inherit_rpath, $1)=yes - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - linux*) - case $cc_basename in - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - _LT_TAGVAR(ld_shlibs, $1)=yes - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - newsos6) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *nto* | *qnx*) - ;; - - openbsd* | bitrig*) - if test -f /usr/libexec/ld.so; then - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - fi - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - os2*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - 
_LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - shrext_cmds=.dll - _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - - osf3*) - if test yes = "$GCC"; then - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - else - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test yes = "$GCC"; then - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - else - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry 
$output_objdir/so_locations -o $lib~$RM $lib.exp' - - # Both c and cxx compiler support -rpath directly - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)='no' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - ;; - - solaris*) - _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' - if test yes = "$GCC"; then - wlarc='$wl' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) - wlarc='' - _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' - ;; - *) - wlarc='$wl' - _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - ;; - esac - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands '-z linker_flag'. GCC discards it without '$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test yes = "$GCC"; then - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' - fi - ;; - esac - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - sunos4*) - if test sequent = "$host_vendor"; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4) - case $host_vendor in - sni) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' - _LT_TAGVAR(hardcode_direct, $1)=no - ;; - motorola) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - sysv4.3*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - _LT_TAGVAR(ld_shlibs, $1)=yes - fi - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - - if test yes = "$GCC"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We CANNOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. 
- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' - _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' - runpath_var='LD_RUN_PATH' - - if test yes = "$GCC"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - uts4*) - _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - - *) - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - - if test sni = "$host_vendor"; then - case $host in - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym' - ;; - esac - fi - fi -]) -AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) -test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no - -_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld - -_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl -_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl -_LT_DECL([], [extract_expsyms_cmds], [2], - [The commands to extract the exported symbol list from a shared archive]) - -# -# Do we need to explicitly link libc? -# -case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in -x|xyes) - # Assume -lc should be added - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - - if test yes,yes = "$GCC,$enable_shared"; then - case $_LT_TAGVAR(archive_cmds, $1) in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - AC_CACHE_CHECK([whether -lc should be explicitly linked in], - [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), - [$RM conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - if AC_TRY_EVAL(ac_compile) 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) - pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. 
- libname=conftest - lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) - _LT_TAGVAR(allow_undefined_flag, $1)= - if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) - then - lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no - else - lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes - fi - _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $RM conftest* - ]) - _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) - ;; - esac - fi - ;; -esac - -_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], - [Whether or not to add -lc for building shared libraries]) -_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], - [enable_shared_with_static_runtimes], [0], - [Whether or not to disallow shared libs when runtime libs are static]) -_LT_TAGDECL([], [export_dynamic_flag_spec], [1], - [Compiler flag to allow reflexive dlopens]) -_LT_TAGDECL([], [whole_archive_flag_spec], [1], - [Compiler flag to generate shared objects directly from archives]) -_LT_TAGDECL([], [compiler_needs_object], [1], - [Whether the compiler copes with passing no objects directly]) -_LT_TAGDECL([], [old_archive_from_new_cmds], [2], - [Create an old-style archive from a shared archive]) -_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], - [Create a temporary old-style archive to link instead of a shared archive]) -_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) -_LT_TAGDECL([], [archive_expsym_cmds], [2]) -_LT_TAGDECL([], [module_cmds], [2], - [Commands used to build a loadable module if different from building - a shared archive.]) -_LT_TAGDECL([], [module_expsym_cmds], [2]) -_LT_TAGDECL([], [with_gnu_ld], [1], - [Whether we are building with GNU ld or not]) -_LT_TAGDECL([], [allow_undefined_flag], [1], - [Flag that allows shared libraries with undefined symbols to be built]) -_LT_TAGDECL([], [no_undefined_flag], [1], - [Flag that enforces no undefined symbols]) -_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], - [Flag to hardcode $libdir into a binary during linking. 
- This must work even if $libdir does not exist]) -_LT_TAGDECL([], [hardcode_libdir_separator], [1], - [Whether we need a single "-rpath" flag with a separated argument]) -_LT_TAGDECL([], [hardcode_direct], [0], - [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes - DIR into the resulting binary]) -_LT_TAGDECL([], [hardcode_direct_absolute], [0], - [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes - DIR into the resulting binary and the resulting library dependency is - "absolute", i.e impossible to change by setting $shlibpath_var if the - library is relocated]) -_LT_TAGDECL([], [hardcode_minus_L], [0], - [Set to "yes" if using the -LDIR flag during linking hardcodes DIR - into the resulting binary]) -_LT_TAGDECL([], [hardcode_shlibpath_var], [0], - [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR - into the resulting binary]) -_LT_TAGDECL([], [hardcode_automatic], [0], - [Set to "yes" if building a shared library automatically hardcodes DIR - into the library and all subsequent libraries and executables linked - against it]) -_LT_TAGDECL([], [inherit_rpath], [0], - [Set to yes if linker adds runtime paths of dependent libraries - to runtime path list]) -_LT_TAGDECL([], [link_all_deplibs], [0], - [Whether libtool must link a program against all its dependency libraries]) -_LT_TAGDECL([], [always_export_symbols], [0], - [Set to "yes" if exported symbols are required]) -_LT_TAGDECL([], [export_symbols_cmds], [2], - [The commands to list exported symbols]) -_LT_TAGDECL([], [exclude_expsyms], [1], - [Symbols that should not be listed in the preloaded symbols]) -_LT_TAGDECL([], [include_expsyms], [1], - [Symbols that must always be exported]) -_LT_TAGDECL([], [prelink_cmds], [2], - [Commands necessary for linking programs (against libraries) with templates]) -_LT_TAGDECL([], [postlink_cmds], [2], - [Commands necessary for finishing linking programs]) -_LT_TAGDECL([], [file_list_spec], [1], - [Specify filename containing input files]) -dnl FIXME: Not yet implemented -dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1], -dnl [Compiler flag to generate thread safe objects]) -])# _LT_LINKER_SHLIBS - - -# _LT_LANG_C_CONFIG([TAG]) -# ------------------------ -# Ensure that the configuration variables for a C compiler are suitably -# defined. These variables are subsequently used by _LT_CONFIG to write -# the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_C_CONFIG], -[m4_require([_LT_DECL_EGREP])dnl -lt_save_CC=$CC -AC_LANG_PUSH(C) - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' - -_LT_TAG_COMPILER -# Save the default compiler, since it gets overwritten when the other -# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. 
-compiler_DEFAULT=$CC - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -if test -n "$compiler"; then - _LT_COMPILER_NO_RTTI($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - LT_SYS_DLOPEN_SELF - _LT_CMD_STRIPLIB - - # Report what library types will actually be built - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test no = "$can_build_shared" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test yes = "$enable_shared" && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - - aix[[4-9]]*) - if test ia64 != "$host_cpu"; then - case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in - yes,aix,yes) ;; # shared object as lib.so file only - yes,svr4,*) ;; # shared object as lib.so archive member only - yes,*) enable_static=no ;; # shared object in lib.a archive as well - esac - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. - test yes = "$enable_shared" || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_CONFIG($1) -fi -AC_LANG_POP -CC=$lt_save_CC -])# _LT_LANG_C_CONFIG - - -# _LT_LANG_CXX_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for a C++ compiler are suitably -# defined. These variables are subsequently used by _LT_CONFIG to write -# the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_CXX_CONFIG], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -m4_require([_LT_DECL_EGREP])dnl -m4_require([_LT_PATH_MANIFEST_TOOL])dnl -if test -n "$CXX" && ( test no != "$CXX" && - ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || - (test g++ != "$CXX"))); then - AC_PROG_CXXCPP -else - _lt_caught_CXX_error=yes -fi - -AC_LANG_PUSH(C++) -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(compiler_needs_object, $1)=no -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(reload_flag, $1)=$reload_flag -_LT_TAGVAR(reload_cmds, $1)=$reload_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for C++ test sources. -ac_ext=cpp - -# Object file extension for compiled C++ test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the CXX compiler isn't working. 
Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. -if test yes != "$_lt_caught_CXX_error"; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="int some_variable = 0;" - - # Code to be used in simple link tests - lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. - _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC - lt_save_CFLAGS=$CFLAGS - lt_save_LD=$LD - lt_save_GCC=$GCC - GCC=$GXX - lt_save_with_gnu_ld=$with_gnu_ld - lt_save_path_LD=$lt_cv_path_LD - if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then - lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx - else - $as_unset lt_cv_prog_gnu_ld - fi - if test -n "${lt_cv_path_LDCXX+set}"; then - lt_cv_path_LD=$lt_cv_path_LDCXX - else - $as_unset lt_cv_path_LD - fi - test -z "${LDCXX+set}" || LD=$LDCXX - CC=${CXX-"c++"} - CFLAGS=$CXXFLAGS - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - - if test -n "$compiler"; then - # We don't want -fno-exception when compiling C++ code, so set the - # no_builtin_flag separately - if test yes = "$GXX"; then - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' - else - _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= - fi - - if test yes = "$GXX"; then - # Set up default GNU C++ configuration - - LT_PATH_LD - - # Check if GNU C++ uses GNU ld as the underlying linker, since the - # archiving commands below assume that GNU ld is being used. - if test yes = "$with_gnu_ld"; then - _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - - # If archive_cmds runs LD, not CC, wlarc should be empty - # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to - # investigate it a little bit more. (MM) - wlarc='$wl' - - # ancient GNU ld didn't support --whole-archive et. al. - if eval "`$CC -print-prog-name=ld` --help 2>&1" | - $GREP 'no-whole-archive' > /dev/null; then - _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' - else - _LT_TAGVAR(whole_archive_flag_spec, $1)= - fi - else - with_gnu_ld=no - wlarc= - - # A generic and very simple default shared library creation - # command for GNU C++ for the case where it uses the native - # linker, instead of GNU ld. If possible, this setting should - # overridden to take advantage of the native linker features on - # the platform it is being used on. - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - fi - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' - - else - GXX=no - with_gnu_ld=no - wlarc= - fi - - # PORTME: fill in a description of your system's C++ link characteristics - AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) - _LT_TAGVAR(ld_shlibs, $1)=yes - case $host_os in - aix3*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aix[[4-9]]*) - if test ia64 = "$host_cpu"; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag= - else - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # have runtime linking enabled, and use it for executables. - # For shared libraries, we enable/disable runtime linking - # depending on the kind of the shared library created - - # when "with_aix_soname,aix_use_runtimelinking" is: - # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables - # "aix,yes" lib.so shared, rtl:yes, for executables - # lib.a static archive - # "both,no" lib.so.V(shr.o) shared, rtl:yes - # lib.a(lib.so.V) shared, rtl:no, for executables - # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a(lib.so.V) shared, rtl:no - # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a static archive - case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) - for ld_flag in $LDFLAGS; do - case $ld_flag in - *-brtl*) - aix_use_runtimelinking=yes - break - ;; - esac - done - if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then - # With aix-soname=svr4, we create the lib.so.V shared archives only, - # so we don't have lib.a shared libs to link our executables. - # We have to force runtime linking in this case. - aix_use_runtimelinking=yes - LDFLAGS="$LDFLAGS -Wl,-brtl" - fi - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - _LT_TAGVAR(archive_cmds, $1)='' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='$wl-f,' - case $with_aix_soname,$aix_use_runtimelinking in - aix,*) ;; # no import file - svr4,* | *,yes) # use import file - # The Import File defines what to hardcode. - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=no - ;; - esac - - if test yes = "$GXX"; then - case $host_os in aix4.[[012]]|aix4.[[012]].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`$CC -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - _LT_TAGVAR(hardcode_direct, $1)=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. 
Setting hardcode_minus_L - # to unsupported forces relinking - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)= - fi - esac - shared_flag='-shared' - if test yes = "$aix_use_runtimelinking"; then - shared_flag=$shared_flag' $wl-G' - fi - # Need to ensure runtime linking is disabled for the traditional - # shared library, or the linker may eventually find shared libraries - # /with/ Import File - we do not want to mix them. - shared_flag_aix='-shared' - shared_flag_svr4='-shared $wl-G' - else - # not using gcc - if test ia64 = "$host_cpu"; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. The following line is correct: - shared_flag='-G' - else - if test yes = "$aix_use_runtimelinking"; then - shared_flag='$wl-G' - else - shared_flag='$wl-bM:SRE' - fi - shared_flag_aix='$wl-bM:SRE' - shared_flag_svr4='$wl-G' - fi - fi - - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to - # export. - _LT_TAGVAR(always_export_symbols, $1)=yes - if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - # The "-G" linker flag allows undefined symbols. - _LT_TAGVAR(no_undefined_flag, $1)='-bernotok' - # Determine the default libpath from the value encoded in an empty - # executable. - _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" - - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag - else - if test ia64 = "$host_cpu"; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib' - _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" - _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - _LT_SYS_MODULE_PATH_AIX([$1]) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok' - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok' - if test yes = "$with_gnu_ld"; then - # We only use this code for GNU lds that support --whole-archive. 
- _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' - else - # Exported symbols can be pulled into shared objects from archives - _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' - fi - _LT_TAGVAR(archive_cmds_need_lc, $1)=yes - _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' - # -brtl affects multiple linker settings, -berok does not and is overridden later - compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`' - if test svr4 != "$with_aix_soname"; then - # This is similar to how AIX traditionally builds its shared - # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' - fi - if test aix != "$with_aix_soname"; then - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' - else - # used by -dlpreopen to get the symbols - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV $output_objdir/$realname.d/$soname $output_objdir' - fi - _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d' - fi - fi - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - chorus*) - case $cc_basename in - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - cygwin* | mingw* | pw32* | cegcc*) - case $GXX,$cc_basename in - ,cl* | no,cl*) - # Native MSVC - # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=yes - _LT_TAGVAR(file_list_spec, $1)='@' - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. 
- _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' - _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then - cp "$export_symbols" "$output_objdir/$soname.def"; - echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; - else - $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; - fi~ - $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ - linknames=' - # The linker will not automatically build a static lib if we build a DLL. - # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - # Don't use ranlib - _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' - _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ - lt_tool_outputfile="@TOOL_OUTPUT@"~ - case $lt_outputfile in - *.exe|*.EXE) ;; - *) - lt_outputfile=$lt_outputfile.exe - lt_tool_outputfile=$lt_tool_outputfile.exe - ;; - esac~ - func_to_tool_file "$lt_outputfile"~ - if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then - $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; - $RM "$lt_outputfile.manifest"; - fi' - ;; - *) - # g++ - # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, - # as there is no search path for DLLs. - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols' - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - _LT_TAGVAR(always_export_symbols, $1)=no - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file, use it as - # is; otherwise, prepend EXPORTS... 
- _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - darwin* | rhapsody*) - _LT_DARWIN_LINKER_FEATURES($1) - ;; - - os2*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' - _LT_TAGVAR(hardcode_minus_L, $1)=yes - _LT_TAGVAR(allow_undefined_flag, $1)=unsupported - shrext_cmds=.dll - _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes - ;; - - dgux*) - case $cc_basename in - ec++*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - ghcx*) - # Green Hills C++ Compiler - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - freebsd2.*) - # C++ shared libraries reported to be fairly broken before - # switch to ELF - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - freebsd-elf*) - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - ;; - - freebsd* | dragonfly*) - # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF - # conventions - _LT_TAGVAR(ld_shlibs, $1)=yes - ;; - - haiku*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - - hpux9*) - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. 
- - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' - ;; - *) - if test yes = "$GXX"; then - _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - hpux10*|hpux11*) - if test no = "$with_gnu_ld"; then - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - case $host_cpu in - hppa*64*|ia64*) - ;; - *) - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - ;; - esac - fi - case $host_cpu in - hppa*64*|ia64*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - ;; - *) - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, - # but as the default - # location of the library. - ;; - esac - - case $cc_basename in - CC*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - aCC*) - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. 
- output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP " \-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' - ;; - *) - if test yes = "$GXX"; then - if test no = "$with_gnu_ld"; then - case $host_cpu in - hppa*64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - ia64*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - ;; - esac - fi - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - interix[[3-9]]*) - _LT_TAGVAR(hardcode_direct, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. Moving up from 0x10000000 also allows more sbrk(2) space. - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - irix5* | irix6*) - case $cc_basename in - CC*) - # SGI C++ - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - - # Archives containing C++ object files must be created using - # "CC -ar", where "CC" is the IRIX C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. 
- _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' - ;; - *) - if test yes = "$GXX"; then - if test no = "$with_gnu_ld"; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - else - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' - fi - fi - _LT_TAGVAR(link_all_deplibs, $1)=yes - ;; - esac - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - _LT_TAGVAR(inherit_rpath, $1)=yes - ;; - - linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - - # Archives containing C++ object files must be created using - # "CC -Bstatic", where "CC" is the KAI C++ compiler. - _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' - ;; - icpc* | ecpc* ) - # Intel C++ - with_gnu_ld=yes - # version 8.0 and above of icpc choke on multiply defined symbols - # if we add $predep_objects and $postdep_objects, however 7.1 and - # earlier do not add the objects themselves. 
- case `$CC -V 2>&1` in - *"Version 7."*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 8.0 or newer - tmp_idyn= - case $host_cpu in - ia64*) tmp_idyn=' -i_dynamic';; - esac - _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - ;; - esac - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive' - ;; - pgCC* | pgcpp*) - # Portland Group C++ compiler - case `$CC -V` in - *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*) - _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ - compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' - _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ - $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ - $RANLIB $oldlib' - _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ - $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~ - rm -rf $tpldir~ - $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ - $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - ;; - *) # Version 6 and above use weak symbols - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - ;; - cxx*) - # Compaq C++ - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' - 
- runpath_var=LD_RUN_PATH - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' - ;; - xl* | mpixl* | bgxl*) - # IBM XL 8.0 on PPC, with GNU ld - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic' - _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - if test yes = "$supports_anon_versioning"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' - fi - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) - # Sun C++ 5.9 - _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - _LT_TAGVAR(compiler_needs_object, $1)=yes - - # Not sure whether something based on - # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 - # would be better. - output_verbose_link_cmd='func_echo_all' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. 
- _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - esac - ;; - esac - ;; - - lynxos*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - m88k*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - mvs*) - case $cc_basename in - cxx*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - netbsd*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' - wlarc= - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - fi - # Workaround some broken pre-1.5 toolchains - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' - ;; - - *nto* | *qnx*) - _LT_TAGVAR(ld_shlibs, $1)=yes - ;; - - openbsd* | bitrig*) - if test -f /usr/libexec/ld.so; then - _LT_TAGVAR(hardcode_direct, $1)=yes - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_direct_absolute, $1)=yes - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E' - _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' - fi - output_verbose_link_cmd=func_echo_all - else - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - - osf3* | osf4* | osf5*) - case $cc_basename in - KCC*) - # Kuck and Associates, Inc. (KAI) C++ Compiler - - # KCC will only create a shared library if the output file - # ends with ".so" (or ".sl" for HP-UX), so rename the library - # to its proper name (with version) after linking. - _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Archives containing C++ object files must be created using - # the KAI C++ compiler. 
- case $host in - osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; - *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; - esac - ;; - RCC*) - # Rational C++ 2.4.1 - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - cxx*) - case $host in - osf3*) - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - ;; - *) - _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' - _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ - echo "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ - $RM $lib.exp' - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - # - # There doesn't appear to be a way to prevent this compiler from - # explicitly linking system object files so we need to strip them - # from the output so that they don't get included in the library - # dependencies. - output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' - ;; - *) - if test yes,no = "$GXX,$with_gnu_ld"; then - _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*' - case $host in - osf3*) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - ;; - esac - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=: - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. 
- output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' - - else - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - fi - ;; - esac - ;; - - psos*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - sunos4*) - case $cc_basename in - CC*) - # Sun C++ 4.x - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - lcc*) - # Lucid - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - solaris*) - case $cc_basename in - CC* | sunCC*) - # Sun C++ 4.2, 5.x and Centerline C++ - _LT_TAGVAR(archive_cmds_need_lc,$1)=yes - _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' - _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands '-z linker_flag'. - # Supported since Solaris 2.6 (maybe 2.5.1?) - _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' - ;; - esac - _LT_TAGVAR(link_all_deplibs, $1)=yes - - output_verbose_link_cmd='func_echo_all' - - # Archives containing C++ object files must be created using - # "CC -xar", where "CC" is the Sun C++ compiler. This is - # necessary to make sure instantiated templates are included - # in the archive. - _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' - ;; - gcx*) - # Green Hills C++ Compiler - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' - - # The C++ compiler must be used to create the archive. - _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' - ;; - *) - # GNU C++ compiler with Solaris linker - if test yes,no = "$GXX,$with_gnu_ld"; then - _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs' - if $CC --version | $GREP -v '^2\.7' > /dev/null; then - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' - else - # g++ 2.7 appears to require '-G' NOT '-shared' on this - # platform. 
- _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' - _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' - - # Commands to make compiler produce verbose output that lists - # what "hidden" libraries, object files and flags are used when - # linking a shared library. - output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP " \-L"' - fi - - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir' - case $host_os in - solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; - *) - _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' - ;; - esac - fi - ;; - esac - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) - _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We CANNOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. 
- _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text' - _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs' - _LT_TAGVAR(archive_cmds_need_lc, $1)=no - _LT_TAGVAR(hardcode_shlibpath_var, $1)=no - _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir' - _LT_TAGVAR(hardcode_libdir_separator, $1)=':' - _LT_TAGVAR(link_all_deplibs, $1)=yes - _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport' - runpath_var='LD_RUN_PATH' - - case $cc_basename in - CC*) - _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ - '"$_LT_TAGVAR(old_archive_cmds, $1)" - _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ - '"$_LT_TAGVAR(reload_cmds, $1)" - ;; - *) - _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - tandem*) - case $cc_basename in - NCC*) - # NonStop-UX NCC 3.20 - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - ;; - - vxworks*) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - - *) - # FIXME: insert proper C++ library support - _LT_TAGVAR(ld_shlibs, $1)=no - ;; - esac - - AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) - test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no - - _LT_TAGVAR(GCC, $1)=$GXX - _LT_TAGVAR(LD, $1)=$LD - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... - _LT_SYS_HIDDEN_LIBDEPS($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - CC=$lt_save_CC - CFLAGS=$lt_save_CFLAGS - LDCXX=$LD - LD=$lt_save_LD - GCC=$lt_save_GCC - with_gnu_ld=$lt_save_with_gnu_ld - lt_cv_path_LDCXX=$lt_cv_path_LD - lt_cv_path_LD=$lt_save_path_LD - lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld - lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld -fi # test yes != "$_lt_caught_CXX_error" - -AC_LANG_POP -])# _LT_LANG_CXX_CONFIG - - -# _LT_FUNC_STRIPNAME_CNF -# ---------------------- -# func_stripname_cnf prefix suffix name -# strip PREFIX and SUFFIX off of NAME. -# PREFIX and SUFFIX must not contain globbing or regex special -# characters, hashes, percent signs, but SUFFIX may contain a leading -# dot (in which case that matches only a dot). -# -# This function is identical to the (non-XSI) version of func_stripname, -# except this one can be used by m4 code that may be executed by configure, -# rather than the libtool script. 
-m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl -AC_REQUIRE([_LT_DECL_SED]) -AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH]) -func_stripname_cnf () -{ - case @S|@2 in - .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;; - *) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;; - esac -} # func_stripname_cnf -])# _LT_FUNC_STRIPNAME_CNF - - -# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME]) -# --------------------------------- -# Figure out "hidden" library dependencies from verbose -# compiler output when linking a shared library. -# Parse the compiler output and extract the necessary -# objects, libraries and library flags. -m4_defun([_LT_SYS_HIDDEN_LIBDEPS], -[m4_require([_LT_FILEUTILS_DEFAULTS])dnl -AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl -# Dependencies to place before and after the object being linked: -_LT_TAGVAR(predep_objects, $1)= -_LT_TAGVAR(postdep_objects, $1)= -_LT_TAGVAR(predeps, $1)= -_LT_TAGVAR(postdeps, $1)= -_LT_TAGVAR(compiler_lib_search_path, $1)= - -dnl we can't use the lt_simple_compile_test_code here, -dnl because it contains code intended for an executable, -dnl not a library. It's possible we should let each -dnl tag define a new lt_????_link_test_code variable, -dnl but it's only used here... -m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF -int a; -void foo (void) { a = 0; } -_LT_EOF -], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF -class Foo -{ -public: - Foo (void) { a = 0; } -private: - int a; -}; -_LT_EOF -], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF - subroutine foo - implicit none - integer*4 a - a=0 - return - end -_LT_EOF -], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF - subroutine foo - implicit none - integer a - a=0 - return - end -_LT_EOF -], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF -public class foo { - private int a; - public void bar (void) { - a = 0; - } -}; -_LT_EOF -], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF -package foo -func foo() { -} -_LT_EOF -]) - -_lt_libdeps_save_CFLAGS=$CFLAGS -case "$CC $CFLAGS " in #( -*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; -*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; -*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; -esac - -dnl Parse the compiler output and extract the necessary -dnl objects, libraries and library flags. -if AC_TRY_EVAL(ac_compile); then - # Parse the compiler output and extract the necessary - # objects, libraries and library flags. - - # Sentinel used to keep track of whether or not we are before - # the conftest object file. - pre_test_object_deps_done=no - - for p in `eval "$output_verbose_link_cmd"`; do - case $prev$p in - - -L* | -R* | -l*) - # Some compilers place space between "-{L,R}" and the path. - # Remove the space. - if test x-L = "$p" || - test x-R = "$p"; then - prev=$p - continue - fi - - # Expand the sysroot to ease extracting the directories later. - if test -z "$prev"; then - case $p in - -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; - -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; - -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; - esac - fi - case $p in - =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; - esac - if test no = "$pre_test_object_deps_done"; then - case $prev in - -L | -R) - # Internal compiler library paths should come after those - # provided the user. The postdeps already come after the - # user supplied libs so there is no need to process them. 
- if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then - _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p - else - _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p" - fi - ;; - # The "-l" case would never come before the object being - # linked, so don't bother handling this case. - esac - else - if test -z "$_LT_TAGVAR(postdeps, $1)"; then - _LT_TAGVAR(postdeps, $1)=$prev$p - else - _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p" - fi - fi - prev= - ;; - - *.lto.$objext) ;; # Ignore GCC LTO objects - *.$objext) - # This assumes that the test object file only shows up - # once in the compiler output. - if test "$p" = "conftest.$objext"; then - pre_test_object_deps_done=yes - continue - fi - - if test no = "$pre_test_object_deps_done"; then - if test -z "$_LT_TAGVAR(predep_objects, $1)"; then - _LT_TAGVAR(predep_objects, $1)=$p - else - _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" - fi - else - if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then - _LT_TAGVAR(postdep_objects, $1)=$p - else - _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" - fi - fi - ;; - - *) ;; # Ignore the rest. - - esac - done - - # Clean up. - rm -f a.out a.exe -else - echo "libtool.m4: error: problem compiling $1 test program" -fi - -$RM -f confest.$objext -CFLAGS=$_lt_libdeps_save_CFLAGS - -# PORTME: override above test on systems where it is broken -m4_if([$1], [CXX], -[case $host_os in -interix[[3-9]]*) - # Interix 3.5 installs completely hosed .la files for C++, so rather than - # hack all around it, let's just trust "g++" to DTRT. - _LT_TAGVAR(predep_objects,$1)= - _LT_TAGVAR(postdep_objects,$1)= - _LT_TAGVAR(postdeps,$1)= - ;; -esac -]) - -case " $_LT_TAGVAR(postdeps, $1) " in -*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; -esac - _LT_TAGVAR(compiler_lib_search_dirs, $1)= -if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then - _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'` -fi -_LT_TAGDECL([], [compiler_lib_search_dirs], [1], - [The directories searched by this compiler when creating a shared library]) -_LT_TAGDECL([], [predep_objects], [1], - [Dependencies to place before and after the objects being linked to - create a shared library]) -_LT_TAGDECL([], [postdep_objects], [1]) -_LT_TAGDECL([], [predeps], [1]) -_LT_TAGDECL([], [postdeps], [1]) -_LT_TAGDECL([], [compiler_lib_search_path], [1], - [The library search path used internally by the compiler when linking - a shared library]) -])# _LT_SYS_HIDDEN_LIBDEPS - - -# _LT_LANG_F77_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for a Fortran 77 compiler are -# suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to 'libtool'. 
-m4_defun([_LT_LANG_F77_CONFIG], -[AC_LANG_PUSH(Fortran 77) -if test -z "$F77" || test no = "$F77"; then - _lt_disable_F77=yes -fi - -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(reload_flag, $1)=$reload_flag -_LT_TAGVAR(reload_cmds, $1)=$reload_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for f77 test sources. -ac_ext=f - -# Object file extension for compiled f77 test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the F77 compiler isn't working. Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. -if test yes != "$_lt_disable_F77"; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="\ - subroutine t - return - end -" - - # Code to be used in simple link tests - lt_simple_link_test_code="\ - program t - end -" - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. - _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC - lt_save_GCC=$GCC - lt_save_CFLAGS=$CFLAGS - CC=${F77-"f77"} - CFLAGS=$FFLAGS - compiler=$CC - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - GCC=$G77 - if test -n "$compiler"; then - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test no = "$can_build_shared" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test yes = "$enable_shared" && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - aix[[4-9]]*) - if test ia64 != "$host_cpu"; then - case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in - yes,aix,yes) ;; # shared object as lib.so file only - yes,svr4,*) ;; # shared object as lib.so archive member only - yes,*) enable_static=no ;; # shared object in lib.a archive as well - esac - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. - test yes = "$enable_shared" || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_TAGVAR(GCC, $1)=$G77 - _LT_TAGVAR(LD, $1)=$LD - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... 
- _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - GCC=$lt_save_GCC - CC=$lt_save_CC - CFLAGS=$lt_save_CFLAGS -fi # test yes != "$_lt_disable_F77" - -AC_LANG_POP -])# _LT_LANG_F77_CONFIG - - -# _LT_LANG_FC_CONFIG([TAG]) -# ------------------------- -# Ensure that the configuration variables for a Fortran compiler are -# suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_FC_CONFIG], -[AC_LANG_PUSH(Fortran) - -if test -z "$FC" || test no = "$FC"; then - _lt_disable_FC=yes -fi - -_LT_TAGVAR(archive_cmds_need_lc, $1)=no -_LT_TAGVAR(allow_undefined_flag, $1)= -_LT_TAGVAR(always_export_symbols, $1)=no -_LT_TAGVAR(archive_expsym_cmds, $1)= -_LT_TAGVAR(export_dynamic_flag_spec, $1)= -_LT_TAGVAR(hardcode_direct, $1)=no -_LT_TAGVAR(hardcode_direct_absolute, $1)=no -_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= -_LT_TAGVAR(hardcode_libdir_separator, $1)= -_LT_TAGVAR(hardcode_minus_L, $1)=no -_LT_TAGVAR(hardcode_automatic, $1)=no -_LT_TAGVAR(inherit_rpath, $1)=no -_LT_TAGVAR(module_cmds, $1)= -_LT_TAGVAR(module_expsym_cmds, $1)= -_LT_TAGVAR(link_all_deplibs, $1)=unknown -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(reload_flag, $1)=$reload_flag -_LT_TAGVAR(reload_cmds, $1)=$reload_cmds -_LT_TAGVAR(no_undefined_flag, $1)= -_LT_TAGVAR(whole_archive_flag_spec, $1)= -_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no - -# Source file extension for fc test sources. -ac_ext=${ac_fc_srcext-f} - -# Object file extension for compiled fc test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# No sense in running all these tests if we already determined that -# the FC compiler isn't working. Some variables (like enable_shared) -# are currently assumed to apply to all compilers on this platform, -# and will be corrupted by setting them based on a non-working compiler. -if test yes != "$_lt_disable_FC"; then - # Code to be used in simple compile tests - lt_simple_compile_test_code="\ - subroutine t - return - end -" - - # Code to be used in simple link tests - lt_simple_link_test_code="\ - program t - end -" - - # ltmain only uses $CC for tagged configurations so make sure $CC is set. - _LT_TAG_COMPILER - - # save warnings/boilerplate of simple test code - _LT_COMPILER_BOILERPLATE - _LT_LINKER_BOILERPLATE - - # Allow CC to be a program name with arguments. - lt_save_CC=$CC - lt_save_GCC=$GCC - lt_save_CFLAGS=$CFLAGS - CC=${FC-"f95"} - CFLAGS=$FCFLAGS - compiler=$CC - GCC=$ac_cv_fc_compiler_gnu - - _LT_TAGVAR(compiler, $1)=$CC - _LT_CC_BASENAME([$compiler]) - - if test -n "$compiler"; then - AC_MSG_CHECKING([if libtool supports shared libraries]) - AC_MSG_RESULT([$can_build_shared]) - - AC_MSG_CHECKING([whether to build shared libraries]) - test no = "$can_build_shared" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. 
- case $host_os in - aix3*) - test yes = "$enable_shared" && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - aix[[4-9]]*) - if test ia64 != "$host_cpu"; then - case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in - yes,aix,yes) ;; # shared object as lib.so file only - yes,svr4,*) ;; # shared object as lib.so archive member only - yes,*) enable_static=no ;; # shared object in lib.a archive as well - esac - fi - ;; - esac - AC_MSG_RESULT([$enable_shared]) - - AC_MSG_CHECKING([whether to build static libraries]) - # Make sure either enable_shared or enable_static is yes. - test yes = "$enable_shared" || enable_static=yes - AC_MSG_RESULT([$enable_static]) - - _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu - _LT_TAGVAR(LD, $1)=$LD - - ## CAVEAT EMPTOR: - ## There is no encapsulation within the following macros, do not change - ## the running order or otherwise move them around unless you know exactly - ## what you are doing... - _LT_SYS_HIDDEN_LIBDEPS($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_SYS_DYNAMIC_LINKER($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) - fi # test -n "$compiler" - - GCC=$lt_save_GCC - CC=$lt_save_CC - CFLAGS=$lt_save_CFLAGS -fi # test yes != "$_lt_disable_FC" - -AC_LANG_POP -])# _LT_LANG_FC_CONFIG - - -# _LT_LANG_GCJ_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for the GNU Java Compiler compiler -# are suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_GCJ_CONFIG], -[AC_REQUIRE([LT_PROG_GCJ])dnl -AC_LANG_SAVE - -# Source file extension for Java test sources. -ac_ext=java - -# Object file extension for compiled Java test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="class foo {}" - -# Code to be used in simple link tests -lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_TAG_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_CFLAGS=$CFLAGS -lt_save_GCC=$GCC -GCC=yes -CC=${GCJ-"gcj"} -CFLAGS=$GCJFLAGS -compiler=$CC -_LT_TAGVAR(compiler, $1)=$CC -_LT_TAGVAR(LD, $1)=$LD -_LT_CC_BASENAME([$compiler]) - -# GCJ did not exist at the time GCC didn't implicitly link libc in. -_LT_TAGVAR(archive_cmds_need_lc, $1)=no - -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(reload_flag, $1)=$reload_flag -_LT_TAGVAR(reload_cmds, $1)=$reload_cmds - -if test -n "$compiler"; then - _LT_COMPILER_NO_RTTI($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) -fi - -AC_LANG_RESTORE - -GCC=$lt_save_GCC -CC=$lt_save_CC -CFLAGS=$lt_save_CFLAGS -])# _LT_LANG_GCJ_CONFIG - - -# _LT_LANG_GO_CONFIG([TAG]) -# -------------------------- -# Ensure that the configuration variables for the GNU Go compiler -# are suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_GO_CONFIG], -[AC_REQUIRE([LT_PROG_GO])dnl -AC_LANG_SAVE - -# Source file extension for Go test sources. 
-ac_ext=go - -# Object file extension for compiled Go test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="package main; func main() { }" - -# Code to be used in simple link tests -lt_simple_link_test_code='package main; func main() { }' - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_TAG_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_CFLAGS=$CFLAGS -lt_save_GCC=$GCC -GCC=yes -CC=${GOC-"gccgo"} -CFLAGS=$GOFLAGS -compiler=$CC -_LT_TAGVAR(compiler, $1)=$CC -_LT_TAGVAR(LD, $1)=$LD -_LT_CC_BASENAME([$compiler]) - -# Go did not exist at the time GCC didn't implicitly link libc in. -_LT_TAGVAR(archive_cmds_need_lc, $1)=no - -_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds -_LT_TAGVAR(reload_flag, $1)=$reload_flag -_LT_TAGVAR(reload_cmds, $1)=$reload_cmds - -if test -n "$compiler"; then - _LT_COMPILER_NO_RTTI($1) - _LT_COMPILER_PIC($1) - _LT_COMPILER_C_O($1) - _LT_COMPILER_FILE_LOCKS($1) - _LT_LINKER_SHLIBS($1) - _LT_LINKER_HARDCODE_LIBPATH($1) - - _LT_CONFIG($1) -fi - -AC_LANG_RESTORE - -GCC=$lt_save_GCC -CC=$lt_save_CC -CFLAGS=$lt_save_CFLAGS -])# _LT_LANG_GO_CONFIG - - -# _LT_LANG_RC_CONFIG([TAG]) -# ------------------------- -# Ensure that the configuration variables for the Windows resource compiler -# are suitably defined. These variables are subsequently used by _LT_CONFIG -# to write the compiler configuration to 'libtool'. -m4_defun([_LT_LANG_RC_CONFIG], -[AC_REQUIRE([LT_PROG_RC])dnl -AC_LANG_SAVE - -# Source file extension for RC test sources. -ac_ext=rc - -# Object file extension for compiled RC test sources. -objext=o -_LT_TAGVAR(objext, $1)=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' - -# Code to be used in simple link tests -lt_simple_link_test_code=$lt_simple_compile_test_code - -# ltmain only uses $CC for tagged configurations so make sure $CC is set. -_LT_TAG_COMPILER - -# save warnings/boilerplate of simple test code -_LT_COMPILER_BOILERPLATE -_LT_LINKER_BOILERPLATE - -# Allow CC to be a program name with arguments. -lt_save_CC=$CC -lt_save_CFLAGS=$CFLAGS -lt_save_GCC=$GCC -GCC= -CC=${RC-"windres"} -CFLAGS= -compiler=$CC -_LT_TAGVAR(compiler, $1)=$CC -_LT_CC_BASENAME([$compiler]) -_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes - -if test -n "$compiler"; then - : - _LT_CONFIG($1) -fi - -GCC=$lt_save_GCC -AC_LANG_RESTORE -CC=$lt_save_CC -CFLAGS=$lt_save_CFLAGS -])# _LT_LANG_RC_CONFIG - - -# LT_PROG_GCJ -# ----------- -AC_DEFUN([LT_PROG_GCJ], -[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], - [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], - [AC_CHECK_TOOL(GCJ, gcj,) - test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2" - AC_SUBST(GCJFLAGS)])])[]dnl -]) - -# Old name: -AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_GCJ], []) - - -# LT_PROG_GO -# ---------- -AC_DEFUN([LT_PROG_GO], -[AC_CHECK_TOOL(GOC, gccgo,) -]) - - -# LT_PROG_RC -# ---------- -AC_DEFUN([LT_PROG_RC], -[AC_CHECK_TOOL(RC, windres,) -]) - -# Old name: -AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_RC], []) - - -# _LT_DECL_EGREP -# -------------- -# If we don't have a new enough Autoconf to choose the best grep -# available, choose the one first in the user's PATH. 
-m4_defun([_LT_DECL_EGREP], -[AC_REQUIRE([AC_PROG_EGREP])dnl -AC_REQUIRE([AC_PROG_FGREP])dnl -test -z "$GREP" && GREP=grep -_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) -_LT_DECL([], [EGREP], [1], [An ERE matcher]) -_LT_DECL([], [FGREP], [1], [A literal string matcher]) -dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too -AC_SUBST([GREP]) -]) - - -# _LT_DECL_OBJDUMP -# -------------- -# If we don't have a new enough Autoconf to choose the best objdump -# available, choose the one first in the user's PATH. -m4_defun([_LT_DECL_OBJDUMP], -[AC_CHECK_TOOL(OBJDUMP, objdump, false) -test -z "$OBJDUMP" && OBJDUMP=objdump -_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) -AC_SUBST([OBJDUMP]) -]) - -# _LT_DECL_DLLTOOL -# ---------------- -# Ensure DLLTOOL variable is set. -m4_defun([_LT_DECL_DLLTOOL], -[AC_CHECK_TOOL(DLLTOOL, dlltool, false) -test -z "$DLLTOOL" && DLLTOOL=dlltool -_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) -AC_SUBST([DLLTOOL]) -]) - -# _LT_DECL_SED -# ------------ -# Check for a fully-functional sed program, that truncates -# as few characters as possible. Prefer GNU sed if found. -m4_defun([_LT_DECL_SED], -[AC_PROG_SED -test -z "$SED" && SED=sed -Xsed="$SED -e 1s/^X//" -_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) -_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], - [Sed that helps us avoid accidentally triggering echo(1) options like -n]) -])# _LT_DECL_SED - -m4_ifndef([AC_PROG_SED], [ -# NOTE: This macro has been submitted for inclusion into # -# GNU Autoconf as AC_PROG_SED. When it is available in # -# a released version of Autoconf we should remove this # -# macro and use it instead. # - -m4_defun([AC_PROG_SED], -[AC_MSG_CHECKING([for a sed that does not truncate output]) -AC_CACHE_VAL(lt_cv_path_SED, -[# Loop through the user's path and test for sed and gsed. -# Then use that list of sed's as ones to test for truncation. -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for lt_ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then - lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" - fi - done - done -done -IFS=$as_save_IFS -lt_ac_max=0 -lt_ac_count=0 -# Add /usr/xpg4/bin/sed as it is typically found on Solaris -# along with /bin/sed that truncates output. -for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do - test ! -f "$lt_ac_sed" && continue - cat /dev/null > conftest.in - lt_ac_count=0 - echo $ECHO_N "0123456789$ECHO_C" >conftest.in - # Check for GNU sed and select it if it is found. 
- if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then - lt_cv_path_SED=$lt_ac_sed - break - fi - while true; do - cat conftest.in conftest.in >conftest.tmp - mv conftest.tmp conftest.in - cp conftest.in conftest.nl - echo >>conftest.nl - $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break - cmp -s conftest.out conftest.nl || break - # 10000 chars as input seems more than enough - test 10 -lt "$lt_ac_count" && break - lt_ac_count=`expr $lt_ac_count + 1` - if test "$lt_ac_count" -gt "$lt_ac_max"; then - lt_ac_max=$lt_ac_count - lt_cv_path_SED=$lt_ac_sed - fi - done -done -]) -SED=$lt_cv_path_SED -AC_SUBST([SED]) -AC_MSG_RESULT([$SED]) -])#AC_PROG_SED -])#m4_ifndef - -# Old name: -AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([LT_AC_PROG_SED], []) - - -# _LT_CHECK_SHELL_FEATURES -# ------------------------ -# Find out whether the shell is Bourne or XSI compatible, -# or has some other useful features. -m4_defun([_LT_CHECK_SHELL_FEATURES], -[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - lt_unset=unset -else - lt_unset=false -fi -_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl - -# test EBCDIC or ASCII -case `echo X|tr X '\101'` in - A) # ASCII based system - # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr - lt_SP2NL='tr \040 \012' - lt_NL2SP='tr \015\012 \040\040' - ;; - *) # EBCDIC based system - lt_SP2NL='tr \100 \n' - lt_NL2SP='tr \r\n \100\100' - ;; -esac -_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl -_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl -])# _LT_CHECK_SHELL_FEATURES - - -# _LT_PATH_CONVERSION_FUNCTIONS -# ----------------------------- -# Determine what file name conversion functions should be used by -# func_to_host_file (and, implicitly, by func_to_host_path). These are needed -# for certain cross-compile configurations and native mingw. -m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -AC_REQUIRE([AC_CANONICAL_BUILD])dnl -AC_MSG_CHECKING([how to convert $build file names to $host format]) -AC_CACHE_VAL(lt_cv_to_host_file_cmd, -[case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 - ;; - esac - ;; - *-*-cygwin* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin - ;; - esac - ;; - * ) # unhandled hosts (and "normal" native builds) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; -esac -]) -to_host_file_cmd=$lt_cv_to_host_file_cmd -AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) -_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], - [0], [convert $build file names to $host format])dnl - -AC_MSG_CHECKING([how to convert $build file names to toolchain format]) -AC_CACHE_VAL(lt_cv_to_tool_file_cmd, -[#assume ordinary cross tools, or native build. 
-lt_cv_to_tool_file_cmd=func_convert_file_noop -case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 - ;; - esac - ;; -esac -]) -to_tool_file_cmd=$lt_cv_to_tool_file_cmd -AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) -_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], - [0], [convert $build files to toolchain format])dnl -])# _LT_PATH_CONVERSION_FUNCTIONS - -# Helper functions for option handling. -*- Autoconf -*- -# -# Copyright (C) 2004-2005, 2007-2009, 2011-2015 Free Software -# Foundation, Inc. -# Written by Gary V. Vaughan, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 8 ltoptions.m4 - -# This is to help aclocal find these macros, as it can't see m4_define. -AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) - - -# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) -# ------------------------------------------ -m4_define([_LT_MANGLE_OPTION], -[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) - - -# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) -# --------------------------------------- -# Set option OPTION-NAME for macro MACRO-NAME, and if there is a -# matching handler defined, dispatch to it. Other OPTION-NAMEs are -# saved as a flag. -m4_define([_LT_SET_OPTION], -[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl -m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), - _LT_MANGLE_DEFUN([$1], [$2]), - [m4_warning([Unknown $1 option '$2'])])[]dnl -]) - - -# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) -# ------------------------------------------------------------ -# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. -m4_define([_LT_IF_OPTION], -[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) - - -# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) -# ------------------------------------------------------- -# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME -# are set. -m4_define([_LT_UNLESS_OPTIONS], -[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), - [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), - [m4_define([$0_found])])])[]dnl -m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 -])[]dnl -]) - - -# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) -# ---------------------------------------- -# OPTION-LIST is a space-separated list of Libtool options associated -# with MACRO-NAME. If any OPTION has a matching handler declared with -# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about -# the unknown option and exit. -m4_defun([_LT_SET_OPTIONS], -[# Set options -m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), - [_LT_SET_OPTION([$1], _LT_Option)]) - -m4_if([$1],[LT_INIT],[ - dnl - dnl Simply set some default values (i.e off) if boolean options were not - dnl specified: - _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no - ]) - _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no - ]) - dnl - dnl If no reference was made to various pairs of opposing options, then - dnl we run the default mode handler for the pair. 
For example, if neither - dnl 'shared' nor 'disable-shared' was passed, we enable building of shared - dnl archives by default: - _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) - _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) - _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) - _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], - [_LT_ENABLE_FAST_INSTALL]) - _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4], - [_LT_WITH_AIX_SONAME([aix])]) - ]) -])# _LT_SET_OPTIONS - - - -# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) -# ----------------------------------------- -m4_define([_LT_MANGLE_DEFUN], -[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) - - -# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) -# ----------------------------------------------- -m4_define([LT_OPTION_DEFINE], -[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl -])# LT_OPTION_DEFINE - - -# dlopen -# ------ -LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes -]) - -AU_DEFUN([AC_LIBTOOL_DLOPEN], -[_LT_SET_OPTION([LT_INIT], [dlopen]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the 'dlopen' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) - - -# win32-dll -# --------- -# Declare package support for building win32 dll's. -LT_OPTION_DEFINE([LT_INIT], [win32-dll], -[enable_win32_dll=yes - -case $host in -*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) - AC_CHECK_TOOL(AS, as, false) - AC_CHECK_TOOL(DLLTOOL, dlltool, false) - AC_CHECK_TOOL(OBJDUMP, objdump, false) - ;; -esac - -test -z "$AS" && AS=as -_LT_DECL([], [AS], [1], [Assembler program])dnl - -test -z "$DLLTOOL" && DLLTOOL=dlltool -_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl - -test -z "$OBJDUMP" && OBJDUMP=objdump -_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl -])# win32-dll - -AU_DEFUN([AC_LIBTOOL_WIN32_DLL], -[AC_REQUIRE([AC_CANONICAL_HOST])dnl -_LT_SET_OPTION([LT_INIT], [win32-dll]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the 'win32-dll' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) - - -# _LT_ENABLE_SHARED([DEFAULT]) -# ---------------------------- -# implement the --enable-shared flag, and supports the 'shared' and -# 'disable-shared' LT_INIT options. -# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. -m4_define([_LT_ENABLE_SHARED], -[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([shared], - [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], - [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. 
- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS=$lt_save_ifs - ;; - esac], - [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) - - _LT_DECL([build_libtool_libs], [enable_shared], [0], - [Whether or not to build shared libraries]) -])# _LT_ENABLE_SHARED - -LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) - -# Old names: -AC_DEFUN([AC_ENABLE_SHARED], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) -]) - -AC_DEFUN([AC_DISABLE_SHARED], -[_LT_SET_OPTION([LT_INIT], [disable-shared]) -]) - -AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) -AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_ENABLE_SHARED], []) -dnl AC_DEFUN([AM_DISABLE_SHARED], []) - - - -# _LT_ENABLE_STATIC([DEFAULT]) -# ---------------------------- -# implement the --enable-static flag, and support the 'static' and -# 'disable-static' LT_INIT options. -# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. -m4_define([_LT_ENABLE_STATIC], -[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([static], - [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], - [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS=$lt_save_ifs - ;; - esac], - [enable_static=]_LT_ENABLE_STATIC_DEFAULT) - - _LT_DECL([build_old_libs], [enable_static], [0], - [Whether or not to build static libraries]) -])# _LT_ENABLE_STATIC - -LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) - -# Old names: -AC_DEFUN([AC_ENABLE_STATIC], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) -]) - -AC_DEFUN([AC_DISABLE_STATIC], -[_LT_SET_OPTION([LT_INIT], [disable-static]) -]) - -AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) -AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AM_ENABLE_STATIC], []) -dnl AC_DEFUN([AM_DISABLE_STATIC], []) - - - -# _LT_ENABLE_FAST_INSTALL([DEFAULT]) -# ---------------------------------- -# implement the --enable-fast-install flag, and support the 'fast-install' -# and 'disable-fast-install' LT_INIT options. -# DEFAULT is either 'yes' or 'no'. If omitted, it defaults to 'yes'. -m4_define([_LT_ENABLE_FAST_INSTALL], -[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl -AC_ARG_ENABLE([fast-install], - [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], - [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], - [p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. 
- lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS=$lt_save_ifs - ;; - esac], - [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) - -_LT_DECL([fast_install], [enable_fast_install], [0], - [Whether or not to optimize for fast installation])dnl -])# _LT_ENABLE_FAST_INSTALL - -LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) -LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) - -# Old names: -AU_DEFUN([AC_ENABLE_FAST_INSTALL], -[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you put -the 'fast-install' option into LT_INIT's first parameter.]) -]) - -AU_DEFUN([AC_DISABLE_FAST_INSTALL], -[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you put -the 'disable-fast-install' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) -dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) - - -# _LT_WITH_AIX_SONAME([DEFAULT]) -# ---------------------------------- -# implement the --with-aix-soname flag, and support the `aix-soname=aix' -# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT -# is either `aix', `both' or `svr4'. If omitted, it defaults to `aix'. -m4_define([_LT_WITH_AIX_SONAME], -[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl -shared_archive_member_spec= -case $host,$enable_shared in -power*-*-aix[[5-9]]*,yes) - AC_MSG_CHECKING([which variant of shared library versioning to provide]) - AC_ARG_WITH([aix-soname], - [AS_HELP_STRING([--with-aix-soname=aix|svr4|both], - [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])], - [case $withval in - aix|svr4|both) - ;; - *) - AC_MSG_ERROR([Unknown argument to --with-aix-soname]) - ;; - esac - lt_cv_with_aix_soname=$with_aix_soname], - [AC_CACHE_VAL([lt_cv_with_aix_soname], - [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT) - with_aix_soname=$lt_cv_with_aix_soname]) - AC_MSG_RESULT([$with_aix_soname]) - if test aix != "$with_aix_soname"; then - # For the AIX way of multilib, we name the shared archive member - # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', - # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. - # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, - # the AIX toolchain works better with OBJECT_MODE set (default 32). - if test 64 = "${OBJECT_MODE-32}"; then - shared_archive_member_spec=shr_64 - else - shared_archive_member_spec=shr - fi - fi - ;; -*) - with_aix_soname=aix - ;; -esac - -_LT_DECL([], [shared_archive_member_spec], [0], - [Shared archive member basename, for filename based shared library versioning on AIX])dnl -])# _LT_WITH_AIX_SONAME - -LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])]) -LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])]) -LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])]) - - -# _LT_WITH_PIC([MODE]) -# -------------------- -# implement the --with-pic flag, and support the 'pic-only' and 'no-pic' -# LT_INIT options. -# MODE is either 'yes' or 'no'. If omitted, it defaults to 'both'. 
-m4_define([_LT_WITH_PIC], -[AC_ARG_WITH([pic], - [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], - [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], - [lt_p=${PACKAGE-default} - case $withval in - yes|no) pic_mode=$withval ;; - *) - pic_mode=default - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for lt_pkg in $withval; do - IFS=$lt_save_ifs - if test "X$lt_pkg" = "X$lt_p"; then - pic_mode=yes - fi - done - IFS=$lt_save_ifs - ;; - esac], - [pic_mode=m4_default([$1], [default])]) - -_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl -])# _LT_WITH_PIC - -LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) -LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) - -# Old name: -AU_DEFUN([AC_LIBTOOL_PICMODE], -[_LT_SET_OPTION([LT_INIT], [pic-only]) -AC_DIAGNOSE([obsolete], -[$0: Remove this warning and the call to _LT_SET_OPTION when you -put the 'pic-only' option into LT_INIT's first parameter.]) -]) - -dnl aclocal-1.4 backwards compatibility: -dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) - - -m4_define([_LTDL_MODE], []) -LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], - [m4_define([_LTDL_MODE], [nonrecursive])]) -LT_OPTION_DEFINE([LTDL_INIT], [recursive], - [m4_define([_LTDL_MODE], [recursive])]) -LT_OPTION_DEFINE([LTDL_INIT], [subproject], - [m4_define([_LTDL_MODE], [subproject])]) - -m4_define([_LTDL_TYPE], []) -LT_OPTION_DEFINE([LTDL_INIT], [installable], - [m4_define([_LTDL_TYPE], [installable])]) -LT_OPTION_DEFINE([LTDL_INIT], [convenience], - [m4_define([_LTDL_TYPE], [convenience])]) - -# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- -# -# Copyright (C) 2004-2005, 2007-2008, 2011-2015 Free Software -# Foundation, Inc. -# Written by Gary V. Vaughan, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 6 ltsugar.m4 - -# This is to help aclocal find these macros, as it can't see m4_define. -AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) - - -# lt_join(SEP, ARG1, [ARG2...]) -# ----------------------------- -# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their -# associated separator. -# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier -# versions in m4sugar had bugs. -m4_define([lt_join], -[m4_if([$#], [1], [], - [$#], [2], [[$2]], - [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) -m4_define([_lt_join], -[m4_if([$#$2], [2], [], - [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) - - -# lt_car(LIST) -# lt_cdr(LIST) -# ------------ -# Manipulate m4 lists. -# These macros are necessary as long as will still need to support -# Autoconf-2.59, which quotes differently. -m4_define([lt_car], [[$1]]) -m4_define([lt_cdr], -[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], - [$#], 1, [], - [m4_dquote(m4_shift($@))])]) -m4_define([lt_unquote], $1) - - -# lt_append(MACRO-NAME, STRING, [SEPARATOR]) -# ------------------------------------------ -# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'. -# Note that neither SEPARATOR nor STRING are expanded; they are appended -# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). -# No SEPARATOR is output if MACRO-NAME was previously undefined (different -# than defined and empty). 
-# -# This macro is needed until we can rely on Autoconf 2.62, since earlier -# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. -m4_define([lt_append], -[m4_define([$1], - m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) - - - -# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) -# ---------------------------------------------------------- -# Produce a SEP delimited list of all paired combinations of elements of -# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list -# has the form PREFIXmINFIXSUFFIXn. -# Needed until we can rely on m4_combine added in Autoconf 2.62. -m4_define([lt_combine], -[m4_if(m4_eval([$# > 3]), [1], - [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl -[[m4_foreach([_Lt_prefix], [$2], - [m4_foreach([_Lt_suffix], - ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, - [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) - - -# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) -# ----------------------------------------------------------------------- -# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited -# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. -m4_define([lt_if_append_uniq], -[m4_ifdef([$1], - [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], - [lt_append([$1], [$2], [$3])$4], - [$5])], - [lt_append([$1], [$2], [$3])$4])]) - - -# lt_dict_add(DICT, KEY, VALUE) -# ----------------------------- -m4_define([lt_dict_add], -[m4_define([$1($2)], [$3])]) - - -# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) -# -------------------------------------------- -m4_define([lt_dict_add_subkey], -[m4_define([$1($2:$3)], [$4])]) - - -# lt_dict_fetch(DICT, KEY, [SUBKEY]) -# ---------------------------------- -m4_define([lt_dict_fetch], -[m4_ifval([$3], - m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), - m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) - - -# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) -# ----------------------------------------------------------------- -m4_define([lt_if_dict_fetch], -[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], - [$5], - [$6])]) - - -# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) -# -------------------------------------------------------------- -m4_define([lt_dict_filter], -[m4_if([$5], [], [], - [lt_join(m4_quote(m4_default([$4], [[, ]])), - lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), - [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl -]) - -# ltversion.m4 -- version numbers -*- Autoconf -*- -# -# Copyright (C) 2004, 2011-2015 Free Software Foundation, Inc. -# Written by Scott James Remnant, 2004 -# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# @configure_input@ - -# serial 4179 ltversion.m4 -# This file is part of GNU Libtool - -m4_define([LT_PACKAGE_VERSION], [2.4.6]) -m4_define([LT_PACKAGE_REVISION], [2.4.6]) - -AC_DEFUN([LTVERSION_VERSION], -[macro_version='2.4.6' -macro_revision='2.4.6' -_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) -_LT_DECL(, macro_revision, 0) -]) - -# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- -# -# Copyright (C) 2004-2005, 2007, 2009, 2011-2015 Free Software -# Foundation, Inc. -# Written by Scott James Remnant, 2004. 
-# -# This file is free software; the Free Software Foundation gives -# unlimited permission to copy and/or distribute it, with or without -# modifications, as long as this notice is preserved. - -# serial 5 lt~obsolete.m4 - -# These exist entirely to fool aclocal when bootstrapping libtool. -# -# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN), -# which have later been changed to m4_define as they aren't part of the -# exported API, or moved to Autoconf or Automake where they belong. -# -# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN -# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us -# using a macro with the same name in our local m4/libtool.m4 it'll -# pull the old libtool.m4 in (it doesn't see our shiny new m4_define -# and doesn't know about Autoconf macros at all.) -# -# So we provide this file, which has a silly filename so it's always -# included after everything else. This provides aclocal with the -# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything -# because those macros already exist, or will be overwritten later. -# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. -# -# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. -# Yes, that means every name once taken will need to remain here until -# we give up compatibility with versions before 1.7, at which point -# we need to keep only those names which we still refer to. - -# This is to help aclocal find these macros, as it can't see m4_define. -AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) - -m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) -m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) -m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) -m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) -m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) -m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) -m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) -m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) -m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) -m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) -m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) -m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) -m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) -m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) -m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) -m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) -m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) -m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) -m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) -m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) -m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) -m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) -m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) -m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) -m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) -m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) -m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], 
[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) -m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) -m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) -m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) -m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) -m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) -m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) -m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) -m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) -m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) -m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) -m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) -m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) -m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) -m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) -m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) -m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) -m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) -m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) -m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) -m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) -m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) -m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) -m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) -m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) -m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) -m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) -m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) -m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) -m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) -m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) -m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) - diff --git a/art/icon-243x273.gif b/art/icon-243x273.gif new file mode 100644 index 0000000000..e1cdfd0b51 Binary files /dev/null and b/art/icon-243x273.gif differ diff --git a/art/icon-80x90.gif b/art/icon-80x90.gif new file mode 100644 index 0000000000..ebb2390005 Binary files /dev/null and b/art/icon-80x90.gif differ diff --git a/art/sqlite370.svg b/art/sqlite370.svg new file mode 100644 index 0000000000..9a050b593d --- /dev/null +++ b/art/sqlite370.svg @@ -0,0 +1,104 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/auto.def b/auto.def new file mode 100644 index 0000000000..214ef22304 --- /dev/null +++ b/auto.def @@ -0,0 +1,69 @@ +#!/do/not/tclsh +# ^^^ help out editors which guess this file's content type. +# +# This is the main autosetup-compatible configure script for the +# SQLite project. +# +# This script and all of its dependencies must be kept compatible with +# JimTCL, a copy of which is included in this source tree as +# ./autosetup/jimsh0.c. 
The number of incompatibilities between +# canonical TCL and JimTCL is very low and alternative formulations of +# incompatible constructs have, so far, been easy to find. +# +# JimTCL: https://jim.tcl.tk +# +# Code-diver notes: APIs names starting with "sqlite-" are specific to +# this project and can be found in autosetup/sqlite-config.tcl. Names +# starting with "proj-" are project-agnostic and found in +# autosetup/proj.tcl. +# +use sqlite-config +sqlite-configure canonical { + proj-if-opt-truthy dev { + # --enable-dev needs to come early so that the downstream tests + # which check for the following flags use their updated state. + proj-opt-set all 1 + proj-opt-set debug 1 + proj-opt-set amalgamation 0 + define CFLAGS [get-env CFLAGS {-O0 -g}] + # -------------^^^^^^^ intentionally using [get-env] instead of + # [proj-get-env] here because [sqlite-setup-default-cflags] uses + # [proj-get-env] and we want this to supercede that. + sqlite-munge-cflags; # straighten out -DSQLITE_ENABLE/OMIT flags + } + sqlite-handle-debug ;# must come after --dev flag check + sqlite-check-common-bins ;# must come before [sqlite-handle-wasi-sdk] + sqlite-handle-wasi-sdk ;# must run relatively early, as it changes the environment + sqlite-check-common-system-deps + + proj-define-for-opt amalgamation USE_AMALGAMATION "Use amalgamation for builds?" + + proj-define-for-opt gcov USE_GCOV "Use gcov?" + + proj-define-for-opt test-status TSTRNNR_OPTS \ + "test-runner flags:" {--status} {} + + proj-define-for-opt linemacros AMALGAMATION_LINE_MACROS \ + "Use #line macros in the amalgamation:" + + define AMALGAMATION_EXTRA_SRC \ + [join [opt-val amalgamation-extra-src ""] " "] + + define LINK_TOOLS_DYNAMICALLY [proj-opt-was-provided dynlink-tools] + + if {[set fsan [join [opt-val asan-fsanitize] ","]] in {auto ""}} { + set fsan address,bounds-strict + } + define CFLAGS_ASAN_FSANITIZE [proj-check-fsanitize [split $fsan ", "]] + + sqlite-handle-tcl + sqlite-handle-emsdk + + proj-if-opt-truthy static-shells { + proj-opt-set static-tclsqlite3 1 + proj-opt-set static-cli-shell 1 + } + proj-define-for-opt static-tclsqlite3 STATIC_TCLSQLITE3 "Statically link tclsqlite3?" + proj-define-for-opt static-cli-shell STATIC_CLI_SHELL "Statically link CLI shell?" + +}; # sqlite-configure diff --git a/autoconf/INSTALL b/autoconf/INSTALL deleted file mode 100644 index a1e89e18ad..0000000000 --- a/autoconf/INSTALL +++ /dev/null @@ -1,370 +0,0 @@ -Installation Instructions -************************* - -Copyright (C) 1994-1996, 1999-2002, 2004-2011 Free Software Foundation, -Inc. - - Copying and distribution of this file, with or without modification, -are permitted in any medium without royalty provided the copyright -notice and this notice are preserved. This file is offered as-is, -without warranty of any kind. - -Basic Installation -================== - - Briefly, the shell commands `./configure; make; make install' should -configure, build, and install this package. The following -more-detailed instructions are generic; see the `README' file for -instructions specific to this package. Some packages provide this -`INSTALL' file but do not implement all of the features documented -below. The lack of an optional feature in a given package is not -necessarily a bug. More recommendations for GNU packages can be found -in *note Makefile Conventions: (standards)Makefile Conventions. 
- - The `configure' shell script attempts to guess correct values for -various system-dependent variables used during compilation. It uses -those values to create a `Makefile' in each directory of the package. -It may also create one or more `.h' files containing system-dependent -definitions. Finally, it creates a shell script `config.status' that -you can run in the future to recreate the current configuration, and a -file `config.log' containing compiler output (useful mainly for -debugging `configure'). - - It can also use an optional file (typically called `config.cache' -and enabled with `--cache-file=config.cache' or simply `-C') that saves -the results of its tests to speed up reconfiguring. Caching is -disabled by default to prevent problems with accidental use of stale -cache files. - - If you need to do unusual things to compile the package, please try -to figure out how `configure' could check whether to do them, and mail -diffs or instructions to the address given in the `README' so they can -be considered for the next release. If you are using the cache, and at -some point `config.cache' contains results you don't want to keep, you -may remove or edit it. - - The file `configure.ac' (or `configure.in') is used to create -`configure' by a program called `autoconf'. You need `configure.ac' if -you want to change it or regenerate `configure' using a newer version -of `autoconf'. - - The simplest way to compile this package is: - - 1. `cd' to the directory containing the package's source code and type - `./configure' to configure the package for your system. - - Running `configure' might take a while. While running, it prints - some messages telling which features it is checking for. - - 2. Type `make' to compile the package. - - 3. Optionally, type `make check' to run any self-tests that come with - the package, generally using the just-built uninstalled binaries. - - 4. Type `make install' to install the programs and any data files and - documentation. When installing into a prefix owned by root, it is - recommended that the package be configured and built as a regular - user, and only the `make install' phase executed with root - privileges. - - 5. Optionally, type `make installcheck' to repeat any self-tests, but - this time using the binaries in their final installed location. - This target does not install anything. Running this target as a - regular user, particularly if the prior `make install' required - root privileges, verifies that the installation completed - correctly. - - 6. You can remove the program binaries and object files from the - source code directory by typing `make clean'. To also remove the - files that `configure' created (so you can compile the package for - a different kind of computer), type `make distclean'. There is - also a `make maintainer-clean' target, but that is intended mainly - for the package's developers. If you use it, you may have to get - all sorts of other programs in order to regenerate files that came - with the distribution. - - 7. Often, you can also type `make uninstall' to remove the installed - files again. In practice, not all packages have tested that - uninstallation works correctly, even though it is required by the - GNU Coding Standards. - - 8. Some packages, particularly those that use Automake, provide `make - distcheck', which can by used by developers to test that all other - targets like `make install' and `make uninstall' work correctly. - This target is generally not run by end users. 
- -Compilers and Options -===================== - - Some systems require unusual options for compilation or linking that -the `configure' script does not know about. Run `./configure --help' -for details on some of the pertinent environment variables. - - You can give `configure' initial values for configuration parameters -by setting variables in the command line or in the environment. Here -is an example: - - ./configure CC=c99 CFLAGS=-g LIBS=-lposix - - *Note Defining Variables::, for more details. - -Compiling For Multiple Architectures -==================================== - - You can compile the package for more than one kind of computer at the -same time, by placing the object files for each architecture in their -own directory. To do this, you can use GNU `make'. `cd' to the -directory where you want the object files and executables to go and run -the `configure' script. `configure' automatically checks for the -source code in the directory that `configure' is in and in `..'. This -is known as a "VPATH" build. - - With a non-GNU `make', it is safer to compile the package for one -architecture at a time in the source code directory. After you have -installed the package for one architecture, use `make distclean' before -reconfiguring for another architecture. - - On MacOS X 10.5 and later systems, you can create libraries and -executables that work on multiple system types--known as "fat" or -"universal" binaries--by specifying multiple `-arch' options to the -compiler but only a single `-arch' option to the preprocessor. Like -this: - - ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ - CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \ - CPP="gcc -E" CXXCPP="g++ -E" - - This is not guaranteed to produce working output in all cases, you -may have to build one architecture at a time and combine the results -using the `lipo' tool if you have problems. - -Installation Names -================== - - By default, `make install' installs the package's commands under -`/usr/local/bin', include files under `/usr/local/include', etc. You -can specify an installation prefix other than `/usr/local' by giving -`configure' the option `--prefix=PREFIX', where PREFIX must be an -absolute file name. - - You can specify separate installation prefixes for -architecture-specific files and architecture-independent files. If you -pass the option `--exec-prefix=PREFIX' to `configure', the package uses -PREFIX as the prefix for installing programs and libraries. -Documentation and other data files still use the regular prefix. - - In addition, if you use an unusual directory layout you can give -options like `--bindir=DIR' to specify different values for particular -kinds of files. Run `configure --help' for a list of the directories -you can set and what kinds of files go in them. In general, the -default for these options is expressed in terms of `${prefix}', so that -specifying just `--prefix' will affect all of the other directory -specifications that were not explicitly provided. - - The most portable way to affect installation locations is to pass the -correct locations to `configure'; however, many packages provide one or -both of the following shortcuts of passing variable assignments to the -`make install' command line to change installation locations without -having to reconfigure or recompile. - - The first method involves providing an override variable for each -affected directory. 
For example, `make install -prefix=/alternate/directory' will choose an alternate location for all -directory configuration variables that were expressed in terms of -`${prefix}'. Any directories that were specified during `configure', -but not in terms of `${prefix}', must each be overridden at install -time for the entire installation to be relocated. The approach of -makefile variable overrides for each directory variable is required by -the GNU Coding Standards, and ideally causes no recompilation. -However, some platforms have known limitations with the semantics of -shared libraries that end up requiring recompilation when using this -method, particularly noticeable in packages that use GNU Libtool. - - The second method involves providing the `DESTDIR' variable. For -example, `make install DESTDIR=/alternate/directory' will prepend -`/alternate/directory' before all installation names. The approach of -`DESTDIR' overrides is not required by the GNU Coding Standards, and -does not work on platforms that have drive letters. On the other hand, -it does better at avoiding recompilation issues, and works well even -when some directory options were not specified in terms of `${prefix}' -at `configure' time. - -Optional Features -================= - - If the package supports it, you can cause programs to be installed -with an extra prefix or suffix on their names by giving `configure' the -option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'. - - Some packages pay attention to `--enable-FEATURE' options to -`configure', where FEATURE indicates an optional part of the package. -They may also pay attention to `--with-PACKAGE' options, where PACKAGE -is something like `gnu-as' or `x' (for the X Window System). The -`README' should mention any `--enable-' and `--with-' options that the -package recognizes. - - For packages that use the X Window System, `configure' can usually -find the X include and library files automatically, but if it doesn't, -you can use the `configure' options `--x-includes=DIR' and -`--x-libraries=DIR' to specify their locations. - - Some packages offer the ability to configure how verbose the -execution of `make' will be. For these packages, running `./configure ---enable-silent-rules' sets the default to minimal output, which can be -overridden with `make V=1'; while running `./configure ---disable-silent-rules' sets the default to verbose, which can be -overridden with `make V=0'. - -Particular systems -================== - - On HP-UX, the default C compiler is not ANSI C compatible. If GNU -CC is not installed, it is recommended to use the following options in -order to use an ANSI C compiler: - - ./configure CC="cc -Ae -D_XOPEN_SOURCE=500" - -and if that doesn't work, install pre-built binaries of GCC for HP-UX. - - HP-UX `make' updates targets which have the same time stamps as -their prerequisites, which makes it generally unusable when shipped -generated files such as `configure' are involved. Use GNU `make' -instead. - - On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot -parse its `' header file. The option `-nodtk' can be used as -a workaround. If GNU CC is not installed, it is therefore recommended -to try - - ./configure CC="cc" - -and if that doesn't work, try - - ./configure CC="cc -nodtk" - - On Solaris, don't put `/usr/ucb' early in your `PATH'. This -directory contains several dysfunctional programs; working variants of -these programs are available in `/usr/bin'. 
So, if you need `/usr/ucb' -in your `PATH', put it _after_ `/usr/bin'. - - On Haiku, software installed for all users goes in `/boot/common', -not `/usr/local'. It is recommended to use the following options: - - ./configure --prefix=/boot/common - -Specifying the System Type -========================== - - There may be some features `configure' cannot figure out -automatically, but needs to determine by the type of machine the package -will run on. Usually, assuming the package is built to be run on the -_same_ architectures, `configure' can figure that out, but if it prints -a message saying it cannot guess the machine type, give it the -`--build=TYPE' option. TYPE can either be a short name for the system -type, such as `sun4', or a canonical name which has the form: - - CPU-COMPANY-SYSTEM - -where SYSTEM can have one of these forms: - - OS - KERNEL-OS - - See the file `config.sub' for the possible values of each field. If -`config.sub' isn't included in this package, then this package doesn't -need to know the machine type. - - If you are _building_ compiler tools for cross-compiling, you should -use the option `--target=TYPE' to select the type of system they will -produce code for. - - If you want to _use_ a cross compiler, that generates code for a -platform different from the build platform, you should specify the -"host" platform (i.e., that on which the generated programs will -eventually be run) with `--host=TYPE'. - -Sharing Defaults -================ - - If you want to set default values for `configure' scripts to share, -you can create a site shell script called `config.site' that gives -default values for variables like `CC', `cache_file', and `prefix'. -`configure' looks for `PREFIX/share/config.site' if it exists, then -`PREFIX/etc/config.site' if it exists. Or, you can set the -`CONFIG_SITE' environment variable to the location of the site script. -A warning: not all `configure' scripts look for a site script. - -Defining Variables -================== - - Variables not defined in a site shell script can be set in the -environment passed to `configure'. However, some packages may run -configure again during the build, and the customized values of these -variables may be lost. In order to avoid this problem, you should set -them in the `configure' command line, using `VAR=value'. For example: - - ./configure CC=/usr/local2/bin/gcc - -causes the specified `gcc' to be used as the C compiler (unless it is -overridden in the site shell script). - -Unfortunately, this technique does not work for `CONFIG_SHELL' due to -an Autoconf bug. Until the bug is fixed you can use this workaround: - - CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash - -`configure' Invocation -====================== - - `configure' recognizes the following options to control how it -operates. - -`--help' -`-h' - Print a summary of all of the options to `configure', and exit. - -`--help=short' -`--help=recursive' - Print a summary of the options unique to this package's - `configure', and exit. The `short' variant lists options used - only in the top level, while the `recursive' variant lists options - also present in any nested packages. - -`--version' -`-V' - Print the version of Autoconf used to generate the `configure' - script, and exit. - -`--cache-file=FILE' - Enable the cache: use and save the results of the tests in FILE, - traditionally `config.cache'. FILE defaults to `/dev/null' to - disable caching. - -`--config-cache' -`-C' - Alias for `--cache-file=config.cache'. 
- -`--quiet' -`--silent' -`-q' - Do not print messages saying which checks are being made. To - suppress all normal output, redirect it to `/dev/null' (any error - messages will still be shown). - -`--srcdir=DIR' - Look for the package's source code in directory DIR. Usually - `configure' can determine that directory automatically. - -`--prefix=DIR' - Use DIR as the installation prefix. *note Installation Names:: - for more details, including other options available for fine-tuning - the installation locations. - -`--no-create' -`-n' - Run the configure checks, but stop before creating any output - files. - -`configure' also accepts some other, not widely useful, options. Run -`configure --help' for more details. - diff --git a/autoconf/Makefile.am b/autoconf/Makefile.am deleted file mode 100644 index 694419b27d..0000000000 --- a/autoconf/Makefile.am +++ /dev/null @@ -1,20 +0,0 @@ - -AM_CFLAGS = @BUILD_CFLAGS@ -lib_LTLIBRARIES = libsqlite3.la -libsqlite3_la_SOURCES = sqlite3.c -libsqlite3_la_LDFLAGS = -no-undefined -version-info 8:6:8 - -bin_PROGRAMS = sqlite3 -sqlite3_SOURCES = shell.c sqlite3.h -EXTRA_sqlite3_SOURCES = sqlite3.c -sqlite3_LDADD = @EXTRA_SHELL_OBJ@ @READLINE_LIBS@ -sqlite3_DEPENDENCIES = @EXTRA_SHELL_OBJ@ -sqlite3_CFLAGS = $(AM_CFLAGS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS -DSQLITE_ENABLE_DBPAGE_VTAB -DSQLITE_ENABLE_STMTVTAB -DSQLITE_ENABLE_DBSTAT_VTAB $(SHELL_CFLAGS) - -include_HEADERS = sqlite3.h sqlite3ext.h - -EXTRA_DIST = sqlite3.1 tea Makefile.msc sqlite3.rc sqlite3rc.h README.txt Replace.cs Makefile.fallback -pkgconfigdir = ${libdir}/pkgconfig -pkgconfig_DATA = sqlite3.pc - -man_MANS = sqlite3.1 diff --git a/autoconf/Makefile.in b/autoconf/Makefile.in new file mode 100644 index 0000000000..c938ffe1bf --- /dev/null +++ b/autoconf/Makefile.in @@ -0,0 +1,314 @@ +######################################################################## +# This is a main makefile for the "autoconf" bundle of SQLite. This is +# a trimmed-down version of the canonical makefile, devoid of most +# documentation. For the full docs, see /main.mk in the canonical +# source tree. +# +# Maintenance reminders: +# +# - To keep this working with an out-of-tree build, be sure to prefix +# input file names with $(TOP)/ where appropriate (which is most +# places). +# +# - The original/canonical recipes can be found in /main.mk in the +# canonical source tree. +all: + +TOP = @abs_top_srcdir@ + +PACKAGE_VERSION = @PACKAGE_VERSION@ + +# +# Filename extensions for binaries and libraries +# +B.exe = @BUILD_EXEEXT@ +T.exe = @TARGET_EXEEXT@ +B.dll = @BUILD_DLLEXT@ +T.dll = @TARGET_DLLEXT@ +B.lib = @BUILD_LIBEXT@ +T.lib = @TARGET_LIBEXT@ + +# +# Autotools-compatibility dirs +# +prefix = @prefix@ +datadir = @datadir@ +mandir = @mandir@ +includedir = @includedir@ +exec_prefix = @exec_prefix@ +bindir = @bindir@ +libdir = @libdir@ + +# +# Required binaries +# +INSTALL = @BIN_INSTALL@ +AR = @AR@ +AR.flags = cr +CC = @CC@ + + +ENABLE_LIB_SHARED = @ENABLE_LIB_SHARED@ +ENABLE_LIB_STATIC = @ENABLE_LIB_STATIC@ +HAVE_WASI_SDK = @HAVE_WASI_SDK@ + +CFLAGS = @CFLAGS@ @CPPFLAGS@ +# +# $(LDFLAGS.configure) represents any LDFLAGS=... the client passes to +# configure. See main.mk. 
+# +LDFLAGS.configure = @LDFLAGS@ + +CFLAGS.core = @SH_CFLAGS@ +LDFLAGS.shlib = @SH_LDFLAGS@ +LDFLAGS.zlib = @LDFLAGS_ZLIB@ +LDFLAGS.math = @LDFLAGS_MATH@ +LDFLAGS.rpath = @LDFLAGS_RPATH@ +LDFLAGS.pthread = @LDFLAGS_PTHREAD@ +LDFLAGS.dlopen = @LDFLAGS_DLOPEN@ +LDFLAGS.readline = @LDFLAGS_READLINE@ +CFLAGS.readline = @CFLAGS_READLINE@ +LDFLAGS.rt = @LDFLAGS_RT@ +LDFLAGS.icu = @LDFLAGS_ICU@ +CFLAGS.icu = @CFLAGS_ICU@ + +# INSTALL reminder: we specifically do not strip binaries, +# as discussed in https://sqlite.org/forum/forumpost/9a67df63eda9925c. +INSTALL.noexec = $(INSTALL) -m 0644 + +install-dir.bin = $(DESTDIR)$(bindir) +install-dir.lib = $(DESTDIR)$(libdir) +install-dir.include = $(DESTDIR)$(includedir) +install-dir.pkgconfig = $(DESTDIR)$(libdir)/pkgconfig +install-dir.man1 = $(DESTDIR)$(mandir)/man1 +install-dir.all = $(install-dir.bin) $(install-dir.include) \ + $(install-dir.lib) $(install-dir.man1) \ + $(install-dir.pkgconfig) +$(install-dir.all): + @if [ ! -d "$@" ]; then set -x; $(INSTALL) -d "$@"; fi +# ^^^^ on some platforms, install -d fails if the target already exists. + + +# +# Vars with the AS_ prefix are specifically related to AutoSetup. +# +# AS_AUTO_DEF is the main configure script. +# +AS_AUTO_DEF = $(TOP)/auto.def + +# +# Shell commands to re-run $(TOP)/configure with the same args it was +# invoked with to produce this makefile. +# +AS_AUTORECONFIG = @SQLITE_AUTORECONFIG@ +Makefile: $(TOP)/Makefile.in $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ + +sqlite3.pc: $(TOP)/sqlite3.pc.in $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ + +sqlite_cfg.h: $(AS_AUTO_DEF) + $(AS_AUTORECONFIG) + @touch $@ + +# +# CFLAGS for sqlite3$(T.exe) +# +SHELL_OPT ?= @OPT_SHELL@ +SHELL_OPT += -DSQLITE_DQS=0 +SHELL_OPT += -DSQLITE_ENABLE_FTS4 +#SHELL_OPT += -DSQLITE_ENABLE_FTS5 +SHELL_OPT += -DSQLITE_ENABLE_RTREE +SHELL_OPT += -DSQLITE_ENABLE_EXPLAIN_COMMENTS +SHELL_OPT += -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +SHELL_OPT += -DSQLITE_ENABLE_STMTVTAB +SHELL_OPT += -DSQLITE_ENABLE_DBPAGE_VTAB +SHELL_OPT += -DSQLITE_ENABLE_DBSTAT_VTAB +SHELL_OPT += -DSQLITE_ENABLE_BYTECODE_VTAB +SHELL_OPT += -DSQLITE_ENABLE_OFFSET_SQL_FUNC +SHELL_OPT += -DSQLITE_ENABLE_PERCENTILE +SHELL_OPT += -DSQLITE_STRICT_SUBTYPE=1 + +# +# Library-level feature flags +# +OPT_FEATURE_FLAGS = @OPT_FEATURE_FLAGS@ + +LDFLAGS.libsqlite3.soname = @LDFLAGS_LIBSQLITE3_SONAME@ +# soname: see https://sqlite.org/src/forumpost/5a3b44f510df8ded +LDFLAGS.libsqlite3.os-specific = \ + @LDFLAGS_MAC_CVERSION@ @LDFLAGS_MAC_INSTALL_NAME@ @LDFLAGS_OUT_IMPLIB@ + +LDFLAGS.libsqlite3 = \ + $(LDFLAGS.rpath) $(LDFLAGS.pthread) \ + $(LDFLAGS.math) $(LDFLAGS.dlopen) \ + $(LDFLAGS.zlib) $(LDFLAGS.icu) \ + $(LDFLAGS.rt) $(LDFLAGS.configure) +CFLAGS.libsqlite3 = -I. 
$(CFLAGS.core) $(CFLAGS.icu) $(OPT_FEATURE_FLAGS) + +sqlite3.o: $(TOP)/sqlite3.h $(TOP)/sqlite3.c + $(CC) -c $(TOP)/sqlite3.c -o $@ $(CFLAGS) $(CFLAGS.libsqlite3) + +libsqlite3.LIB = libsqlite3$(T.lib) +libsqlite3.DLL.basename = @SQLITE_DLL_BASENAME@ +libsqlite3.out.implib = @SQLITE_OUT_IMPLIB@ +libsqlite3.DLL = $(libsqlite3.DLL.basename)$(T.dll) +libsqlite3.DLL.install-rules = @SQLITE_DLL_INSTALL_RULES@ + +$(libsqlite3.DLL): sqlite3.o + $(CC) -o $@ sqlite3.o $(LDFLAGS.shlib) \ + $(LDFLAGS) $(LDFLAGS.libsqlite3) \ + $(LDFLAGS.libsqlite3.os-specific) $(LDFLAGS.libsqlite3.soname) +$(libsqlite3.DLL)-1: $(libsqlite3.DLL) +$(libsqlite3.DLL)-0: +all: $(libsqlite3.DLL)-$(ENABLE_LIB_SHARED) + +$(libsqlite3.LIB): sqlite3.o + $(AR) $(AR.flags) $@ sqlite3.o +$(libsqlite3.LIB)-1: $(libsqlite3.LIB) +$(libsqlite3.LIB)-0: +all: $(libsqlite3.LIB)-$(ENABLE_LIB_STATIC) + +# +# Maintenance reminder: the install-dll-... rules must be kept in sync +# with the main copies rom /main.mk. +# +install-dll-out-implib: $(install-dir.lib) $(libsqlite3.DLL) + if [ x != "x$(libsqlite3.out.implib)" ] && [ -f "$(libsqlite3.out.implib)" ]; then \ + $(INSTALL) $(libsqlite3.out.implib) "$(install-dir.lib)"; \ + fi + +install-dll-unix-generic: install-dll-out-implib + $(INSTALL) $(libsqlite3.DLL) "$(install-dir.lib)" + @echo "Setting up $(libsqlite3.DLL) version symlinks..."; \ + cd "$(install-dir.lib)" || exit $$?; \ + rm -f $(libsqlite3.DLL).0 $(libsqlite3.DLL).$(PACKAGE_VERSION) || exit $$?; \ + mv $(libsqlite3.DLL) $(libsqlite3.DLL).$(PACKAGE_VERSION) || exit $$?; \ + ln -s $(libsqlite3.DLL).$(PACKAGE_VERSION) $(libsqlite3.DLL) || exit $$?; \ + ln -s $(libsqlite3.DLL).$(PACKAGE_VERSION) $(libsqlite3.DLL).0 || exit $$?; \ + ls -la $(libsqlite3.DLL) $(libsqlite3.DLL).[a03]*; \ + if [ -e $(libsqlite3.DLL).0.8.6 ]; then \ + echo "ACHTUNG: legacy libtool-compatible install found. Re-linking it..."; \ + rm -f libsqlite3.la $(libsqlite3.DLL).0.8.6 || exit $$?; \ + ln -s $(libsqlite3.DLL).$(PACKAGE_VERSION) $(libsqlite3.DLL).0.8.6 || exit $$?; \ + ls -la $(libsqlite3.DLL).0.8.6; \ + elif [ x1 = "x$(INSTALL_SO_086_LINK)" ]; then \ + echo "ACHTUNG: installing legacy libtool-style links because INSTALL_SO_086_LINK=1"; \ + rm -f libsqlite3.la $(libsqlite3.DLL).0.8.6 || exit $$?; \ + ln -s $(libsqlite3.DLL).$(PACKAGE_VERSION) $(libsqlite3.DLL).0.8.6 || exit $$?; \ + ls -la $(libsqlite3.DLL).0.8.6; \ + fi + +install-dll-msys: install-dll-out-implib $(install-dir.bin) + $(INSTALL) $(libsqlite3.DLL) "$(install-dir.bin)" +# ----------------------------------------------^^^ yes, bin +# Each of {msys,mingw,cygwin} uses a different name for the DLL, but +# that is already accounted for via $(libsqlite3.DLL). 
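The install-dir.* variables and the install-dll-*/install-lib rules above all stage files under $(DESTDIR)$(prefix). A minimal usage sketch, assuming the autosetup-generated ./configure shipped in this autoconf bundle and a POSIX shell; the /tmp/sqlite-stage path is only illustrative:

     ./configure --prefix=/usr/local        # regenerates Makefile from Makefile.in
     make                                   # default "all" target: library plus sqlite3 shell
     make install DESTDIR=/tmp/sqlite-stage
     # files land under /tmp/sqlite-stage/usr/local/{bin,lib,include,...};
     # on generic unix, install-dll-unix-generic then creates the
     # libsqlite3 version symlinks inside the staged libdir.
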
+install-dll-mingw: install-dll-msys +install-dll-cygwin: install-dll-msys + +install-dll-darwin: $(install-dir.lib) $(libsqlite3.DLL) + $(INSTALL) $(libsqlite3.DLL) "$(install-dir.lib)" + @echo "Setting up $(libsqlite3.DLL) version symlinks..."; \ + cd "$(install-dir.lib)" || exit $$?; \ + rm -f libsqlite3.0$(T.dll) libsqlite3.$(PACKAGE_VERSION)$(T.dll) || exit $$?; \ + dllname=libsqlite3.$(PACKAGE_VERSION)$(T.dll); \ + mv $(libsqlite3.DLL) $$dllname || exit $$?; \ + ln -s $$dllname $(libsqlite3.DLL) || exit $$?; \ + ln -s $$dllname libsqlite3.0$(T.dll) || exit $$?; \ + ls -la $$dllname $(libsqlite3.DLL) libsqlite3.0$(T.dll) + +install-dll-1: install-dll-$(libsqlite3.DLL.install-rules) +install-dll-0 install-dll-: +install-dll: install-dll-$(ENABLE_LIB_SHARED) +install: install-dll + +install-lib-1: $(install-dir.lib) $(libsqlite3.LIB) + $(INSTALL.noexec) $(libsqlite3.LIB) "$(install-dir.lib)" +install-lib-0 install-lib-: +install-lib: install-lib-$(ENABLE_LIB_STATIC) +install: install-lib + +# +# Flags to link the shell app either directly against sqlite3.c +# (ENABLE_STATIC_SHELL==1) or libsqlite3.so (ENABLE_STATIC_SHELL==0). +# +# Maintenance reminder: placement of $(LDFLAGS) is more relevant for +# some platforms than others: +# https://sqlite.org/forum/forumpost/d80ecdaddd +ENABLE_STATIC_SHELL = @ENABLE_STATIC_SHELL@ +sqlite3-shell-link-flags.1 = $(TOP)/sqlite3.c $(LDFLAGS) $(LDFLAGS.libsqlite3) +sqlite3-shell-link-flags.0 = $(LDFLAGS) -L. -lsqlite3 $(LDFLAGS.zlib) $(LDFLAGS.math) +sqlite3-shell-deps.1 = $(TOP)/sqlite3.c +sqlite3-shell-deps.0 = $(libsqlite3.DLL) +# +# STATIC_CLI_SHELL = 1 to statically link sqlite3$(T.exe), else +# 0. Requires static versions of all requisite libraries. Primarily +# intended for use with static-friendly environments like Alpine +# Linux. +# +STATIC_CLI_SHELL = @STATIC_CLI_SHELL@ +# +# sqlite3-shell-static.flags.N = N is $(STATIC_CLI_SHELL) +# +sqlite3-shell-static.flags.1 = -static +sqlite3-shell-static.flags.0 = +sqlite3$(T.exe): $(TOP)/shell.c $(sqlite3-shell-deps.$(ENABLE_STATIC_SHELL)) + $(CC) -o $@ \ + $(TOP)/shell.c $(sqlite3-shell-link-flags.$(ENABLE_STATIC_SHELL)) \ + $(sqlite3-shell-static.flags.$(STATIC_CLI_SHELL)) \ + -I. 
$(OPT_FEATURE_FLAGS) $(SHELL_OPT) \ + $(CFLAGS) $(CFLAGS.readline) $(CFLAGS.icu) \ + $(LDFLAGS.readline) + +sqlite3$(T.exe)-1: +sqlite3$(T.exe)-0: sqlite3$(T.exe) +all: sqlite3$(T.exe)-$(HAVE_WASI_SDK) + +install-shell-0: sqlite3$(T.exe) $(install-dir.bin) + $(INSTALL) sqlite3$(T.exe) "$(install-dir.bin)" +install-shell-1: +install: install-shell-$(HAVE_WASI_SDK) + +install-headers: $(TOP)/sqlite3.h $(install-dir.include) + $(INSTALL.noexec) $(TOP)/sqlite3.h $(TOP)/sqlite3ext.h "$(install-dir.include)" +install: install-headers + +install-pc: sqlite3.pc $(install-dir.pkgconfig) + $(INSTALL.noexec) sqlite3.pc "$(install-dir.pkgconfig)" +install: install-pc + +install-man1: $(TOP)/sqlite3.1 $(install-dir.man1) + $(INSTALL.noexec) $(TOP)/sqlite3.1 "$(install-dir.man1)" +install: install-man1 + +clean: + rm -f *.o sqlite3$(T.exe) + rm -f $(libsqlite3.LIB) $(libsqlite3.DLL) libsqlite3$(T.dll).a + +distclean: clean + rm -f jimsh0$(T.exe) config.* sqlite3.pc sqlite_cfg.h Makefile + +DIST_FILES := \ + README.txt VERSION \ + auto.def autosetup configure tea \ + sqlite3.h sqlite3.c shell.c sqlite3ext.h \ + Makefile.in Makefile.msc Makefile.fallback \ + sqlite3.rc sqlite3rc.h Replace.cs \ + sqlite3.pc.in sqlite3.1 + +# +# Maintenance note: dist_name must be sqlite-$(PACKAGE_VERSION) so +# that tool/mkautoconfamal.sh knows how to find it. +# +dist_name = sqlite-$(PACKAGE_VERSION) +dist_tarball = $(dist_name).tar.gz +dist: + rm -fr $(dist_name) + mkdir -p $(dist_name) + cp -rp $(DIST_FILES) $(dist_name)/. + tar czf $(dist_tarball) $(dist_name) + rm -fr $(dist_name) + ls -l $(dist_tarball) diff --git a/autoconf/Makefile.msc b/autoconf/Makefile.msc index 40d0e8113d..4f96e3b189 100644 --- a/autoconf/Makefile.msc +++ b/autoconf/Makefile.msc @@ -18,6 +18,15 @@ TOP = . +# Optionally set EXTRA_SRC to a list of C files to append to +# the generated sqlite3.c. Any sqlite3 extensions added this +# way may require manual editing, as described in +# https://sqlite.org/forum/forumpost/903f721f3e7c0d25 +# +!IFNDEF EXTRA_SRC +EXTRA_SRC = +!ENDIF + # Set this non-0 to enable full warnings (-W4, etc) when compiling. # !IFNDEF USE_FULLWARN @@ -52,6 +61,21 @@ MINIMAL_AMALGAMATION = 0 USE_STDCALL = 0 !ENDIF +# Use the USE_SEH=0 option on the nmake command line to omit structured +# exception handling (SEH) support. SEH is on by default. +# +!IFNDEF USE_SEH +USE_SEH = 1 +!ENDIF + +# Use STATICALLY_LINK_TCL=1 to statically link against TCL +# +!IFNDEF STATICALLY_LINK_TCL +STATICALLY_LINK_TCL = 0 +!ELSEIF $(STATICALLY_LINK_TCL)!=0 +CCOPTS = $(CCOPTS) -DSTATIC_BUILD +!ENDIF + # Set this non-0 to have the shell executable link against the core dynamic # link library. # @@ -180,6 +204,12 @@ WIN32HEAP = 0 OSTRACE = 0 !ENDIF +# enable address sanitizer using ASAN=1 on the command-line. +# +!IFNDEF ASAN +ASAN = 0 +!ENDIF + # Set this to one of the following values to enable various debugging # features. Each level includes the debugging options from the previous # levels. Currently, the recognized values for DEBUG are: @@ -217,6 +247,12 @@ SESSION = 0 RBU = 0 !ENDIF +# Set this to non-0 to enable support for blocking locks. +# +!IFNDEF SETLK_TIMEOUT +SETLK_TIMEOUT = 0 +!ENDIF + # Set the source code file to be used by executables and libraries when # they need the amalgamation. # @@ -281,19 +317,29 @@ SQLITE3EXEPDB = /pdb:sqlite3sh.pdb # the Windows platform. 
# !IFNDEF OPT_FEATURE_FLAGS +OPT_FEATURE_FLAGS = $(OPT_XTRA) !IF $(MINIMAL_AMALGAMATION)==0 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_FTS3=1 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_FTS5=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_RTREE=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_GEOPOLY=1 -OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_JSON1=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_STMTVTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_DBPAGE_VTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_DBSTAT_VTAB=1 OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_BYTECODE_VTAB=1 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_CARRAY=1 !ENDIF OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_COLUMN_METADATA=1 !ENDIF +# Additional feature-options above and beyond what are normally used can be +# be added using OPTIONS=.... on the command-line. These values are +# appended to the OPT_FEATURE_FLAGS variable. +# +!IFDEF OPTIONS +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) $(OPTIONS) +!ENDIF + # Should the session extension be enabled? If so, add compilation options # to enable it. # @@ -304,6 +350,7 @@ OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_PREUPDATE_HOOK=1 # Always enable math functions on Windows OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_MATH_FUNCTIONS +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_PERCENTILE # Should the rbu extension be enabled? If so, add compilation options # to enable it. @@ -312,6 +359,14 @@ OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_MATH_FUNCTIONS OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_RBU=1 !ENDIF +# Should structured exception handling (SEH) be enabled for WAL mode in +# the core library? It is on by default. Only omit it if the +# USE_SEH=0 option is provided on the nmake command-line. +# +!IF $(USE_SEH)==0 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_OMIT_SEH=1 +!ENDIF + # These are the "extended" SQLite compilation options used when compiling for # the Windows 10 platform. # @@ -325,6 +380,10 @@ EXT_FEATURE_FLAGS = !ENDIF !ENDIF +!IF $(SETLK_TIMEOUT)!=0 +OPT_FEATURE_FLAGS = $(OPT_FEATURE_FLAGS) -DSQLITE_ENABLE_SETLK_TIMEOUT +!ENDIF + ############################################################################### ############################### END OF OPTIONS ################################ ############################################################################### @@ -502,12 +561,12 @@ RCC = $(RC) -DSQLITE_OS_WIN=1 -I. 
-I$(TOP) $(RCOPTS) $(RCCOPTS) # !IF $(USE_STDCALL)!=0 || $(FOR_WIN10)!=0 !IF "$(PLATFORM)"=="x86" -CORE_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -SHELL_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +CORE_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +SHELL_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall !ELSE !IFNDEF PLATFORM -CORE_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall -SHELL_CCONV_OPTS = -Gz -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +CORE_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall +SHELL_CCONV_OPTS = -Gz -guard:cf -DSQLITE_CDECL=__cdecl -DSQLITE_APICALL=__stdcall -DSQLITE_CALLBACK=__stdcall -DSQLITE_SYSAPI=__stdcall !ELSE CORE_CCONV_OPTS = SHELL_CCONV_OPTS = @@ -667,7 +726,7 @@ RCC = $(RCC) -DSQLITE_ENABLE_API_ARMOR=1 !ENDIF !IF $(DEBUG)>2 -TCC = $(TCC) -DSQLITE_DEBUG=1 +TCC = $(TCC) -DSQLITE_DEBUG=1 -DSQLITE_USE_W32_FOR_CONSOLE_IO RCC = $(RCC) -DSQLITE_DEBUG=1 !IF $(DYNAMIC_SHELL)==0 TCC = $(TCC) -DSQLITE_ENABLE_WHERETRACE -DSQLITE_ENABLE_SELECTTRACE @@ -719,6 +778,13 @@ RCC = $(RCC) -DSQLITE_WIN32_MALLOC_VALIDATE=1 !ENDIF +# Address sanitizer if ASAN=1 +# +!IF $(ASAN)>0 +TCC = $(TCC) /fsanitize=address +!ENDIF + + # Compiler options needed for programs that use the readline() library. # !IFNDEF READLINE_FLAGS @@ -747,15 +813,6 @@ RCC = $(RCC) -DSQLITE_THREAD_OVERRIDE_LOCK=-1 TLIBS = !ENDIF -# Flags controlling use of the in memory btree implementation -# -# SQLITE_TEMP_STORE is 0 to force temporary tables to be in a file, 1 to -# default to file, 2 to default to memory, and 3 to force temporary -# tables to always be in memory. -# -TCC = $(TCC) -DSQLITE_TEMP_STORE=1 -RCC = $(RCC) -DSQLITE_TEMP_STORE=1 - # Enable/disable loadable extensions, and other optional features # based on configuration. (-DSQLITE_OMIT*, -DSQLITE_ENABLE*). # The same set of OMIT and ENABLE flags should be passed to the @@ -956,9 +1013,14 @@ LIBRESOBJS = # when the shell is not being dynamically linked. 
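Since the hard-coded -DSQLITE_TEMP_STORE=1 above is removed without a replacement, Makefile.msc no longer pins a temp-store policy (0 = always file, 1 = default file, 2 = default memory, 3 = always memory, per the removed comment). A build that wants a specific value now has to supply it explicitly; one plausible route, assuming the new OPTIONS macro reaches the compiler flags as the surrounding comments suggest, is:

    nmake /f Makefile.msc dll "OPTIONS=-DSQLITE_TEMP_STORE=2"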
# !IF $(DYNAMIC_SHELL)==0 && $(FOR_WIN10)==0 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_DQS=0 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_FTS4=1 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_EXPLAIN_COMMENTS=1 SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_OFFSET_SQL_FUNC=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_PERCENTILE=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_UNKNOWN_SQL_FUNCTION=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_ENABLE_STMT_SCANSTATUS=1 +SHELL_COMPILE_OPTS = $(SHELL_COMPILE_OPTS) -DSQLITE_STRICT_SUBTYPE=1 !ENDIF @@ -983,6 +1045,11 @@ dll: $(SQLITE3DLL) # shell: $(SQLITE3EXE) +# jimsh0 - replacement for tclsh +# +jimsh0.exe: $(TOP)\autosetup\jimsh0.c + cl -DHAVE__FULLPATH=1 $(TOP)\autosetup\jimsh0.c + $(SQLITE3DLL): $(LIBOBJ) $(LIBRESOBJS) $(CORE_LINK_DEP) $(LD) $(LDFLAGS) $(LTLINKOPTS) $(LTLIBPATHS) /DLL $(CORE_LINK_OPTS) /OUT:$@ $(LIBOBJ) $(LIBRESOBJS) $(LTLIBS) $(TLIBS) @@ -1033,5 +1100,6 @@ $(LIBRESOBJS): $(TOP)\sqlite3.rc rcver.vc $(SQLITE3H) clean: del /Q *.exp *.lo *.ilk *.lib *.obj *.ncb *.pdb *.sdf *.suo 2>NUL - del /Q *.bsc *.def *.cod *.da *.bb *.bbg *.vc gmon.out 2>NUL + del /Q *.bsc *.cod *.da *.bb *.bbg *.vc gmon.out 2>NUL + del /Q sqlite3.def tclsqlite3.def ctime.c pragma.h 2>NUL del /Q $(SQLITE3EXE) $(SQLITE3DLL) Replace.exe 2>NUL diff --git a/autoconf/README.first b/autoconf/README.first index 5c2ea0a70f..75c4a76d61 100644 --- a/autoconf/README.first +++ b/autoconf/README.first @@ -1,11 +1,12 @@ -This directory contains components use to build an autoconf-ready package -of the SQLite amalgamation: sqlite-autoconf-30XXXXXX.tar.gz +This directory contains components used to build an autoconf-like +package of the SQLite amalgamation: sqlite-autoconf-30XXXXXX.tar.gz -To build the autoconf amalgamation, run from the top-level: +To build the autoconf amalgamation, run from the top of the canonical +source tree: ./configure make amalgamation-tarball -The amalgamation-tarball target (also available in "main.mk") runs the -script tool/mkautoconfamal.sh which does the work. Refer to that script -for details. +The amalgamation-tarball target (available in "main.mk") runs the +script tool/mkautoconfamal.sh which does the work. Refer to that +script for details. diff --git a/autoconf/README.txt b/autoconf/README.txt index 6e62a4e138..ca0ed20fd4 100644 --- a/autoconf/README.txt +++ b/autoconf/README.txt @@ -4,13 +4,44 @@ This package contains: * the sqlite3.h and sqlite3ext.h header files that define the C-language interface to the sqlite3.c library file * the shell.c file used to build the sqlite3 command-line shell program - * autoconf/automake installation infrastucture for building on POSIX + * autoconf-like installation infrastucture for building on POSIX compliant systems * a Makefile.msc, sqlite3.rc, and Replace.cs for building with Microsoft Visual C++ on Windows -SUMMARY OF HOW TO BUILD -======================= +WHY USE THIS PACKAGE? +===================== + +The canonical make system for SQLite requires TCL as part of the build +process. Various TCL scripts are used to generate parts of the code and +TCL is used to run tests. But some people would prefer to build SQLite +using only generic tools and without having to install TCL. The purpose +of this package is to provide that capability. + +This package contains a pre-build SQLite amalgamation file "sqlite3.c" +(and its associated header file "sqlite3.h"). 
Because the +amalgamation has been pre-built, no TCL is required for the code +generate (the configure script itself is written in TCL but it can use +the embedded copy of JimTCL). + +REASONS TO USE THE CANONICAL BUILD SYSTEM RATHER THAN THIS PACKAGE +================================================================== + + * the canonical build system allows you to run tests to verify that + the build worked + * the canonical build system supports more compile-time options + * the canonical build system works for any arbitrary check-in to + the SQLite source tree + +Step-by-step instructions on how to build using the canonical make +system for SQLite can be found at: + + https://sqlite.org/src/doc/trunk/doc/compile-for-unix.md + https://sqlite.org/src/doc/trunk/doc/compile-for-windows.md + + +SUMMARY OF HOW TO BUILD USING THIS PACKAGE +========================================== Unix: ./configure; make Windows: nmake /f Makefile.msc @@ -18,14 +49,12 @@ SUMMARY OF HOW TO BUILD BUILDING ON POSIX ================= -The generic installation instructions for autoconf/automake are found -in the INSTALL file. - -The following SQLite specific boolean options are supported: +The configure script follows common conventions, making it easy +to use for anyone who has configured a software tree before. +It supports a number of build-time flags, the full list of which +can be seen by running: - --enable-readline use readline in shell tool [default=yes] - --enable-threadsafe build a thread-safe library [default=yes] - --enable-dynamic-extensions support loadable extensions [default=yes] + ./configure --help The default value for the CFLAGS variable (options passed to the C compiler) includes debugging symbols in the build, resulting in larger @@ -36,10 +65,11 @@ line like this: to produce a smaller installation footprint. -Other SQLite compilation parameters can also be set using CFLAGS. For +Many SQLite compilation parameters can be defined by passing flags +to the configure script. Others may be passed on in the CFLAGS. For example: - $ CFLAGS="-Os -DSQLITE_THREADSAFE=0" ./configure + $ CFLAGS="-Os -DSQLITE_OMIT_DEPRECATED" ./configure BUILDING WITH MICROSOFT VISUAL C++ @@ -53,48 +83,6 @@ Using Microsoft Visual C++ 2005 (or later) is recommended. Several Windows platform variants may be built by adding additional macros to the NMAKE command line. -Building for WinRT 8.0 ----------------------- - - FOR_WINRT=1 - -Using Microsoft Visual C++ 2012 (or later) is required. When using the -above, something like the following macro will need to be added to the -NMAKE command line as well: - - "NSDKLIBPATH=%WindowsSdkDir%\..\8.0\lib\win8\um\x86" - -Building for WinRT 8.1 ----------------------- - - FOR_WINRT=1 - -Using Microsoft Visual C++ 2013 (or later) is required. When using the -above, something like the following macro will need to be added to the -NMAKE command line as well: - - "NSDKLIBPATH=%WindowsSdkDir%\..\8.1\lib\winv6.3\um\x86" - -Building for UWP 10.0 ---------------------- - - FOR_WINRT=1 FOR_UWP=1 - -Using Microsoft Visual C++ 2015 (or later) is required. 
When using the -above, something like the following macros will need to be added to the -NMAKE command line as well: - - "NSDKLIBPATH=%WindowsSdkDir%\..\10\lib\10.0.10586.0\um\x86" - "PSDKLIBPATH=%WindowsSdkDir%\..\10\lib\10.0.10586.0\um\x86" - "NUCRTLIBPATH=%UniversalCRTSdkDir%\..\10\lib\10.0.10586.0\ucrt\x86" - -Building for the Windows 10 SDK -------------------------------- - - FOR_WIN10=1 - -Using Microsoft Visual C++ 2015 (or later) is required. When using the -above, no other macros should be needed on the NMAKE command line. Other preprocessor defines -------------------------- @@ -102,10 +90,10 @@ Other preprocessor defines Additionally, preprocessor defines may be specified by using the OPTS macro on the NMAKE command line. However, not all possible preprocessor defines may be specified in this manner as some require the amalgamation to be built -with them enabled (see http://www.sqlite.org/compile.html). For example, the +with them enabled (see http://sqlite.org/compile.html). For example, the following will work: - "OPTS=-DSQLITE_ENABLE_STAT4=1 -DSQLITE_ENABLE_JSON1=1" + "OPTS=-DSQLITE_ENABLE_STAT4=1 -DSQLITE_OMIT_JSON=1" However, the following will not compile unless the amalgamation was built with it enabled: diff --git a/autoconf/auto.def b/autoconf/auto.def new file mode 100644 index 0000000000..c61d81e506 --- /dev/null +++ b/autoconf/auto.def @@ -0,0 +1,25 @@ +#!/do/not/tclsh +# ^^^ help out editors which guess this file's content type. +# +# This is the main autosetup-compatible configure script for the +# "autoconf" bundle of the SQLite project. +use sqlite-config +sqlite-configure autoconf { + sqlite-handle-debug + sqlite-check-common-bins ;# must come before [sqlite-handle-wasi-sdk] + sqlite-handle-wasi-sdk ;# must run relatively early, as it changes the environment + sqlite-check-common-system-deps + proj-define-for-opt static-shell ENABLE_STATIC_SHELL \ + "Link library statically into the CLI shell?" + proj-define-for-opt static-cli-shell STATIC_CLI_SHELL "Statically link CLI shell?" + if {![opt-bool static-shell] && [opt-bool static-cli-shell]} { + proj-fatal "--disable-static-shell and --static-cli-shell are mutualy exclusive" + } + if {![opt-bool shared] && ![opt-bool static-shell]} { + proj-opt-set shared 1 + proj-indented-notice { + NOTICE: ignoring --disable-shared because --disable-static-shell + was specified. + } + } +} diff --git a/autoconf/configure.ac b/autoconf/configure.ac deleted file mode 100644 index e050786bd7..0000000000 --- a/autoconf/configure.ac +++ /dev/null @@ -1,285 +0,0 @@ - -#----------------------------------------------------------------------- -# Supports the following non-standard switches. -# -# --enable-threadsafe -# --enable-readline -# --enable-editline -# --enable-static-shell -# --enable-dynamic-extensions -# - -AC_PREREQ(2.61) -AC_INIT(sqlite, --SQLITE-VERSION--, http://www.sqlite.org) -AC_CONFIG_SRCDIR([sqlite3.c]) -AC_CONFIG_AUX_DIR([.]) - -# Use automake. -AM_INIT_AUTOMAKE([foreign]) - -AC_SYS_LARGEFILE - -# Check for required programs. -AC_PROG_CC -AC_PROG_LIBTOOL -AC_PROG_MKDIR_P - -# Check for library functions that SQLite can optionally use. 
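The new autoconf/auto.def above drives this bundle through autosetup. A few sketch invocations using the flags it defines (the first is the conventional build; --static-cli-shell requires static versions of all needed libraries and is aimed at environments like Alpine Linux):

    ./configure && make && make install
    ./configure --static-cli-shell && make
    ./configure --disable-static-shell && make

Per the logic in auto.def, --disable-static-shell and --static-cli-shell cannot be combined, and passing --disable-static-shell causes the shared library to be built even if --disable-shared was requested.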
-AC_CHECK_FUNCS([fdatasync usleep fullfsync localtime_r gmtime_r]) -AC_FUNC_STRERROR_R - -AC_CONFIG_FILES([Makefile sqlite3.pc]) -BUILD_CFLAGS= -AC_SUBST(BUILD_CFLAGS) - -#------------------------------------------------------------------------- -# Two options to enable readline compatible libraries: -# -# --enable-editline -# --enable-readline -# -# Both are enabled by default. If, after command line processing both are -# still enabled, the script searches for editline first and automatically -# disables readline if it is found. So, to use readline explicitly, the -# user must pass "--disable-editline". To disable command line editing -# support altogether, "--disable-editline --disable-readline". -# -# When searching for either library, check for headers before libraries -# as some distros supply packages that contain libraries but not header -# files, which come as a separate development package. -# -AC_ARG_ENABLE(editline, [AS_HELP_STRING([--enable-editline],[use BSD libedit])]) -AC_ARG_ENABLE(readline, [AS_HELP_STRING([--enable-readline],[use readline])]) - -AS_IF([ test x"$enable_editline" != xno ],[ - AC_CHECK_HEADERS([editline/readline.h],[ - sLIBS=$LIBS - LIBS="" - AC_SEARCH_LIBS([readline],[edit],[ - AC_DEFINE([HAVE_EDITLINE],1,Define to use BSD editline) - READLINE_LIBS="$LIBS -ltinfo" - enable_readline=no - ],[],[-ltinfo]) - AS_UNSET(ac_cv_search_readline) - LIBS=$sLIBS - ]) -]) - -AS_IF([ test x"$enable_readline" != xno ],[ - AC_CHECK_HEADERS([readline/readline.h],[ - sLIBS=$LIBS - LIBS="" - AC_SEARCH_LIBS(tgetent, termcap curses ncurses ncursesw, [], []) - AC_SEARCH_LIBS(readline,[readline edit], [ - AC_DEFINE([HAVE_READLINE],1,Define to use readline or wrapper) - READLINE_LIBS=$LIBS - ]) - LIBS=$sLIBS - ]) -]) - -AC_SUBST(READLINE_LIBS) -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-threadsafe -# -AC_ARG_ENABLE(threadsafe, [AS_HELP_STRING( - [--enable-threadsafe], [build a thread-safe library [default=yes]])], - [], [enable_threadsafe=yes]) -if test x"$enable_threadsafe" == "xno"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_THREADSAFE=0" -else - BUILD_CFLAGS="$BUILD_CFLAGS -D_REENTRANT=1 -DSQLITE_THREADSAFE=1" - AC_SEARCH_LIBS(pthread_create, pthread) - AC_SEARCH_LIBS(pthread_mutexattr_init, pthread) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-dynamic-extensions -# -AC_ARG_ENABLE(dynamic-extensions, [AS_HELP_STRING( - [--enable-dynamic-extensions], [support loadable extensions [default=yes]])], - [], [enable_dynamic_extensions=yes]) -if test x"$enable_dynamic_extensions" != "xno"; then - AC_SEARCH_LIBS(dlopen, dl) -else - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_OMIT_LOAD_EXTENSION=1" -fi -AC_MSG_CHECKING([for whether to support dynamic extensions]) -AC_MSG_RESULT($enable_dynamic_extensions) -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-math -# -AC_ARG_ENABLE(math, [AS_HELP_STRING( - [--enable-math], [SQL math functions [default=yes]])], - [], [enable_math=yes]) -AC_MSG_CHECKING([SQL math functions]) -if test x"$enable_math" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_MATH_FUNCTIONS" - AC_MSG_RESULT([enabled]) - AC_SEARCH_LIBS(ceil, m) -else - AC_MSG_RESULT([disabled]) -fi 
-#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-fts4 -# -AC_ARG_ENABLE(fts4, [AS_HELP_STRING( - [--enable-fts4], [include fts4 support [default=yes]])], - [], [enable_fts4=yes]) -AC_MSG_CHECKING([FTS4 extension]) -if test x"$enable_fts4" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_FTS4" - AC_MSG_RESULT([enabled]) -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-fts3 -# -AC_ARG_ENABLE(fts3, [AS_HELP_STRING( - [--enable-fts3], [include fts3 support [default=no]])], - [], []) -AC_MSG_CHECKING([FTS3 extension]) -if test x"$enable_fts3" = "xyes" -a x"$enable_fts4" = "xno"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_FTS3" - AC_MSG_RESULT([enabled]) -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-fts5 -# -AC_ARG_ENABLE(fts5, [AS_HELP_STRING( - [--enable-fts5], [include fts5 support [default=yes]])], - [], [enable_fts5=yes]) -AC_MSG_CHECKING([FTS5 extension]) -if test x"$enable_fts5" = "xyes"; then - AC_MSG_RESULT([enabled]) - AC_SEARCH_LIBS(log, m) - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_FTS5" -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-json1 -# -AC_ARG_ENABLE(json1, [AS_HELP_STRING( - [--enable-json1], [include json1 support [default=yes]])], - [],[enable_json1=yes]) -AC_MSG_CHECKING([JSON functions]) -if test x"$enable_json1" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_JSON1" - AC_MSG_RESULT([enabled]) -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-rtree -# -AC_ARG_ENABLE(rtree, [AS_HELP_STRING( - [--enable-rtree], [include rtree support [default=yes]])], - [], [enable_rtree=yes]) -AC_MSG_CHECKING([RTREE extension]) -if test x"$enable_rtree" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_RTREE -DSQLITE_ENABLE_GEOPOLY" - AC_MSG_RESULT([enabled]) -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-session -# -AC_ARG_ENABLE(session, [AS_HELP_STRING( - [--enable-session], [enable the session extension [default=no]])], - [], []) -AC_MSG_CHECKING([Session extension]) -if test x"$enable_session" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_PREUPDATE_HOOK" - AC_MSG_RESULT([enabled]) -else - AC_MSG_RESULT([disabled]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-debug -# -AC_ARG_ENABLE(debug, [AS_HELP_STRING( - [--enable-debug], [build with debugging features enabled [default=no]])], - [], []) -AC_MSG_CHECKING([Build type]) -if test x"$enable_debug" = "xyes"; then - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_DEBUG -DSQLITE_ENABLE_SELECTTRACE -DSQLITE_ENABLE_WHERETRACE" - CFLAGS="-g -O0" - AC_MSG_RESULT([debug]) -else - 
AC_MSG_RESULT([release]) -fi -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# --enable-static-shell -# -AC_ARG_ENABLE(static-shell, [AS_HELP_STRING( - [--enable-static-shell], - [statically link libsqlite3 into shell tool [default=yes]])], - [], [enable_static_shell=yes]) -if test x"$enable_static_shell" = "xyes"; then - EXTRA_SHELL_OBJ=sqlite3-sqlite3.$OBJEXT -else - EXTRA_SHELL_OBJ=libsqlite3.la -fi -AC_SUBST(EXTRA_SHELL_OBJ) -#----------------------------------------------------------------------- - -AC_CHECK_FUNCS(posix_fallocate) -AC_CHECK_HEADERS(zlib.h,[ - AC_SEARCH_LIBS(deflate,z,[BUILD_CFLAGS="$BUILD_CFLAGS -DSQLITE_HAVE_ZLIB"]) -]) - -AC_SEARCH_LIBS(system,,,[SHELL_CFLAGS="-DSQLITE_NOHAVE_SYSTEM"]) -AC_SUBST(SHELL_CFLAGS) - -#----------------------------------------------------------------------- -# UPDATE: Maybe it's better if users just set CFLAGS before invoking -# configure. This option doesn't really add much... -# -# --enable-tempstore -# -# AC_ARG_ENABLE(tempstore, [AS_HELP_STRING( -# [--enable-tempstore], -# [in-memory temporary tables (never, no, yes, always) [default=no]])], -# [], [enable_tempstore=no]) -# AC_MSG_CHECKING([for whether or not to store temp tables in-memory]) -# case "$enable_tempstore" in -# never ) TEMP_STORE=0 ;; -# no ) TEMP_STORE=1 ;; -# always ) TEMP_STORE=3 ;; -# yes ) TEMP_STORE=3 ;; -# * ) -# TEMP_STORE=1 -# enable_tempstore=yes -# ;; -# esac -# AC_MSG_RESULT($enable_tempstore) -# AC_SUBST(TEMP_STORE) -#----------------------------------------------------------------------- - -AC_OUTPUT diff --git a/autoconf/tea/Makefile.in b/autoconf/tea/Makefile.in index 3e481dadfe..04c8f87f55 100644 --- a/autoconf/tea/Makefile.in +++ b/autoconf/tea/Makefile.in @@ -1,440 +1,559 @@ -# Makefile.in -- +all: # -# This file is a Makefile for Sample TEA Extension. If it has the name -# "Makefile.in" then it is a template for a Makefile; to generate the -# actual Makefile, run "./configure", which is a configuration script -# generated by the "autoconf" program (constructs like "@foo@" will get -# replaced in the actual Makefile. -# -# Copyright (c) 1999 Scriptics Corporation. -# Copyright (c) 2002-2005 ActiveState Corporation. -# -# See the file "license.terms" for information on usage and redistribution -# of this file, and for a DISCLAIMER OF ALL WARRANTIES. -# -# RCS: @(#) $Id: Makefile.in,v 1.59 2005/07/26 19:17:02 mdejong Exp $ - -#======================================================================== -# Add additional lines to handle any additional AC_SUBST cases that -# have been added in a customized configure script. -#======================================================================== - -#SAMPLE_NEW_VAR = @SAMPLE_NEW_VAR@ - -#======================================================================== -# Nothing of the variables below this line should need to be changed. -# Please check the TARGETS section below to make sure the make targets -# are correct. -#======================================================================== - -#======================================================================== -# The names of the source files is defined in the configure script. -# The object files are used for linking into the final library. -# This will be used when a dist target is added to the Makefile. -# It is not important to specify the directory, as long as it is the -# $(srcdir) or in the generic, win or unix subdirectory. 
-#======================================================================== - -PKG_SOURCES = @PKG_SOURCES@ -PKG_OBJECTS = @PKG_OBJECTS@ - -PKG_STUB_SOURCES = @PKG_STUB_SOURCES@ -PKG_STUB_OBJECTS = @PKG_STUB_OBJECTS@ - -#======================================================================== -# PKG_TCL_SOURCES identifies Tcl runtime files that are associated with -# this package that need to be installed, if any. -#======================================================================== - -PKG_TCL_SOURCES = @PKG_TCL_SOURCES@ - -#======================================================================== -# This is a list of public header files to be installed, if any. -#======================================================================== - -PKG_HEADERS = @PKG_HEADERS@ - -#======================================================================== -# "PKG_LIB_FILE" refers to the library (dynamic or static as per -# configuration options) composed of the named objects. -#======================================================================== - -PKG_LIB_FILE = @PKG_LIB_FILE@ -PKG_STUB_LIB_FILE = @PKG_STUB_LIB_FILE@ - -lib_BINARIES = $(PKG_LIB_FILE) -BINARIES = $(lib_BINARIES) - -SHELL = @SHELL@ - -srcdir = @srcdir@ -prefix = @prefix@ -exec_prefix = @exec_prefix@ - -bindir = @bindir@ -libdir = @libdir@ -datarootdir = @datarootdir@ -datadir = @datadir@ -mandir = @mandir@ -includedir = @includedir@ - -DESTDIR = - -PKG_DIR = $(PACKAGE_NAME)$(PACKAGE_VERSION) -pkgdatadir = $(datadir)/$(PKG_DIR) -pkglibdir = $(libdir)/$(PKG_DIR) -pkgincludedir = $(includedir)/$(PKG_DIR) - -top_builddir = . - -INSTALL = @INSTALL@ -INSTALL_PROGRAM = @INSTALL_PROGRAM@ -INSTALL_DATA = @INSTALL_DATA@ -INSTALL_SCRIPT = @INSTALL_SCRIPT@ - -PACKAGE_NAME = @PACKAGE_NAME@ -PACKAGE_VERSION = @PACKAGE_VERSION@ -CC = @CC@ -CFLAGS_DEFAULT = @CFLAGS_DEFAULT@ -CFLAGS_WARNING = @CFLAGS_WARNING@ -CLEANFILES = @CLEANFILES@ -EXEEXT = @EXEEXT@ -LDFLAGS_DEFAULT = @LDFLAGS_DEFAULT@ -MAKE_LIB = @MAKE_LIB@ -MAKE_SHARED_LIB = @MAKE_SHARED_LIB@ -MAKE_STATIC_LIB = @MAKE_STATIC_LIB@ -MAKE_STUB_LIB = @MAKE_STUB_LIB@ -OBJEXT = @OBJEXT@ -RANLIB = @RANLIB@ -RANLIB_STUB = @RANLIB_STUB@ -SHLIB_CFLAGS = @SHLIB_CFLAGS@ -SHLIB_LD = @SHLIB_LD@ -SHLIB_LD_LIBS = @SHLIB_LD_LIBS@ -STLIB_LD = @STLIB_LD@ -#TCL_DEFS = @TCL_DEFS@ -TCL_BIN_DIR = @TCL_BIN_DIR@ -TCL_SRC_DIR = @TCL_SRC_DIR@ -#TK_BIN_DIR = @TK_BIN_DIR@ -#TK_SRC_DIR = @TK_SRC_DIR@ - -# This is no longer necessary even for packages that use private Tcl headers -#TCL_TOP_DIR_NATIVE = @TCL_TOP_DIR_NATIVE@ -# Not used, but retained for reference of what libs Tcl required -#TCL_LIBS = @TCL_LIBS@ - -#======================================================================== -# TCLLIBPATH seeds the auto_path in Tcl's init.tcl so we can test our -# package without installing. The other environment variables allow us -# to test against an uninstalled Tcl. Add special env vars that you -# require for testing here (like TCLX_LIBRARY). 
-#======================================================================== - -EXTRA_PATH = $(top_builddir):$(TCL_BIN_DIR) -#EXTRA_PATH = $(top_builddir):$(TCL_BIN_DIR):$(TK_BIN_DIR) -TCLLIBPATH = $(top_builddir) -TCLSH_ENV = TCL_LIBRARY=`@CYGPATH@ $(TCL_SRC_DIR)/library` \ - @LD_LIBRARY_PATH_VAR@="$(EXTRA_PATH):$(@LD_LIBRARY_PATH_VAR@)" \ - PATH="$(EXTRA_PATH):$(PATH)" \ - TCLLIBPATH="$(TCLLIBPATH)" -# TK_LIBRARY=`@CYGPATH@ $(TK_SRC_DIR)/library` - -TCLSH_PROG = @TCLSH_PROG@ -TCLSH = $(TCLSH_ENV) $(TCLSH_PROG) - -#WISH_PROG = @WISH_PROG@ -#WISH = $(TCLSH_ENV) $(WISH_PROG) - - -SHARED_BUILD = @SHARED_BUILD@ - -INCLUDES = @PKG_INCLUDES@ @TCL_INCLUDES@ -I$(srcdir)/.. -#INCLUDES = @PKG_INCLUDES@ @TCL_INCLUDES@ @TK_INCLUDES@ @TK_XINCLUDES@ - -PKG_CFLAGS = @PKG_CFLAGS@ - -# TCL_DEFS is not strictly need here, but if you remove it, then you -# must make sure that configure.in checks for the necessary components -# that your library may use. TCL_DEFS can actually be a problem if -# you do not compile with a similar machine setup as the Tcl core was -# compiled with. -#DEFS = $(TCL_DEFS) @DEFS@ $(PKG_CFLAGS) -DEFS = @DEFS@ $(PKG_CFLAGS) - -CONFIG_CLEAN_FILES = Makefile pkgIndex.tcl - -CPPFLAGS = @CPPFLAGS@ -LIBS = @PKG_LIBS@ @LIBS@ -AR = @AR@ -CFLAGS = @CFLAGS@ -COMPILE = $(CC) $(DEFS) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) - -#======================================================================== -# Start of user-definable TARGETS section -#======================================================================== - -#======================================================================== -# TEA TARGETS. Please note that the "libraries:" target refers to platform -# independent files, and the "binaries:" target inclues executable programs and -# platform-dependent libraries. Modify these targets so that they install -# the various pieces of your package. The make and install rules -# for the BINARIES that you specified above have already been done. -#======================================================================== - -all: binaries libraries doc - -#======================================================================== -# The binaries target builds executable programs, Windows .dll's, unix -# shared/static libraries, and any other platform-dependent files. -# The list of targets to build for "binaries:" is specified at the top -# of the Makefile, in the "BINARIES" variable. -#======================================================================== - -binaries: $(BINARIES) - -libraries: - - -#======================================================================== -# Your doc target should differentiate from doc builds (by the developer) -# and doc installs (see install-doc), which just install the docs on the -# end user machine when building from source. -#======================================================================== - -doc: - @echo "If you have documentation to create, place the commands to" - @echo "build the docs in the 'doc:' target. For example:" - @echo " xml2nroff sample.xml > sample.n" - @echo " xml2html sample.xml > sample.html" - -install: all install-binaries install-libraries install-doc - -install-binaries: binaries install-lib-binaries install-bin-binaries - -#======================================================================== -# This rule installs platform-independent files, such as header files. -# The list=...; for p in $$list handles the empty list case x-platform. 
-#======================================================================== - -install-libraries: libraries - @mkdir -p $(DESTDIR)$(includedir) - @echo "Installing header files in $(DESTDIR)$(includedir)" - @list='$(PKG_HEADERS)'; for i in $$list; do \ - echo "Installing $(srcdir)/$$i" ; \ - $(INSTALL_DATA) $(srcdir)/$$i $(DESTDIR)$(includedir) ; \ - done; - -#======================================================================== -# Install documentation. Unix manpages should go in the $(mandir) -# directory. -#======================================================================== - -install-doc: doc - @mkdir -p $(DESTDIR)$(mandir)/mann - @echo "Installing documentation in $(DESTDIR)$(mandir)" - @list='$(srcdir)/doc/*.n'; for i in $$list; do \ - echo "Installing $$i"; \ - rm -f $(DESTDIR)$(mandir)/mann/`basename $$i`; \ - $(INSTALL_DATA) $$i $(DESTDIR)$(mandir)/mann ; \ - done - -test: binaries libraries - @echo "SQLite TEA distribution does not include tests" - -shell: binaries libraries - @$(TCLSH) $(SCRIPT) - -gdb: - $(TCLSH_ENV) gdb $(TCLSH_PROG) $(SCRIPT) - -depend: - -#======================================================================== -# $(PKG_LIB_FILE) should be listed as part of the BINARIES variable -# mentioned above. That will ensure that this target is built when you -# run "make binaries". -# -# The $(PKG_OBJECTS) objects are created and linked into the final -# library. In most cases these object files will correspond to the -# source files above. -#======================================================================== - -$(PKG_LIB_FILE): $(PKG_OBJECTS) - -rm -f $(PKG_LIB_FILE) - ${MAKE_LIB} - $(RANLIB) $(PKG_LIB_FILE) - -$(PKG_STUB_LIB_FILE): $(PKG_STUB_OBJECTS) - -rm -f $(PKG_STUB_LIB_FILE) - ${MAKE_STUB_LIB} - $(RANLIB_STUB) $(PKG_STUB_LIB_FILE) - -#======================================================================== -# We need to enumerate the list of .c to .o lines here. -# -# In the following lines, $(srcdir) refers to the toplevel directory -# containing your extension. If your sources are in a subdirectory, -# you will have to modify the paths to reflect this: -# -# sample.$(OBJEXT): $(srcdir)/generic/sample.c -# $(COMPILE) -c `@CYGPATH@ $(srcdir)/generic/sample.c` -o $@ -# -# Setting the VPATH variable to a list of paths will cause the makefile -# to look into these paths when resolving .c to .obj dependencies. -# As necessary, add $(srcdir):$(srcdir)/compat:.... -#======================================================================== - -VPATH = $(srcdir):$(srcdir)/generic:$(srcdir)/unix:$(srcdir)/win - -.c.@OBJEXT@: - $(COMPILE) -c `@CYGPATH@ $<` -o $@ - -#======================================================================== -# Distribution creation -# You may need to tweak this target to make it work correctly. 
-#======================================================================== - -#COMPRESS = tar cvf $(PKG_DIR).tar $(PKG_DIR); compress $(PKG_DIR).tar -COMPRESS = gtar zcvf $(PKG_DIR).tar.gz $(PKG_DIR) -DIST_ROOT = /tmp/dist -DIST_DIR = $(DIST_ROOT)/$(PKG_DIR) - -dist-clean: - rm -rf $(DIST_DIR) $(DIST_ROOT)/$(PKG_DIR).tar.* - -dist: dist-clean - mkdir -p $(DIST_DIR) - cp -p $(srcdir)/README* $(srcdir)/license* \ - $(srcdir)/aclocal.m4 $(srcdir)/configure $(srcdir)/*.in \ - $(DIST_DIR)/ - chmod 664 $(DIST_DIR)/Makefile.in $(DIST_DIR)/aclocal.m4 - chmod 775 $(DIST_DIR)/configure $(DIST_DIR)/configure.in - - for i in $(srcdir)/*.[ch]; do \ - if [ -f $$i ]; then \ - cp -p $$i $(DIST_DIR)/ ; \ - fi; \ - done; - - mkdir $(DIST_DIR)/tclconfig - cp $(srcdir)/tclconfig/install-sh $(srcdir)/tclconfig/tcl.m4 \ - $(DIST_DIR)/tclconfig/ - chmod 664 $(DIST_DIR)/tclconfig/tcl.m4 - chmod +x $(DIST_DIR)/tclconfig/install-sh - - list='demos doc generic library mac tests unix win'; \ - for p in $$list; do \ - if test -d $(srcdir)/$$p ; then \ - mkdir $(DIST_DIR)/$$p; \ - cp -p $(srcdir)/$$p/*.* $(DIST_DIR)/$$p/; \ - fi; \ - done - - (cd $(DIST_ROOT); $(COMPRESS);) - -#======================================================================== -# End of user-definable section -#======================================================================== - -#======================================================================== -# Don't modify the file to clean here. Instead, set the "CLEANFILES" -# variable in configure.in -#======================================================================== - -clean: - -test -z "$(BINARIES)" || rm -f $(BINARIES) - -rm -f *.$(OBJEXT) core *.core - -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) - -distclean: clean - -rm -f *.tab.c - -rm -f $(CONFIG_CLEAN_FILES) - -rm -f config.h config.cache config.log config.status - -#======================================================================== -# Install binary object libraries. On Windows this includes both .dll and -# .lib files. Because the .lib files are not explicitly listed anywhere, -# we need to deduce their existence from the .dll file of the same name. -# Library files go into the lib directory. -# In addition, this will generate the pkgIndex.tcl -# file in the install location (assuming it can find a usable tclsh shell) -# -# You should not have to modify this target. 
-#======================================================================== - -install-lib-binaries: binaries - @mkdir -p $(DESTDIR)$(pkglibdir) - @list='$(lib_BINARIES)'; for p in $$list; do \ - if test -f $$p; then \ - echo " $(INSTALL_PROGRAM) $$p $(DESTDIR)$(pkglibdir)/$$p"; \ - $(INSTALL_PROGRAM) $$p $(DESTDIR)$(pkglibdir)/$$p; \ - stub=`echo $$p|sed -e "s/.*\(stub\).*/\1/"`; \ - if test "x$$stub" = "xstub"; then \ - echo " $(RANLIB_STUB) $(DESTDIR)$(pkglibdir)/$$p"; \ - $(RANLIB_STUB) $(DESTDIR)$(pkglibdir)/$$p; \ - else \ - echo " $(RANLIB) $(DESTDIR)$(pkglibdir)/$$p"; \ - $(RANLIB) $(DESTDIR)$(pkglibdir)/$$p; \ - fi; \ - ext=`echo $$p|sed -e "s/.*\.//"`; \ - if test "x$$ext" = "xdll"; then \ - lib=`basename $$p|sed -e 's/.[^.]*$$//'`.lib; \ - if test -f $$lib; then \ - echo " $(INSTALL_DATA) $$lib $(DESTDIR)$(pkglibdir)/$$lib"; \ - $(INSTALL_DATA) $$lib $(DESTDIR)$(pkglibdir)/$$lib; \ - fi; \ - fi; \ - fi; \ - done - @list='$(PKG_TCL_SOURCES)'; for p in $$list; do \ - if test -f $(srcdir)/$$p; then \ - destp=`basename $$p`; \ - echo " Install $$destp $(DESTDIR)$(pkglibdir)/$$destp"; \ - $(INSTALL_DATA) $(srcdir)/$$p $(DESTDIR)$(pkglibdir)/$$destp; \ - fi; \ - done - @if test "x$(SHARED_BUILD)" = "x1"; then \ - echo " Install pkgIndex.tcl $(DESTDIR)$(pkglibdir)"; \ - $(INSTALL_DATA) pkgIndex.tcl $(DESTDIR)$(pkglibdir); \ +# Unless this file is named Makefile.in, you are probably looking +# at an automatically generated/filtered copy and should probably not +# edit it. +# +# This makefile is part of the teaish framework, a tool for building +# Tcl extensions, conceptually related to TEA/tclconfig but using the +# Autosetup configuration system instead of the GNU Autotools. +# +# A copy of this makefile gets processed for each extension separately +# and populated with info about how to build, test, and install the +# extension. +# +# Maintenance reminder: this file needs to stay portable with POSIX +# Make, not just GNU Make. Yes, that's unfortunate because it makes +# some things impossible (like skipping over swathes of rules when +# 'make distclean' is invoked). +# + +CC = @CC@ +INSTALL = @BIN_INSTALL@ +INSTALL.noexec = $(INSTALL) -m 0644 + +# +# Var name prefixes: +# +# teaish. => teaish core +# tx. => teaish extension +# +# Vars with a "tx." or "teaish." prefix are all "public" for purposes +# of the extension makefile, but the extension must not any "teaish." +# vars and must only modify "tx." vars where that allowance is +# specifically noted. +# +# Vars with a "teaish__" prefix are "private" and must not be used by +# the extension makefile. They may change semantics or be removed in +# any given teaish build. 
+# +tx.name = @TEAISH_NAME@ +tx.version = @TEAISH_VERSION@ +tx.name.pkg = @TEAISH_PKGNAME@ +tx.libdir = @TEAISH_LIBDIR_NAME@ +tx.loadPrefix = @TEAISH_LOAD_PREFIX@ +tx.tcl = @TEAISH_TCL@ +tx.makefile = @TEAISH_MAKEFILE@ +tx.makefile.in = @TEAISH_MAKEFILE_IN@ +tx.dll8.basename = @TEAISH_DLL8_BASENAME@ +tx.dll9.basename = @TEAISH_DLL9_BASENAME@ +tx.dll8 = @TEAISH_DLL8@ +tx.dll9 = @TEAISH_DLL9@ +tx.dll = $(tx.dll$(TCL_MAJOR_VERSION)) +tx.dir = @TEAISH_EXT_DIR@ +@if TEAISH_TM_TCL +# Input filename for tcl::tm-style module +tx.tm = @TEAISH_TM_TCL@ +# Target filename for tcl::tm-style installation +tx.tm.tgt = $(tx.name.pkg)-$(tx.version).tm +@endif + +@if TEAISH_DIST_NAME +tx.name.dist = @TEAISH_DIST_NAME@ +@else +tx.name.dist = $(teaish.name) +@endif + +teaish.dir = @abs_top_srcdir@ +#teaish.dir.autosetup = @TEAISH_AUTOSETUP_DIR@ +teaish.makefile = Makefile +teaish.makefile.in = $(teaish.dir)/Makefile.in +teaish__auto.def = $(teaish.dir)/auto.def + +# +# Autotools-conventional vars. We don't actually use these in this +# makefile but some may be referenced by vars imported via +# tclConfig.sh. They are part of the public API and may be reliably +# depended on from teaish.make.in. +# +bindir = @bindir@ +datadir = @datadir@ +exec_prefix = @exec_prefix@ +includedir = @includedir@ +infodir = @infodir@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +prefix = @prefix@ +runstatedir = @runstatedir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +sysconfdir = @sysconfdir@ + + +# +# Vars derived (mostly) from tclConfig.sh. These may be reliably +# used from the extension makefile. +# +TCLSH = @TCLSH_CMD@ +TCL_CONFIG_SH = @TCL_CONFIG_SH@ +TCL_EXEC_PREFIX = @TCL_EXEC_PREFIX@ +TCL_INCLUDE_SPEC = @TCL_INCLUDE_SPEC@ +TCL_LIBS = @TCL_LIBS@ +TCL_LIB_SPEC = @TCL_LIB_SPEC@ +TCL_MAJOR_VERSION = @TCL_MAJOR_VERSION@ +TCL_MINOR_VERSION = @TCL_MINOR_VERSION@ +TCL_PATCH_LEVEL = @TCL_PATCH_LEVEL@ +TCL_PREFIX = @TCL_PREFIX@ +TCL_SHLIB_SUFFIX = @TCL_SHLIB_SUFFIX@ +TCL_STUB_LIB_SPEC = @TCL_STUB_LIB_SPEC@ +TCL_VERSION = @TCL_VERSION@ +TCLLIBDIR = @TCLLIBDIR@ + +# +# CFLAGS.configure = CFLAGS as known at configure-time. +# +# This ordering is deliberate: flags populated via tcl's +# [teaish-cflags-add] should preceed CFLAGS and CPPFLAGS (which +# typically come from the ./configure command-line invocation). +# +CFLAGS.configure = @SH_CFLAGS@ @TEAISH_CFLAGS@ @CFLAGS@ @CPPFLAGS@ $(TCL_INCLUDE_SPEC) +CFLAGS.configure += -DUSE_TCL_STUBS=@TEAISH_USE_STUBS@ + +# +# LDFLAGS.configure = LDFLAGS as known at configure-time. +# +# This ordering is deliberate: flags populated via tcl's +# [teaish-ldflags-add] should precede LDFLAGS (which typically +# comes from the ./configure command-line invocation). +# +LDFLAGS.configure = @TEAISH_LDFLAGS@ @LDFLAGS@ + +# +# Linker flags for linkhing a shared library. +# +LDFLAGS.shlib = @SH_LDFLAGS@ + +# +# The following tx.XYZ vars may be populated/modified by teaish.tcl +# and/or teaish.make. +# + +# +# tx.src is the list of source or object files to include in the +# (single) compiler/linker invocation. This will initially contain any +# sources passed to [teaish-src-add], but may also be appended to by +# teaish.make. +# +tx.src = @TEAISH_EXT_SRC@ + +# +# tx.CFLAGS is typically set by teaish.make, whereas TEAISH_CFLAGS +# gets set up via the configure script. +# +tx.CFLAGS = +tx.CPPFLAGS = + +# +# tx.LDFLAGS is typically set by teaish.make, whereas TEAISH_LDFLAGS +# gets set up via the configure script. 
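Because CFLAGS.configure and LDFLAGS.configure above are frozen when configure runs, compiler and preprocessor flags should be handed to the configure script rather than to make. A sketch, with the extra define purely illustrative:

    CFLAGS="-g -O2" CPPFLAGS="-DEXAMPLE_EXTRA_DEFINE=1" ./configure --with-tclsh=/path/to/tclsh
    make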
+# +tx.LDFLAGS = + +# +# The list of 'dist' files may be appended to from teaish.make.in. +# It can also be set up from teaish.tcl using [teaish-dist-add] +# and/or [teaish-src-add -dist ...]. +# +tx.dist.files = @TEAISH_DIST_FILES@ + +# +# The base name for a distribution tar/zip file. +# +tx.dist.basename = $(tx.name.dist)-$(tx.version) + +# List of deps which may trigger an auto-reconfigure. +# +teaish__autogen.deps = \ + $(tx.makefile.in) $(teaish.makefile.in) \ + $(tx.tcl) \ + @TEAISH_PKGINDEX_TCL_IN@ @TEAISH_TM_TCL_IN@ \ + @AUTODEPS@ + +@if TEAISH_MAKEFILE_IN +$(tx.makefile): $(tx.makefile.in) +@endif + +teaish.autoreconfig = \ + @TEAISH_AUTORECONFIG@ + +# +# Problem: when more than one target can invoke TEAISH_AUTORECONFIG, +# we can get parallel reconfigures running. Thus, targets which +# may require reconfigure should depend on... +# +config.log: $(teaish__autogen.deps) + $(teaish.autoreconfig) +# ^^^ We would love to skip this when running [dist]clean, but there's +# no POSIX Make-portable way to do that. GNU Make can. +.PHONY: reconfigure +reconfigure: + $(teaish.autoreconfig) + +$(teaish.makefile): $(teaish__auto.def) $(teaish.makefile.in) \ + @AUTODEPS@ + +@if TEAISH_TESTER_TCL_IN +@TEAISH_TESTER_TCL_IN@: $(teaish__autogen.deps) +config.log: @TEAISH_TESTER_TCL_IN@ +@TEAISH_TESTER_TCL@: @TEAISH_TESTER_TCL_IN@ +@endif +@if TEAISH_TEST_TCL_IN +@TEAISH_TEST_TCL_IN@: $(teaish__autogen.deps) +config.log: @TEAISH_TEST_TCL_IN@ +@TEAISH_TEST_TCL@: @TEAISH_TEST_TCL_IN@ +@endif + +# +# CC variant for compiling Tcl-using sources. +# +CC.tcl = \ + $(CC) -o $@ $(CFLAGS.configure) $(CFLAGS) $(tx.CFLAGS) $(tx.CPPFLAGS) + +# +# CC variant for linking $(tx.src) into an extension DLL. Note that +# $(tx.src) must come before $(LDFLAGS...) for linking to third-party +# libs to work. +# +CC.dll = \ + $(CC.tcl) $(tx.src) $(LDFLAGS.shlib) \ + $(tx.LDFLAGS) $(LDFLAGS.configure) $(LDFLAGS) $(TCL_STUB_LIB_SPEC) + +@if TEAISH_ENABLE_DLL +# +# The rest of this makefile exists solely to support this brief +# target: the extension shared lib. +# +$(tx.dll): $(tx.src) config.log + @if [ "x" = "x$(tx.src)" ]; then \ + echo "Makefile var tx.src (source/object files) is empty" 1>&2; \ + exit 1; \ + fi + $(CC.dll) + +all: $(tx.dll) +@endif # TEAISH_ENABLE_DLL + +tclsh: $(teaish.makefile) config.log + @{ echo "#!/bin/sh"; echo 'exec $(TCLSH) "$$@"'; } > $@ + @chmod +x $@ + @echo "Created $@" + +# +# Run the generated test script. +# +.PHONY: test-pre test-prepre test-core test test-post test-extension +test-extension: # this name is reserved for use by teaish.make[.in] +@if TEAISH_ENABLE_DLL +test-prepre: $(tx.dll) +@endif +@if TEAISH_TESTER_TCL +teaish.tester.tcl = @TEAISH_TESTER_TCL@ +test-core.args = $(teaish.tester.tcl) +@if TEAISH_ENABLE_DLL +test-core.args += '$(tx.dll)' '$(tx.loadPrefix)' +@else +test-core.args += '' '' +@endif +test-core.args += @TEAISH_TESTUTIL_TCL@ +# Clients may pass additional args via test.args=... 
+# and ::argv will be rewritten before the test script loads, to +# remove $(test-core.args) +test.args ?= +test-core: test-pre + $(TCLSH) $(test-core.args) $(test.args) +test-gdb: $(teaish.tester.tcl) + gdb --args $(TCLSH) $(test-core.args) $(test.args) +test-vg.flags ?= --leak-check=full -v --show-reachable=yes --track-origins=yes +test-vg: $(teaish.tester.tcl) + valgrind $(test-vg.flags) $(TCLSH) $(test-core.args) $(test.args) +@else # !TEAISH_TESTER_TCL +test-prepre: +@endif # TEAISH_TESTER_TCL +test-pre: test-prepre +test-core: test-pre +test-post: test-core +test: test-post + +# +# Cleanup rules... +# +#.PHONY: clean-pre clean-core clean-post clean-extension +# +clean-pre: +clean-core: clean-pre + rm -f $(tx.dll8) $(tx.dll9) tclsh +clean-post: clean-core +clean: clean-post + +.PHONY: distclean-pre distclean-core distclean-post clean-extension +distclean-pre: clean +distclean-core: distclean-pre + rm -f Makefile + rm -f config.log config.defines.txt +@if TEAISH_MAKEFILE_IN +@if TEAISH_MAKEFILE + rm -f @TEAISH_MAKEFILE@ +@endif +@endif +@if TEAISH_TESTER_TCL_IN + rm -f $(teaish.tester.tcl) +@endif +@if TEAISH_PKGINDEX_TCL_IN + rm -f @TEAISH_PKGINDEX_TCL@ +@endif +@if TEAISH_PKGINIT_TCL_IN + rm -f @TEAISH_PKGINIT_TCL@ +@endif +@if TEAISH_TEST_TCL_IN + rm -f @TEAISH_TEST_TCL@ +@endif +distclean-post: distclean-core +distclean: distclean-post +# +# The (dist)clean-extension targets are reserved for use by +# client-side teaish.make. +# +# Client code which wants to clean up extra stuff should do so by +# adding their cleanup target (e.g. clean-extension) as a dependency +# to the 'clean' target, like so: +# +# clean: distclean-extension +# distclean: distclean-extension +# +distclean-extension: +clean-extension: + +# +# Installation rules... +# +@if TEAISH_ENABLE_INSTALL +.PHONY: install-pre install-core install-post install-test install-prepre install-extension +install-extension: # this name is reserved for use by teaish.make + +@if TEAISH_ENABLE_DLL +install-prepre: $(tx.dll) +@else +install-prepre: +@endif + +@if TEAISH_TM_TCL +install-core.tmdir = $(DESTDIR)@TEAISH_TCL_TM_DIR@ +@endif + +install-pre: install-prepre +install-core: install-pre + @if [ ! -d "$(DESTDIR)$(TCLLIBDIR)" ]; then \ + set -x; $(INSTALL) -d "$(DESTDIR)$(TCLLIBDIR)"; \ + fi +# ^^^^ on some platforms, install -d fails if the target already exists. +@if TEAISH_ENABLE_DLL + $(INSTALL) $(tx.dll) "$(DESTDIR)$(TCLLIBDIR)" +@endif +@if TEAISH_PKGINDEX_TCL + $(INSTALL.noexec) "@TEAISH_PKGINDEX_TCL@" "$(DESTDIR)$(TCLLIBDIR)" +@endif +@if TEAISH_PKGINIT_TCL + $(INSTALL.noexec) "@TEAISH_PKGINIT_TCL@" "$(DESTDIR)$(TCLLIBDIR)" +@endif +@if TEAISH_TM_TCL + @if [ ! -d "$(install-core.tmdir)" ]; then \ + set -x; $(INSTALL) -d "$(install-core.tmdir)"; \ + fi + $(INSTALL.noexec) "@TEAISH_TM_TCL@" "$(install-core.tmdir)/$(tx.tm.tgt)" +@endif +install-test: install-core + @echo "Post-install test of [package require $(tx.name.pkg) $(tx.version)]..."; \ + set xtra=""; \ + if [ x != "x$(DESTDIR)" ]; then \ + xtra='set ::auto_path [linsert $$::auto_path 0 [file normalize $(DESTDIR)$(TCLLIBDIR)/..]];'; \ + fi; \ + if echo \ + 'set c 0; ' $$xtra \ + '@TEAISH_POSTINST_PREREQUIRE@' \ + 'if {[catch {package require $(tx.name.pkg) $(tx.version)} xc]} {incr c};' \ + 'if {$$c && "" ne $$xc} {puts $$xc; puts "auto_path=$$::auto_path"};' \ + 'exit $$c' \ + | $(TCLSH) ; then \ + echo "passed"; \ + else \ + echo "FAILED"; \ + exit 1; \ fi +install-post: install-test +install: install-post + +# +# Uninstall rules... 
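The install rules above honor DESTDIR for staged installs, and the post-install check prepends the staged TCLLIBDIR to auto_path so the freshly installed package can still be loaded from the staging area. For example (staging path illustrative):

    make install DESTDIR=/tmp/stage
    make uninstall DESTDIR=/tmp/stage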
+# +.PHONY: uninstall uninstall-pre uninstall-core uninstall-post uninstall-extension +uninstall-extension: # this name is reserved for use by teaish.make +uninstall-pre: +uninstall-core: uninstall-pre +@if TEAISH_ENABLE_DLL + rm -fr "$(DESTDIR)$(TCLLIBDIR)" +@endif +@if TEAISH_TM_TCL + rm -f "$(DESTDIR)$(install-core.tmdir)/$(tx.tm.tgt)" +@endif + +uninstall-post: uninstall-core + @echo "Uninstalled Tcl extension $(tx.name) $(tx.version)" +uninstall: uninstall-post +@endif # TEAISH_ENABLE_INSTALL + +@if TEAISH_MAKEFILE_IN +Makefile: $(tx.makefile.in) +config.log: $(teaish.makefile.in) +@endif + +# +# Package archive generation ("dist") rules... +# +@if TEAISH_ENABLE_DIST +@if BIN_TAR +@if BIN_ZIP + +# When installing teaish as part of "make dist", we need to run +# configure with similar flags to what we last configured with but we +# must not pass on any extension-specific flags, as those won't be +# recognized when running in --teaish-install mode, causing +# the sub-configure to fail. +dist.flags = --with-tclsh=$(TCLSH) +dist.reconfig = $(teaish.dir)/configure $(tx.dist.reconfig-flags) $(dist.flags) + +# Temp dir for dist.zip. Must be different than dist.tgz or else +# parallel builds may hose the dist. +teaish__dist.tmp.zip = teaish__dist_zip +# +# Make a distribution zip file... +# +dist.zip = $(tx.dist.basename).zip +.PHONY: dist.zip dist.zip-core dist.zip-post +#dist.zip-pre: +# We apparently can't add a pre-hook here, else "make dist" rebuilds +# the archive each time it's run. +$(dist.zip): $(tx.dist.files) + @rm -fr $(teaish__dist.tmp.zip) + @mkdir -p $(teaish__dist.tmp.zip)/$(tx.dist.basename) + @tar cf $(teaish__dist.tmp.zip)/tmp.tar $(tx.dist.files) + @tar xf $(teaish__dist.tmp.zip)/tmp.tar -C $(teaish__dist.tmp.zip)/$(tx.dist.basename) +@if TEAISH_DIST_FULL + @$(dist.reconfig) \ + --teaish-install=$(teaish__dist.tmp.zip)/$(tx.dist.basename) \ + --t-e-d=$(teaish__dist.tmp.zip)/$(tx.dist.basename) >/dev/null +@endif + @rm -f $(tx.dist.basename)/tmp.tar $(dist.zip) + @cd $(teaish__dist.tmp.zip) && zip -q -r ../$(dist.zip) $(tx.dist.basename) + @rm -fr $(teaish__dist.tmp.zip) + @ls -la $(dist.zip) +dist.zip-core: $(dist.zip) +dist.zip-post: dist.zip-core +dist.zip: dist.zip-post +dist: dist.zip +undist-zip: + rm -f $(dist.zip) +undist: undist-zip +@endif #BIN_ZIP + +# +# Make a distribution tarball... +# +teaish__dist.tmp.tgz = teaish__dist_tgz +dist.tgz = $(tx.dist.basename).tar.gz +.PHONY: dist.tgz dist.tgz-core dist.tgz-post +# dist.tgz-pre: +# see notes in dist.zip +$(dist.tgz): $(tx.dist.files) + @rm -fr $(teaish__dist.tmp.tgz) + @mkdir -p $(teaish__dist.tmp.tgz)/$(tx.dist.basename) + @tar cf $(teaish__dist.tmp.tgz)/tmp.tar $(tx.dist.files) + @tar xf $(teaish__dist.tmp.tgz)/tmp.tar -C $(teaish__dist.tmp.tgz)/$(tx.dist.basename) +@if TEAISH_DIST_FULL + @rm -f $(teaish__dist.tmp.tgz)/$(tx.dist.basename)/pkgIndex.tcl.in; # kludge + @$(dist.reconfig) \ + --teaish-install=$(teaish__dist.tmp.tgz)/$(tx.dist.basename) \ + --t-e-d=$(teaish__dist.tmp.zip)/$(tx.dist.basename) >/dev/null +@endif + @rm -f $(tx.dist.basename)/tmp.tar $(dist.tgz) + @cd $(teaish__dist.tmp.tgz) && tar czf ../$(dist.tgz) $(tx.dist.basename) + @rm -fr $(teaish__dist.tmp.tgz) + @ls -la $(dist.tgz) +dist.tgz-core: $(dist.tgz) +dist.tgz-post: dist.tgz-core +dist.tgz: dist.tgz-post +dist: dist.tgz +undist-tgz: + rm -f $(dist.tgz) +undist: undist-tgz +@else #!BIN_TAR +dist: + @echo "The dist rules require tar, which configure did not find." 
1>&2; exit 1 +@endif #BIN_TAR +@else #!TEAISH_ENABLE_DIST +undist: +dist: +@if TEAISH_OUT_OF_EXT_TREE + @echo "'dist' can only be used from an extension's home dir" 1>&2; \ + echo "In this case: @TEAISH_EXT_DIR@" 1>&2; exit 1 +@endif +@endif #TEAISH_ENABLE_DIST + +Makefile: @TEAISH_TCL@ + +@if TEAISH_MAKEFILE_CODE +# +# TEAISH_MAKEFILE_CODE may contain literal makefile code, which +# gets pasted verbatim here. Either [define TEAISH_MAKEFILE_CODE +# ...] or use [teaish-make-add] to incrementally build up this +# content. +# +# +@TEAISH_MAKEFILE_CODE@ +# +@endif -#======================================================================== -# Install binary executables (e.g. .exe files and dependent .dll files) -# This is for files that must go in the bin directory (located next to -# wish and tclsh), like dependent .dll files on Windows. -# -# You should not have to modify this target, except to define bin_BINARIES -# above if necessary. -#======================================================================== - -install-bin-binaries: binaries - @mkdir -p $(DESTDIR)$(bindir) - @list='$(bin_BINARIES)'; for p in $$list; do \ - if test -f $$p; then \ - echo " $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/$$p"; \ - $(INSTALL_PROGRAM) $$p $(DESTDIR)$(bindir)/$$p; \ - fi; \ - done - -.SUFFIXES: .c .$(OBJEXT) - -Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status - cd $(top_builddir) \ - && CONFIG_FILES=$@ CONFIG_HEADERS= $(SHELL) ./config.status - -uninstall-binaries: - list='$(lib_BINARIES)'; for p in $$list; do \ - rm -f $(DESTDIR)$(pkglibdir)/$$p; \ - done - list='$(PKG_TCL_SOURCES)'; for p in $$list; do \ - p=`basename $$p`; \ - rm -f $(DESTDIR)$(pkglibdir)/$$p; \ - done - list='$(bin_BINARIES)'; for p in $$list; do \ - rm -f $(DESTDIR)$(bindir)/$$p; \ - done - -.PHONY: all binaries clean depend distclean doc install libraries test - -# Tell versions [3.59,3.63) of GNU make to not export all variables. -# Otherwise a system limit (for SysV at least) may be exceeded. -.NOEXPORT: +@if TEAISH_MAKEFILE +# +# TEAISH_MAKEFILE[_IN] defines any extension-specific state this file +# needs. +# +# It must set the following vars if they're not already accounted for +# via teaish.tcl. +# +# - tx.src = list of the extension's source files, being sure to +# prefix each with $(tx.dir) (if it's in the same dir as the +# extension) so that out-of-tree builds can find them. Optionally, +# [define] TEAISH_EXT_SRC or pass them to [teaish-src-add]. +# +# It may optionally set the following vars: +# +# - tx.CFLAGS = CFLAGS/CPPFLAGS. Optionally, [define] TEAISH_CFLAGS +# or pass them to [teaish-cflags-add]. +# +# - tx.LDFLAGS = LDFLAGS. Optionally, [define] TEAISH_LDFLAGS or +# pass them to [teaish-ldflags-add]. +# +# It may optionally hook into various targets as documented in +# /doc/extensions.md in the canonical teaish source tree. +# +# Interestingly, we don't have to pre-filter teaish.makefile.in - we +# can just @include it here. That skips its teaish-specific validation +# though. Hmm. +# +# +Makefile: @TEAISH_MAKEFILE@ +@include @TEAISH_MAKEFILE@ +# +@endif diff --git a/autoconf/tea/README b/autoconf/tea/README deleted file mode 100644 index 99dc8b8f03..0000000000 --- a/autoconf/tea/README +++ /dev/null @@ -1,36 +0,0 @@ -This is the SQLite extension for Tcl using the Tcl Extension -Architecture (TEA). 
For additional information on SQLite see - - http://www.sqlite.org/ - - -UNIX BUILD -========== - -Building under most UNIX systems is easy, just run the configure script -and then run make. For more information about the build process, see -the tcl/unix/README file in the Tcl src dist. The following minimal -example will install the extension in the /opt/tcl directory. - - $ cd sqlite-*-tea - $ ./configure --prefix=/opt/tcl - $ make - $ make install - -WINDOWS BUILD -============= - -The recommended method to build extensions under windows is to use the -Msys + Mingw build process. This provides a Unix-style build while -generating native Windows binaries. Using the Msys + Mingw build tools -means that you can use the same configure script as per the Unix build -to create a Makefile. See the tcl/win/README file for the URL of -the Msys + Mingw download. - -If you have VC++ then you may wish to use the files in the win -subdirectory and build the extension using just VC++. These files have -been designed to be as generic as possible but will require some -additional maintenance by the project developer to synchronise with -the TEA configure.in and Makefile.in files. Instructions for using the -VC++ makefile are written in the first part of the Makefile.vc -file. diff --git a/autoconf/tea/README.txt b/autoconf/tea/README.txt new file mode 100644 index 0000000000..122b08d32d --- /dev/null +++ b/autoconf/tea/README.txt @@ -0,0 +1,94 @@ +This is the SQLite extension for Tcl using something akin to +the Tcl Extension Architecture (TEA). To build it: + + ./configure ...flags... + +e.g.: + + ./configure --with-tcl=/path/to/tcl/install/root + +or: + + ./configure --with-tclsh=/path/to/tcl/install/root + +Run ./configure --help for the full list of flags. + +The configuration process will fail if tclConfig.sh cannot be found. + +The makefile will only honor CFLAGS and CPPFLAGS passed to the +configure script, not those directly passed to the makefile. + +Then: + + make test install + +----------------------- THE PREFERRED WAY --------------------------- + +The preferred way to build the TCL extension for SQLite is to use the +canonical source code tarball. For Unix: + + ./configure --with-tclsh=$(TCLSH) + make tclextension-install + +For Windows: + + nmake /f Makefile.msc tclextension-install TCLSH_CMD=$(TCLSH) + +In both of the above, replace $(TCLSH) with the full pathname of +of the tclsh that you want the SQLite extension to work with. See +step-by-step instructions at the links below for more information: + + https://sqlite.org/src/doc/trunk/doc/compile-for-unix.md + https://sqlite.org/src/doc/trunk/doc/compile-for-windows.md + +And info about the extension's Tcl interface can be found at: + + https://sqlite.org/tclsqlite.html + +The whole point of the amalgamation-autoconf tarball (in which this +README.txt file is embedded) is to provide a means of compiling SQLite +that does not require first installing TCL and/or "tclsh". The +canonical Makefile in the SQLite source tree provides more +capabilities (such as the the ability to run test cases to ensure that +the build worked) and is better maintained. The only downside of the +canonical Makefile is that it requires a TCL installation. But if you +are wanting to build the TCL extension for SQLite, then presumably you +already have a TCL installation. 
So why not just use the more-capable +and better-maintained canonical Makefile? + +As of version 3.50.0, this build process uses "teaish": + + https://fossil.wanderinghorse.net/r/teaish + +which is conceptually derived from the pre-3.50 toolchain, TEA: + + http://core.tcl-lang.org/tclconfig + http://core.tcl-lang.org/sampleextension + +It works for us. It might also work for you. But we cannot +promise that. + +If you want to use this TEA builder and it works for you, that's fine. +But if you have trouble, the first thing you should do is go back +to using the canonical Makefile in the SQLite source tree. + +------------------------------------------------------------------ + + +UNIX BUILD +========== + +Building under most UNIX systems is easy, just run the configure +script and then run make. For example: + + $ cd sqlite-*-tea + $ ./configure --with-tcl=/path/to/tcl/install/root + $ make test + $ make install + +WINDOWS BUILD +============= + +On Windows this build is known to work on Cygwin and some Msys2 +environments. We do not currently support Microsoft makefiles for +native Windows builds. diff --git a/autoconf/tea/_teaish.tester.tcl.in b/autoconf/tea/_teaish.tester.tcl.in new file mode 100644 index 0000000000..e04d8e63e7 --- /dev/null +++ b/autoconf/tea/_teaish.tester.tcl.in @@ -0,0 +1,50 @@ +# -*- tcl -*- +# +# Unless this file is named _teaish.tester.tcl.in, you are probably +# looking at an automatically generated/filtered copy and should +# probably not edit it. +# +# This is the wrapper script invoked by teaish's "make test" recipe. +# It gets passed 3 args: +# +# $1 = the DLL name, or "" if the extension has no DLL +# +# $2 = the "load prefix" for Tcl's [load] or empty if $1 is empty +# +# $3 = the /path/to/teaish/tester.tcl (test utility code) +# +@if TEAISH_VSATISFIES_CODE +@TEAISH_VSATISFIES_CODE@ +@endif +if {[llength [lindex $::argv 0]] > 0} { + load [file normalize [lindex $::argv 0]] [lindex $::argv 1]; + # ----^^^^^^^ needed on Haiku when argv 0 is just a filename, else + # load cannot find the file. +} +set ::argv [lassign $argv - -] +source -encoding utf-8 [lindex $::argv 0]; # teaish/tester.tcl +@if TEAISH_PKGINIT_TCL +apply {{file} { + set dir [file dirname $::argv0] + source -encoding utf-8 $file +}} [join {@TEAISH_PKGINIT_TCL@}] +@endif +@if TEAISH_TM_TCL +apply {{file} { + set dir [file dirname $::argv0] + source -encoding utf-8 $file +}} [join {@TEAISH_TM_TCL@}] +@endif +@if TEAISH_TEST_TCL +apply {{file} { + # Populate state for [tester.tcl::teaish-build-flag*] + array set ::teaish__BuildFlags @TEAISH__DEFINES_MAP@ + set dir [file normalize [file dirname $file]] + #test-fail "Just testing" + source -encoding utf-8 $file +}} [join {@TEAISH_TEST_TCL@}] +@else # TEAISH_TEST_TCL +# No $TEAISH_TEST_TCL provided, so here's a default test which simply +# loads the extension.
+puts {Extension @TEAISH_NAME@ @TEAISH_VERSION@ successfully loaded from @TEAISH_TESTER_TCL@} +@endif diff --git a/autoconf/tea/aclocal.m4 b/autoconf/tea/aclocal.m4 deleted file mode 100644 index 0b057391d2..0000000000 --- a/autoconf/tea/aclocal.m4 +++ /dev/null @@ -1,9 +0,0 @@ -# -# Include the TEA standard macro set -# - -builtin(include,tclconfig/tcl.m4) - -# -# Add here whatever m4 macros you want to define for your package -# diff --git a/autoconf/tea/auto.def b/autoconf/tea/auto.def new file mode 100644 index 0000000000..7170b3d1fe --- /dev/null +++ b/autoconf/tea/auto.def @@ -0,0 +1,8 @@ +#/do/not/tclsh +# ^^^ help out editors which guess this file's content type. +# +# Main configure script entry point for the TEA(ish) framework. All +# extension-specific customization goes in teaish.tcl.in or +# teaish.tcl. +use teaish/core +teaish-configure-core diff --git a/autoconf/tea/configure b/autoconf/tea/configure new file mode 100755 index 0000000000..01b3abcc2f --- /dev/null +++ b/autoconf/tea/configure @@ -0,0 +1,20 @@ +#!/bin/sh +# Look for and run autosetup... +dir0="`dirname "$0"`" +dirA="$dir0" +if [ -d $dirA/autosetup ]; then + # A local copy of autosetup + dirA=$dirA/autosetup +elif [ -d $dirA/../autosetup ]; then + # SQLite "autoconf" bundle + dirA=$dirA/../autosetup +elif [ -d $dirA/../../autosetup ]; then + # SQLite canonical source tree + dirA=$dirA/../../autosetup +else + echo "$0: Cannot find autosetup" 1>&2 + exit 1 +fi +WRAPPER="$0"; export WRAPPER; exec "`"$dirA/autosetup-find-tclsh"`" \ + "$dirA/autosetup" --teaish-extension-dir="$dir0" \ + "$@" diff --git a/autoconf/tea/configure.ac b/autoconf/tea/configure.ac deleted file mode 100644 index dae94f512e..0000000000 --- a/autoconf/tea/configure.ac +++ /dev/null @@ -1,201 +0,0 @@ -#!/bin/bash -norc -dnl This file is an input file used by the GNU "autoconf" program to -dnl generate the file "configure", which is run during Tcl installation -dnl to configure the system for the local environment. -# -# RCS: @(#) $Id: configure.in,v 1.43 2005/07/26 19:17:05 mdejong Exp $ - -#----------------------------------------------------------------------- -# Sample configure.in for Tcl Extensions. The only places you should -# need to modify this file are marked by the string __CHANGE__ -#----------------------------------------------------------------------- - -#----------------------------------------------------------------------- -# __CHANGE__ -# Set your package name and version numbers here. -# -# This initializes the environment with PACKAGE_NAME and PACKAGE_VERSION -# set as provided. These will also be added as -D defs in your Makefile -# so you can encode the package version directly into the source files. -#----------------------------------------------------------------------- - -AC_INIT([sqlite], [3.32.0]) - -#-------------------------------------------------------------------- -# Call TEA_INIT as the first TEA_ macro to set up initial vars. -# This will define a ${TEA_PLATFORM} variable == "unix" or "windows" -# as well as PKG_LIB_FILE and PKG_STUB_LIB_FILE. 
-#-------------------------------------------------------------------- - -TEA_INIT([3.9]) - -AC_CONFIG_AUX_DIR(tclconfig) - -#-------------------------------------------------------------------- -# Load the tclConfig.sh file -#-------------------------------------------------------------------- - -TEA_PATH_TCLCONFIG -TEA_LOAD_TCLCONFIG - -#-------------------------------------------------------------------- -# Load the tkConfig.sh file if necessary (Tk extension) -#-------------------------------------------------------------------- - -#TEA_PATH_TKCONFIG -#TEA_LOAD_TKCONFIG - -#----------------------------------------------------------------------- -# Handle the --prefix=... option by defaulting to what Tcl gave. -# Must be called after TEA_LOAD_TCLCONFIG and before TEA_SETUP_COMPILER. -#----------------------------------------------------------------------- - -TEA_PREFIX - -#----------------------------------------------------------------------- -# Standard compiler checks. -# This sets up CC by using the CC env var, or looks for gcc otherwise. -# This also calls AC_PROG_CC, AC_PROG_INSTALL and a few others to create -# the basic setup necessary to compile executables. -#----------------------------------------------------------------------- - -TEA_SETUP_COMPILER - -#----------------------------------------------------------------------- -# __CHANGE__ -# Specify the C source files to compile in TEA_ADD_SOURCES, -# public headers that need to be installed in TEA_ADD_HEADERS, -# stub library C source files to compile in TEA_ADD_STUB_SOURCES, -# and runtime Tcl library files in TEA_ADD_TCL_SOURCES. -# This defines PKG(_STUB)_SOURCES, PKG(_STUB)_OBJECTS, PKG_HEADERS -# and PKG_TCL_SOURCES. -#----------------------------------------------------------------------- - -TEA_ADD_SOURCES([tclsqlite3.c]) -TEA_ADD_HEADERS([]) -TEA_ADD_INCLUDES([-I\"`\${CYGPATH} \${srcdir}/generic`\"]) -TEA_ADD_LIBS([]) -TEA_ADD_CFLAGS([-DSQLITE_ENABLE_FTS3=1]) -TEA_ADD_CFLAGS([-DSQLITE_3_SUFFIX_ONLY=1]) -TEA_ADD_CFLAGS([-DSQLITE_ENABLE_RTREE=1]) -TEA_ADD_STUB_SOURCES([]) -TEA_ADD_TCL_SOURCES([]) - -#-------------------------------------------------------------------- -# The --with-system-sqlite causes the TCL bindings to SQLite to use -# the system shared library for SQLite rather than statically linking -# against its own private copy. This is dangerous and leads to -# undersirable dependences and is not recommended. -# Patchs from rmax. -#-------------------------------------------------------------------- -AC_ARG_WITH([system-sqlite], - [AC_HELP_STRING([--with-system-sqlite], - [use a system-supplied libsqlite3 instead of the bundled one])], - [], [with_system_sqlite=no]) -if test x$with_system_sqlite != xno; then - AC_CHECK_HEADER([sqlite3.h], - [AC_CHECK_LIB([sqlite3],[sqlite3_initialize], - [AC_DEFINE(USE_SYSTEM_SQLITE) - LIBS="$LIBS -lsqlite3"])]) -fi - -#-------------------------------------------------------------------- -# __CHANGE__ -# Choose which headers you need. Extension authors should try very -# hard to only rely on the Tcl public header files. Internal headers -# contain private data structures and are subject to change without -# notice. 
-# This MUST be called after TEA_LOAD_TCLCONFIG / TEA_LOAD_TKCONFIG -#-------------------------------------------------------------------- - -TEA_PUBLIC_TCL_HEADERS -#TEA_PRIVATE_TCL_HEADERS - -#TEA_PUBLIC_TK_HEADERS -#TEA_PRIVATE_TK_HEADERS -#TEA_PATH_X - -#-------------------------------------------------------------------- -# Check whether --enable-threads or --disable-threads was given. -# This auto-enables if Tcl was compiled threaded. -#-------------------------------------------------------------------- - -TEA_ENABLE_THREADS -if test "${TCL_THREADS}" = "1" ; then - AC_DEFINE(SQLITE_THREADSAFE, 1, [Trigger sqlite threadsafe build]) - # Not automatically added by Tcl because its assumed Tcl links to them, - # but it may not if it isn't really a threaded build. - TEA_ADD_LIBS([$THREADS_LIBS]) -else - AC_DEFINE(SQLITE_THREADSAFE, 0, [Trigger sqlite non-threadsafe build]) -fi - -#-------------------------------------------------------------------- -# The statement below defines a collection of symbols related to -# building as a shared library instead of a static library. -#-------------------------------------------------------------------- - -TEA_ENABLE_SHARED - -#-------------------------------------------------------------------- -# This macro figures out what flags to use with the compiler/linker -# when building shared/static debug/optimized objects. This information -# can be taken from the tclConfig.sh file, but this figures it all out. -#-------------------------------------------------------------------- - -TEA_CONFIG_CFLAGS - -#-------------------------------------------------------------------- -# Set the default compiler switches based on the --enable-symbols option. -#-------------------------------------------------------------------- - -TEA_ENABLE_SYMBOLS - -#-------------------------------------------------------------------- -# Everyone should be linking against the Tcl stub library. If you -# can't for some reason, remove this definition. If you aren't using -# stubs, you also need to modify the SHLIB_LD_LIBS setting below to -# link against the non-stubbed Tcl library. Add Tk too if necessary. -#-------------------------------------------------------------------- - -AC_DEFINE(USE_TCL_STUBS, 1, [Use Tcl stubs]) -#AC_DEFINE(USE_TK_STUBS, 1, [Use Tk stubs]) - - -#-------------------------------------------------------------------- -# Redefine fdatasync as fsync on systems that lack fdatasync -#-------------------------------------------------------------------- -# -#AC_CHECK_FUNC(fdatasync, , AC_DEFINE(fdatasync, fsync)) -# Check for library functions that SQLite can optionally use. -AC_CHECK_FUNCS([fdatasync usleep fullfsync localtime_r gmtime_r]) - -AC_FUNC_STRERROR_R - - -#-------------------------------------------------------------------- -# This macro generates a line to use when building a library. It -# depends on values set by the TEA_ENABLE_SHARED, TEA_ENABLE_SYMBOLS, -# and TEA_LOAD_TCLCONFIG macros above. -#-------------------------------------------------------------------- - -TEA_MAKE_LIB - -#-------------------------------------------------------------------- -# Determine the name of the tclsh and/or wish executables in the -# Tcl and Tk build directories or the location they were installed -# into. These paths are used to support running test cases only, -# the Makefile should not be making use of these paths to generate -# a pkgIndex.tcl file or anything else at extension build time. 
-#-------------------------------------------------------------------- - -TEA_PROG_TCLSH -#TEA_PROG_WISH - -#-------------------------------------------------------------------- -# Finally, substitute all of the various values into the Makefile. -# You may alternatively have a special pkgIndex.tcl.in or other files -# which require substituting th AC variables in. Include these here. -#-------------------------------------------------------------------- - -AC_OUTPUT([Makefile pkgIndex.tcl]) diff --git a/autoconf/tea/doc/sqlite3.n b/autoconf/tea/doc/sqlite3.n deleted file mode 100644 index 13913e5583..0000000000 --- a/autoconf/tea/doc/sqlite3.n +++ /dev/null @@ -1,15 +0,0 @@ -.TH sqlite3 n 4.1 "Tcl-Extensions" -.HS sqlite3 tcl -.BS -.SH NAME -sqlite3 \- an interface to the SQLite3 database engine -.SH SYNOPSIS -\fBsqlite3\fI command_name ?filename?\fR -.br -.SH DESCRIPTION -SQLite3 is a self-contains, zero-configuration, transactional SQL database -engine. This extension provides an easy to use interface for accessing -SQLite database files from Tcl. -.PP -For full documentation see \fIhttp://www.sqlite.org/\fR and -in particular \fIhttp://www.sqlite.org/tclsqlite.html\fR. diff --git a/autoconf/tea/pkgIndex.tcl.in b/autoconf/tea/pkgIndex.tcl.in index bc585f73b3..c93fcc6854 100644 --- a/autoconf/tea/pkgIndex.tcl.in +++ b/autoconf/tea/pkgIndex.tcl.in @@ -1,7 +1,40 @@ -# +# -*- tcl -*- # Tcl package index file # -# Note sqlite*3* init specifically +# Unless this file is named pkgIndex.tcl.in, you are probably looking +# at an automatically generated/filtered copy and should probably not +# edit it. # -package ifneeded sqlite3 @PACKAGE_VERSION@ \ - [list load [file join $dir @PKG_LIB_FILE@] Sqlite3] +# Adapted from https://core.tcl-lang.org/tcltls +@if TEAISH_VSATISFIES_CODE +@TEAISH_VSATISFIES_CODE@ +@endif +if {[package vsatisfies [package provide Tcl] 9.0-]} { + package ifneeded {@TEAISH_PKGNAME@} {@TEAISH_VERSION@} [list apply {{dir} { +@if TEAISH_ENABLE_DLL + load [file join $dir {@TEAISH_DLL9@}] @TEAISH_LOAD_PREFIX@ +@endif +@if TEAISH_PKGINIT_TCL_TAIL + set initScript [file join $dir {@TEAISH_PKGINIT_TCL_TAIL@}] + if {[file exists $initScript]} { + source -encoding utf-8 $initScript + } +@endif + }} $dir] +} else { + package ifneeded {@TEAISH_PKGNAME@} {@TEAISH_VERSION@} [list apply {{dir} { +@if TEAISH_ENABLE_DLL + if {[string tolower [file extension {@TEAISH_DLL8@}]] in [list .dll .dylib .so]} { + load [file join $dir {@TEAISH_DLL8@}] @TEAISH_LOAD_PREFIX@ + } else { + load {} @TEAISH_LOAD_PREFIX@ + } +@endif +@if TEAISH_PKGINIT_TCL_TAIL + set initScript [file join $dir {@TEAISH_PKGINIT_TCL_TAIL@}] + if {[file exists $initScript]} { + source -encoding utf-8 $initScript + } +@endif + }} $dir] +} diff --git a/autoconf/tea/tclconfig/install-sh b/autoconf/tea/tclconfig/install-sh deleted file mode 100644 index 7c34c3f926..0000000000 --- a/autoconf/tea/tclconfig/install-sh +++ /dev/null @@ -1,528 +0,0 @@ -#!/bin/sh -# install - install a program, script, or datafile - -scriptversion=2011-04-20.01; # UTC - -# This originates from X11R5 (mit/util/scripts/install.sh), which was -# later released in X11R6 (xc/config/util/install.sh) with the -# following copyright and license. 
-# -# Copyright (C) 1994 X Consortium -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to -# deal in the Software without restriction, including without limitation the -# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -# sell copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN -# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- -# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Except as contained in this notice, the name of the X Consortium shall not -# be used in advertising or otherwise to promote the sale, use or other deal- -# ings in this Software without prior written authorization from the X Consor- -# tium. -# -# -# FSF changes to this file are in the public domain. -# -# Calling this script install-sh is preferred over install.sh, to prevent -# `make' implicit rules from creating a file called install from it -# when there is no Makefile. -# -# This script is compatible with the BSD install script, but was written -# from scratch. - -nl=' -' -IFS=" "" $nl" - -# set DOITPROG to echo to test this script - -# Don't use :- since 4.3BSD and earlier shells don't like it. -doit=${DOITPROG-} -if test -z "$doit"; then - doit_exec=exec -else - doit_exec=$doit -fi - -# Put in absolute file names if you don't have them in your path; -# or use environment vars. - -chgrpprog=${CHGRPPROG-chgrp} -chmodprog=${CHMODPROG-chmod} -chownprog=${CHOWNPROG-chown} -cmpprog=${CMPPROG-cmp} -cpprog=${CPPROG-cp} -mkdirprog=${MKDIRPROG-mkdir} -mvprog=${MVPROG-mv} -rmprog=${RMPROG-rm} -stripprog=${STRIPPROG-strip} - -posix_glob='?' -initialize_posix_glob=' - test "$posix_glob" != "?" || { - if (set -f) 2>/dev/null; then - posix_glob= - else - posix_glob=: - fi - } -' - -posix_mkdir= - -# Desired mode of installed file. -mode=0755 - -chgrpcmd= -chmodcmd=$chmodprog -chowncmd= -mvcmd=$mvprog -rmcmd="$rmprog -f" -stripcmd= - -src= -dst= -dir_arg= -dst_arg= - -copy_on_change=false -no_target_directory= - -usage="\ -Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE - or: $0 [OPTION]... SRCFILES... DIRECTORY - or: $0 [OPTION]... -t DIRECTORY SRCFILES... - or: $0 [OPTION]... -d DIRECTORIES... - -In the 1st form, copy SRCFILE to DSTFILE. -In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. -In the 4th, create DIRECTORIES. - -Options: - --help display this help and exit. - --version display version info and exit. - - -c (ignored) - -C install only if different (preserve the last data modification time) - -d create directories instead of installing files. - -g GROUP $chgrpprog installed files to GROUP. - -m MODE $chmodprog installed files to MODE. - -o USER $chownprog installed files to USER. - -s $stripprog installed files. - -S $stripprog installed files. - -t DIRECTORY install into DIRECTORY. - -T report an error if DSTFILE is a directory. 
- -Environment variables override the default commands: - CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG - RMPROG STRIPPROG -" - -while test $# -ne 0; do - case $1 in - -c) ;; - - -C) copy_on_change=true;; - - -d) dir_arg=true;; - - -g) chgrpcmd="$chgrpprog $2" - shift;; - - --help) echo "$usage"; exit $?;; - - -m) mode=$2 - case $mode in - *' '* | *' '* | *' -'* | *'*'* | *'?'* | *'['*) - echo "$0: invalid mode: $mode" >&2 - exit 1;; - esac - shift;; - - -o) chowncmd="$chownprog $2" - shift;; - - -s) stripcmd=$stripprog;; - - -S) stripcmd="$stripprog $2" - shift;; - - -t) dst_arg=$2 - shift;; - - -T) no_target_directory=true;; - - --version) echo "$0 $scriptversion"; exit $?;; - - --) shift - break;; - - -*) echo "$0: invalid option: $1" >&2 - exit 1;; - - *) break;; - esac - shift -done - -if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then - # When -d is used, all remaining arguments are directories to create. - # When -t is used, the destination is already specified. - # Otherwise, the last argument is the destination. Remove it from $@. - for arg - do - if test -n "$dst_arg"; then - # $@ is not empty: it contains at least $arg. - set fnord "$@" "$dst_arg" - shift # fnord - fi - shift # arg - dst_arg=$arg - done -fi - -if test $# -eq 0; then - if test -z "$dir_arg"; then - echo "$0: no input file specified." >&2 - exit 1 - fi - # It's OK to call `install-sh -d' without argument. - # This can happen when creating conditional directories. - exit 0 -fi - -if test -z "$dir_arg"; then - do_exit='(exit $ret); exit $ret' - trap "ret=129; $do_exit" 1 - trap "ret=130; $do_exit" 2 - trap "ret=141; $do_exit" 13 - trap "ret=143; $do_exit" 15 - - # Set umask so as not to create temps with too-generous modes. - # However, 'strip' requires both read and write access to temps. - case $mode in - # Optimize common cases. - *644) cp_umask=133;; - *755) cp_umask=22;; - - *[0-7]) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw='% 200' - fi - cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; - *) - if test -z "$stripcmd"; then - u_plus_rw= - else - u_plus_rw=,u+rw - fi - cp_umask=$mode$u_plus_rw;; - esac -fi - -for src -do - # Protect names starting with `-'. - case $src in - -*) src=./$src;; - esac - - if test -n "$dir_arg"; then - dst=$src - dstdir=$dst - test -d "$dstdir" - dstdir_status=$? - else - - # Waiting for this to be detected by the "$cpprog $src $dsttmp" command - # might cause directories to be created, which would be especially bad - # if $src (and thus $dsttmp) contains '*'. - if test ! -f "$src" && test ! -d "$src"; then - echo "$0: $src does not exist." >&2 - exit 1 - fi - - if test -z "$dst_arg"; then - echo "$0: no destination specified." >&2 - exit 1 - fi - - dst=$dst_arg - # Protect names starting with `-'. - case $dst in - -*) dst=./$dst;; - esac - - # If destination is a directory, append the input filename; won't work - # if double slashes aren't ignored. - if test -d "$dst"; then - if test -n "$no_target_directory"; then - echo "$0: $dst_arg: Is a directory" >&2 - exit 1 - fi - dstdir=$dst - dst=$dstdir/`basename "$src"` - dstdir_status=0 - else - # Prefer dirname, but fall back on a substitute if dirname fails. - dstdir=` - (dirname "$dst") 2>/dev/null || - expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$dst" : 'X\(//\)[^/]' \| \ - X"$dst" : 'X\(//\)$' \| \ - X"$dst" : 'X\(/\)' \| . 
2>/dev/null || - echo X"$dst" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q' - ` - - test -d "$dstdir" - dstdir_status=$? - fi - fi - - obsolete_mkdir_used=false - - if test $dstdir_status != 0; then - case $posix_mkdir in - '') - # Create intermediate dirs using mode 755 as modified by the umask. - # This is like FreeBSD 'install' as of 1997-10-28. - umask=`umask` - case $stripcmd.$umask in - # Optimize common cases. - *[2367][2367]) mkdir_umask=$umask;; - .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; - - *[0-7]) - mkdir_umask=`expr $umask + 22 \ - - $umask % 100 % 40 + $umask % 20 \ - - $umask % 10 % 4 + $umask % 2 - `;; - *) mkdir_umask=$umask,go-w;; - esac - - # With -d, create the new directory with the user-specified mode. - # Otherwise, rely on $mkdir_umask. - if test -n "$dir_arg"; then - mkdir_mode=-m$mode - else - mkdir_mode= - fi - - posix_mkdir=false - case $umask in - *[123567][0-7][0-7]) - # POSIX mkdir -p sets u+wx bits regardless of umask, which - # is incompatible with FreeBSD 'install' when (umask & 300) != 0. - ;; - *) - tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ - trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 - - if (umask $mkdir_umask && - exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 - then - if test -z "$dir_arg" || { - # Check for POSIX incompatibilities with -m. - # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or - # other-writeable bit of parent directory when it shouldn't. - # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. - ls_ld_tmpdir=`ls -ld "$tmpdir"` - case $ls_ld_tmpdir in - d????-?r-*) different_mode=700;; - d????-?--*) different_mode=755;; - *) false;; - esac && - $mkdirprog -m$different_mode -p -- "$tmpdir" && { - ls_ld_tmpdir_1=`ls -ld "$tmpdir"` - test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" - } - } - then posix_mkdir=: - fi - rmdir "$tmpdir/d" "$tmpdir" - else - # Remove any dirs left behind by ancient mkdir implementations. - rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null - fi - trap '' 0;; - esac;; - esac - - if - $posix_mkdir && ( - umask $mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" - ) - then : - else - - # The umask is ridiculous, or mkdir does not conform to POSIX, - # or it failed possibly due to a race condition. Create the - # directory the slow way, step by step, checking for races as we go. - - case $dstdir in - /*) prefix='/';; - -*) prefix='./';; - *) prefix='';; - esac - - eval "$initialize_posix_glob" - - oIFS=$IFS - IFS=/ - $posix_glob set -f - set fnord $dstdir - shift - $posix_glob set +f - IFS=$oIFS - - prefixes= - - for d - do - test -z "$d" && continue - - prefix=$prefix$d - if test -d "$prefix"; then - prefixes= - else - if $posix_mkdir; then - (umask=$mkdir_umask && - $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break - # Don't fail if two instances are running concurrently. - test -d "$prefix" || exit 1 - else - case $prefix in - *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; - *) qprefix=$prefix;; - esac - prefixes="$prefixes '$qprefix'" - fi - fi - prefix=$prefix/ - done - - if test -n "$prefixes"; then - # Don't fail if two instances are running concurrently. 
- (umask $mkdir_umask && - eval "\$doit_exec \$mkdirprog $prefixes") || - test -d "$dstdir" || exit 1 - obsolete_mkdir_used=true - fi - fi - fi - - if test -n "$dir_arg"; then - { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && - { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || - test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 - else - - # Make a couple of temp file names in the proper directory. - dsttmp=$dstdir/_inst.$$_ - rmtmp=$dstdir/_rm.$$_ - - # Trap to clean up those temp files at exit. - trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 - - # Copy the file name to the temp name. - (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && - - # and set any options; do chmod last to preserve setuid bits. - # - # If any of these fail, we abort the whole thing. If we want to - # ignore errors from any of these, just make sure not to ignore - # errors from the above "$doit $cpprog $src $dsttmp" command. - # - { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && - { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && - { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && - { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && - - # If -C, don't bother to copy if it wouldn't change the file. - if $copy_on_change && - old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && - new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && - - eval "$initialize_posix_glob" && - $posix_glob set -f && - set X $old && old=:$2:$4:$5:$6 && - set X $new && new=:$2:$4:$5:$6 && - $posix_glob set +f && - - test "$old" = "$new" && - $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 - then - rm -f "$dsttmp" - else - # Rename the file to the real destination. - $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || - - # The rename failed, perhaps because mv can't rename something else - # to itself, or perhaps because mv is so ancient that it does not - # support -f. - { - # Now remove or move aside any old file at destination location. - # We try this two ways since rm can't unlink itself on some - # systems and the destination file might be busy for other - # reasons. In this case, the final cleanup might fail but the new - # file should still install successfully. - { - test ! -f "$dst" || - $doit $rmcmd -f "$dst" 2>/dev/null || - { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && - { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } - } || - { echo "$0: cannot unlink or rename $dst" >&2 - (exit 1); exit 1 - } - } && - - # Now rename the file to the real destination. - $doit $mvcmd "$dsttmp" "$dst" - } - fi || exit 1 - - trap '' 0 - fi -done - -# Local variables: -# eval: (add-hook 'write-file-hooks 'time-stamp) -# time-stamp-start: "scriptversion=" -# time-stamp-format: "%:y-%02m-%02d.%02H" -# time-stamp-time-zone: "UTC" -# time-stamp-end: "; # UTC" -# End: diff --git a/autoconf/tea/tclconfig/tcl.m4 b/autoconf/tea/tclconfig/tcl.m4 deleted file mode 100644 index 4b4bd1e888..0000000000 --- a/autoconf/tea/tclconfig/tcl.m4 +++ /dev/null @@ -1,4168 +0,0 @@ -# tcl.m4 -- -# -# This file provides a set of autoconf macros to help TEA-enable -# a Tcl extension. -# -# Copyright (c) 1999-2000 Ajuba Solutions. -# Copyright (c) 2002-2005 ActiveState Corporation. -# -# See the file "license.terms" for information on usage and redistribution -# of this file, and for a DISCLAIMER OF ALL WARRANTIES. 
- -AC_PREREQ(2.57) - -dnl TEA extensions pass us the version of TEA they think they -dnl are compatible with (must be set in TEA_INIT below) -dnl TEA_VERSION="3.9" - -# Possible values for key variables defined: -# -# TEA_WINDOWINGSYSTEM - win32 aqua x11 (mirrors 'tk windowingsystem') -# TEA_PLATFORM - windows unix -# - -#------------------------------------------------------------------------ -# TEA_PATH_TCLCONFIG -- -# -# Locate the tclConfig.sh file and perform a sanity check on -# the Tcl compile flags -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --with-tcl=... -# -# Defines the following vars: -# TCL_BIN_DIR Full path to the directory containing -# the tclConfig.sh file -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PATH_TCLCONFIG], [ - dnl TEA specific: Make sure we are initialized - AC_REQUIRE([TEA_INIT]) - # - # Ok, lets find the tcl configuration - # First, look for one uninstalled. - # the alternative search directory is invoked by --with-tcl - # - - if test x"${no_tcl}" = x ; then - # we reset no_tcl in case something fails here - no_tcl=true - AC_ARG_WITH(tcl, - AC_HELP_STRING([--with-tcl], - [directory containing tcl configuration (tclConfig.sh)]), - with_tclconfig="${withval}") - AC_MSG_CHECKING([for Tcl configuration]) - AC_CACHE_VAL(ac_cv_c_tclconfig,[ - - # First check to see if --with-tcl was specified. - if test x"${with_tclconfig}" != x ; then - case "${with_tclconfig}" in - */tclConfig.sh ) - if test -f "${with_tclconfig}"; then - AC_MSG_WARN([--with-tcl argument should refer to directory containing tclConfig.sh, not to tclConfig.sh itself]) - with_tclconfig="`echo "${with_tclconfig}" | sed 's!/tclConfig\.sh$!!'`" - fi ;; - esac - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd "${with_tclconfig}"; pwd)`" - else - AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) - fi - fi - - # then check for a private Tcl installation - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ../tcl \ - `ls -dr ../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../tcl \ - `ls -dr ../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../../tcl \ - `ls -dr ../../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do - if test "${TEA_PLATFORM}" = "windows" \ - -a -f "$i/win/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i/win; pwd)`" - break - fi - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" - break - fi - done - fi - - # on Darwin, check in Framework installation locations - if test "`uname -s`" = "Darwin" -a x"${ac_cv_c_tclconfig}" = x ; then - for i in `ls -d ~/Library/Frameworks 2>/dev/null` \ - `ls -d /Library/Frameworks 2>/dev/null` \ - `ls -d /Network/Library/Frameworks 2>/dev/null` \ - `ls -d /System/Library/Frameworks 2>/dev/null` \ - ; do - if test -f "$i/Tcl.framework/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i/Tcl.framework; pwd)`" - break - fi - done - fi - - # TEA specific: on Windows, check in common installation locations - if test "${TEA_PLATFORM}" = "windows" \ - -a x"${ac_cv_c_tclconfig}" = x ; then - for i in `ls -d C:/Tcl/lib 2>/dev/null` \ - `ls -d C:/Progra~1/Tcl/lib 2>/dev/null` \ - ; 
do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i; pwd)`" - break - fi - done - fi - - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in `ls -d ${libdir} 2>/dev/null` \ - `ls -d ${exec_prefix}/lib 2>/dev/null` \ - `ls -d ${prefix}/lib 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` \ - `ls -d /usr/lib64 2>/dev/null` \ - `ls -d /usr/lib/tcl8.6 2>/dev/null` \ - `ls -d /usr/lib/tcl8.5 2>/dev/null` \ - ; do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i; pwd)`" - break - fi - done - fi - - # check in a few other private locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ${srcdir}/../tcl \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do - if test "${TEA_PLATFORM}" = "windows" \ - -a -f "$i/win/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i/win; pwd)`" - break - fi - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig="`(cd $i/unix; pwd)`" - break - fi - done - fi - ]) - - if test x"${ac_cv_c_tclconfig}" = x ; then - TCL_BIN_DIR="# no Tcl configs found" - AC_MSG_ERROR([Can't find Tcl configuration definitions. Use --with-tcl to specify a directory containing tclConfig.sh]) - else - no_tcl= - TCL_BIN_DIR="${ac_cv_c_tclconfig}" - AC_MSG_RESULT([found ${TCL_BIN_DIR}/tclConfig.sh]) - fi - fi -]) - -#------------------------------------------------------------------------ -# TEA_PATH_TKCONFIG -- -# -# Locate the tkConfig.sh file -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --with-tk=... -# -# Defines the following vars: -# TK_BIN_DIR Full path to the directory containing -# the tkConfig.sh file -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PATH_TKCONFIG], [ - # - # Ok, lets find the tk configuration - # First, look for one uninstalled. - # the alternative search directory is invoked by --with-tk - # - - if test x"${no_tk}" = x ; then - # we reset no_tk in case something fails here - no_tk=true - AC_ARG_WITH(tk, - AC_HELP_STRING([--with-tk], - [directory containing tk configuration (tkConfig.sh)]), - with_tkconfig="${withval}") - AC_MSG_CHECKING([for Tk configuration]) - AC_CACHE_VAL(ac_cv_c_tkconfig,[ - - # First check to see if --with-tkconfig was specified. 
- if test x"${with_tkconfig}" != x ; then - case "${with_tkconfig}" in - */tkConfig.sh ) - if test -f "${with_tkconfig}"; then - AC_MSG_WARN([--with-tk argument should refer to directory containing tkConfig.sh, not to tkConfig.sh itself]) - with_tkconfig="`echo "${with_tkconfig}" | sed 's!/tkConfig\.sh$!!'`" - fi ;; - esac - if test -f "${with_tkconfig}/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd "${with_tkconfig}"; pwd)`" - else - AC_MSG_ERROR([${with_tkconfig} directory doesn't contain tkConfig.sh]) - fi - fi - - # then check for a private Tk library - if test x"${ac_cv_c_tkconfig}" = x ; then - for i in \ - ../tk \ - `ls -dr ../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../tk[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../tk[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../tk \ - `ls -dr ../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../tk[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../tk[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../../tk \ - `ls -dr ../../../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../../tk[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do - if test "${TEA_PLATFORM}" = "windows" \ - -a -f "$i/win/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i/win; pwd)`" - break - fi - if test -f "$i/unix/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i/unix; pwd)`" - break - fi - done - fi - - # on Darwin, check in Framework installation locations - if test "`uname -s`" = "Darwin" -a x"${ac_cv_c_tkconfig}" = x ; then - for i in `ls -d ~/Library/Frameworks 2>/dev/null` \ - `ls -d /Library/Frameworks 2>/dev/null` \ - `ls -d /Network/Library/Frameworks 2>/dev/null` \ - `ls -d /System/Library/Frameworks 2>/dev/null` \ - ; do - if test -f "$i/Tk.framework/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i/Tk.framework; pwd)`" - break - fi - done - fi - - # check in a few common install locations - if test x"${ac_cv_c_tkconfig}" = x ; then - for i in `ls -d ${libdir} 2>/dev/null` \ - `ls -d ${exec_prefix}/lib 2>/dev/null` \ - `ls -d ${prefix}/lib 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` \ - `ls -d /usr/lib64 2>/dev/null` \ - ; do - if test -f "$i/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i; pwd)`" - break - fi - done - fi - - # TEA specific: on Windows, check in common installation locations - if test "${TEA_PLATFORM}" = "windows" \ - -a x"${ac_cv_c_tkconfig}" = x ; then - for i in `ls -d C:/Tcl/lib 2>/dev/null` \ - `ls -d C:/Progra~1/Tcl/lib 2>/dev/null` \ - ; do - if test -f "$i/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i; pwd)`" - break - fi - done - fi - - # check in a few other private locations - if test x"${ac_cv_c_tkconfig}" = x ; then - for i in \ - ${srcdir}/../tk \ - `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ${srcdir}/../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do - if test "${TEA_PLATFORM}" = "windows" \ - -a -f "$i/win/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i/win; pwd)`" - break - fi - if test -f "$i/unix/tkConfig.sh" ; then - ac_cv_c_tkconfig="`(cd $i/unix; pwd)`" - break - fi - done - fi - ]) - - if test x"${ac_cv_c_tkconfig}" = x ; then - TK_BIN_DIR="# no Tk configs found" - AC_MSG_ERROR([Can't find Tk configuration definitions. 
Use --with-tk to specify a directory containing tkConfig.sh]) - else - no_tk= - TK_BIN_DIR="${ac_cv_c_tkconfig}" - AC_MSG_RESULT([found ${TK_BIN_DIR}/tkConfig.sh]) - fi - fi -]) - -#------------------------------------------------------------------------ -# TEA_LOAD_TCLCONFIG -- -# -# Load the tclConfig.sh file -# -# Arguments: -# -# Requires the following vars to be set: -# TCL_BIN_DIR -# -# Results: -# -# Substitutes the following vars: -# TCL_BIN_DIR -# TCL_SRC_DIR -# TCL_LIB_FILE -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_LOAD_TCLCONFIG], [ - AC_MSG_CHECKING([for existence of ${TCL_BIN_DIR}/tclConfig.sh]) - - if test -f "${TCL_BIN_DIR}/tclConfig.sh" ; then - AC_MSG_RESULT([loading]) - . "${TCL_BIN_DIR}/tclConfig.sh" - else - AC_MSG_RESULT([could not find ${TCL_BIN_DIR}/tclConfig.sh]) - fi - - # eval is required to do the TCL_DBGX substitution - eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" - eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" - - # If the TCL_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. - # For example, the variable TCL_LIB_SPEC will be set to the value - # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC - # instead of TCL_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. - if test -f "${TCL_BIN_DIR}/Makefile" ; then - TCL_LIB_SPEC="${TCL_BUILD_LIB_SPEC}" - TCL_STUB_LIB_SPEC="${TCL_BUILD_STUB_LIB_SPEC}" - TCL_STUB_LIB_PATH="${TCL_BUILD_STUB_LIB_PATH}" - elif test "`uname -s`" = "Darwin"; then - # If Tcl was built as a framework, attempt to use the libraries - # from the framework at the given location so that linking works - # against Tcl.framework installed in an arbitrary location. 
- case ${TCL_DEFS} in - *TCL_FRAMEWORK*) - if test -f "${TCL_BIN_DIR}/${TCL_LIB_FILE}"; then - for i in "`cd "${TCL_BIN_DIR}"; pwd`" \ - "`cd "${TCL_BIN_DIR}"/../..; pwd`"; do - if test "`basename "$i"`" = "${TCL_LIB_FILE}.framework"; then - TCL_LIB_SPEC="-F`dirname "$i" | sed -e 's/ /\\\\ /g'` -framework ${TCL_LIB_FILE}" - break - fi - done - fi - if test -f "${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}"; then - TCL_STUB_LIB_SPEC="-L`echo "${TCL_BIN_DIR}" | sed -e 's/ /\\\\ /g'` ${TCL_STUB_LIB_FLAG}" - TCL_STUB_LIB_PATH="${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}" - fi - ;; - esac - fi - - # eval is required to do the TCL_DBGX substitution - eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" - eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" - eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" - - AC_SUBST(TCL_VERSION) - AC_SUBST(TCL_PATCH_LEVEL) - AC_SUBST(TCL_BIN_DIR) - AC_SUBST(TCL_SRC_DIR) - - AC_SUBST(TCL_LIB_FILE) - AC_SUBST(TCL_LIB_FLAG) - AC_SUBST(TCL_LIB_SPEC) - - AC_SUBST(TCL_STUB_LIB_FILE) - AC_SUBST(TCL_STUB_LIB_FLAG) - AC_SUBST(TCL_STUB_LIB_SPEC) - - AC_MSG_CHECKING([platform]) - hold_cc=$CC; CC="$TCL_CC" - AC_TRY_COMPILE(,[ - #ifdef _WIN32 - #error win32 - #endif - ], TEA_PLATFORM="unix", - TEA_PLATFORM="windows" - ) - CC=$hold_cc - AC_MSG_RESULT($TEA_PLATFORM) - - # The BUILD_$pkg is to define the correct extern storage class - # handling when making this package - AC_DEFINE_UNQUOTED(BUILD_${PACKAGE_NAME}, [], - [Building extension source?]) - # Do this here as we have fully defined TEA_PLATFORM now - if test "${TEA_PLATFORM}" = "windows" ; then - EXEEXT=".exe" - CLEANFILES="$CLEANFILES *.lib *.dll *.pdb *.exp" - fi - - # TEA specific: - AC_SUBST(CLEANFILES) - AC_SUBST(TCL_LIBS) - AC_SUBST(TCL_DEFS) - AC_SUBST(TCL_EXTRA_CFLAGS) - AC_SUBST(TCL_LD_FLAGS) - AC_SUBST(TCL_SHLIB_LD_LIBS) -]) - -#------------------------------------------------------------------------ -# TEA_LOAD_TKCONFIG -- -# -# Load the tkConfig.sh file -# -# Arguments: -# -# Requires the following vars to be set: -# TK_BIN_DIR -# -# Results: -# -# Sets the following vars that should be in tkConfig.sh: -# TK_BIN_DIR -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_LOAD_TKCONFIG], [ - AC_MSG_CHECKING([for existence of ${TK_BIN_DIR}/tkConfig.sh]) - - if test -f "${TK_BIN_DIR}/tkConfig.sh" ; then - AC_MSG_RESULT([loading]) - . "${TK_BIN_DIR}/tkConfig.sh" - else - AC_MSG_RESULT([could not find ${TK_BIN_DIR}/tkConfig.sh]) - fi - - # eval is required to do the TK_DBGX substitution - eval "TK_LIB_FILE=\"${TK_LIB_FILE}\"" - eval "TK_STUB_LIB_FILE=\"${TK_STUB_LIB_FILE}\"" - - # If the TK_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. - # For example, the variable TK_LIB_SPEC will be set to the value - # of TK_BUILD_LIB_SPEC. An extension should make use of TK_LIB_SPEC - # instead of TK_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. - if test -f "${TK_BIN_DIR}/Makefile" ; then - TK_LIB_SPEC="${TK_BUILD_LIB_SPEC}" - TK_STUB_LIB_SPEC="${TK_BUILD_STUB_LIB_SPEC}" - TK_STUB_LIB_PATH="${TK_BUILD_STUB_LIB_PATH}" - elif test "`uname -s`" = "Darwin"; then - # If Tk was built as a framework, attempt to use the libraries - # from the framework at the given location so that linking works - # against Tk.framework installed in an arbitrary location. 
- case ${TK_DEFS} in - *TK_FRAMEWORK*) - if test -f "${TK_BIN_DIR}/${TK_LIB_FILE}"; then - for i in "`cd "${TK_BIN_DIR}"; pwd`" \ - "`cd "${TK_BIN_DIR}"/../..; pwd`"; do - if test "`basename "$i"`" = "${TK_LIB_FILE}.framework"; then - TK_LIB_SPEC="-F`dirname "$i" | sed -e 's/ /\\\\ /g'` -framework ${TK_LIB_FILE}" - break - fi - done - fi - if test -f "${TK_BIN_DIR}/${TK_STUB_LIB_FILE}"; then - TK_STUB_LIB_SPEC="-L` echo "${TK_BIN_DIR}" | sed -e 's/ /\\\\ /g'` ${TK_STUB_LIB_FLAG}" - TK_STUB_LIB_PATH="${TK_BIN_DIR}/${TK_STUB_LIB_FILE}" - fi - ;; - esac - fi - - # eval is required to do the TK_DBGX substitution - eval "TK_LIB_FLAG=\"${TK_LIB_FLAG}\"" - eval "TK_LIB_SPEC=\"${TK_LIB_SPEC}\"" - eval "TK_STUB_LIB_FLAG=\"${TK_STUB_LIB_FLAG}\"" - eval "TK_STUB_LIB_SPEC=\"${TK_STUB_LIB_SPEC}\"" - - # TEA specific: Ensure windowingsystem is defined - if test "${TEA_PLATFORM}" = "unix" ; then - case ${TK_DEFS} in - *MAC_OSX_TK*) - AC_DEFINE(MAC_OSX_TK, 1, [Are we building against Mac OS X TkAqua?]) - TEA_WINDOWINGSYSTEM="aqua" - ;; - *) - TEA_WINDOWINGSYSTEM="x11" - ;; - esac - elif test "${TEA_PLATFORM}" = "windows" ; then - TEA_WINDOWINGSYSTEM="win32" - fi - - AC_SUBST(TK_VERSION) - AC_SUBST(TK_BIN_DIR) - AC_SUBST(TK_SRC_DIR) - - AC_SUBST(TK_LIB_FILE) - AC_SUBST(TK_LIB_FLAG) - AC_SUBST(TK_LIB_SPEC) - - AC_SUBST(TK_STUB_LIB_FILE) - AC_SUBST(TK_STUB_LIB_FLAG) - AC_SUBST(TK_STUB_LIB_SPEC) - - # TEA specific: - AC_SUBST(TK_LIBS) - AC_SUBST(TK_XINCLUDES) -]) - -#------------------------------------------------------------------------ -# TEA_PROG_TCLSH -# Determine the fully qualified path name of the tclsh executable -# in the Tcl build directory or the tclsh installed in a bin -# directory. This macro will correctly determine the name -# of the tclsh executable even if tclsh has not yet been -# built in the build directory. The tclsh found is always -# associated with a tclConfig.sh file. This tclsh should be used -# only for running extension test cases. It should never be -# or generation of files (like pkgIndex.tcl) at build time. -# -# Arguments: -# none -# -# Results: -# Substitutes the following vars: -# TCLSH_PROG -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PROG_TCLSH], [ - AC_MSG_CHECKING([for tclsh]) - if test -f "${TCL_BIN_DIR}/Makefile" ; then - # tclConfig.sh is in Tcl build directory - if test "${TEA_PLATFORM}" = "windows"; then - TCLSH_PROG="${TCL_BIN_DIR}/tclsh${TCL_MAJOR_VERSION}${TCL_MINOR_VERSION}${TCL_DBGX}${EXEEXT}" - else - TCLSH_PROG="${TCL_BIN_DIR}/tclsh" - fi - else - # tclConfig.sh is in install location - if test "${TEA_PLATFORM}" = "windows"; then - TCLSH_PROG="tclsh${TCL_MAJOR_VERSION}${TCL_MINOR_VERSION}${TCL_DBGX}${EXEEXT}" - else - TCLSH_PROG="tclsh${TCL_MAJOR_VERSION}.${TCL_MINOR_VERSION}${TCL_DBGX}" - fi - list="`ls -d ${TCL_BIN_DIR}/../bin 2>/dev/null` \ - `ls -d ${TCL_BIN_DIR}/.. 2>/dev/null` \ - `ls -d ${TCL_PREFIX}/bin 2>/dev/null`" - for i in $list ; do - if test -f "$i/${TCLSH_PROG}" ; then - REAL_TCL_BIN_DIR="`cd "$i"; pwd`/" - break - fi - done - TCLSH_PROG="${REAL_TCL_BIN_DIR}${TCLSH_PROG}" - fi - AC_MSG_RESULT([${TCLSH_PROG}]) - AC_SUBST(TCLSH_PROG) -]) - -#------------------------------------------------------------------------ -# TEA_PROG_WISH -# Determine the fully qualified path name of the wish executable -# in the Tk build directory or the wish installed in a bin -# directory. This macro will correctly determine the name -# of the wish executable even if wish has not yet been -# built in the build directory. 
The wish found is always -# associated with a tkConfig.sh file. This wish should be used -# only for running extension test cases. It should never be -# or generation of files (like pkgIndex.tcl) at build time. -# -# Arguments: -# none -# -# Results: -# Substitutes the following vars: -# WISH_PROG -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PROG_WISH], [ - AC_MSG_CHECKING([for wish]) - if test -f "${TK_BIN_DIR}/Makefile" ; then - # tkConfig.sh is in Tk build directory - if test "${TEA_PLATFORM}" = "windows"; then - WISH_PROG="${TK_BIN_DIR}/wish${TK_MAJOR_VERSION}${TK_MINOR_VERSION}${TK_DBGX}${EXEEXT}" - else - WISH_PROG="${TK_BIN_DIR}/wish" - fi - else - # tkConfig.sh is in install location - if test "${TEA_PLATFORM}" = "windows"; then - WISH_PROG="wish${TK_MAJOR_VERSION}${TK_MINOR_VERSION}${TK_DBGX}${EXEEXT}" - else - WISH_PROG="wish${TK_MAJOR_VERSION}.${TK_MINOR_VERSION}${TK_DBGX}" - fi - list="`ls -d ${TK_BIN_DIR}/../bin 2>/dev/null` \ - `ls -d ${TK_BIN_DIR}/.. 2>/dev/null` \ - `ls -d ${TK_PREFIX}/bin 2>/dev/null`" - for i in $list ; do - if test -f "$i/${WISH_PROG}" ; then - REAL_TK_BIN_DIR="`cd "$i"; pwd`/" - break - fi - done - WISH_PROG="${REAL_TK_BIN_DIR}${WISH_PROG}" - fi - AC_MSG_RESULT([${WISH_PROG}]) - AC_SUBST(WISH_PROG) -]) - -#------------------------------------------------------------------------ -# TEA_ENABLE_SHARED -- -# -# Allows the building of shared libraries -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --enable-shared=yes|no -# -# Defines the following vars: -# STATIC_BUILD Used for building import/export libraries -# on Windows. -# -# Sets the following vars: -# SHARED_BUILD Value of 1 or 0 -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_ENABLE_SHARED], [ - AC_MSG_CHECKING([how to build libraries]) - AC_ARG_ENABLE(shared, - AC_HELP_STRING([--enable-shared], - [build and link with shared libraries (default: on)]), - [tcl_ok=$enableval], [tcl_ok=yes]) - - if test "${enable_shared+set}" = set; then - enableval="$enable_shared" - tcl_ok=$enableval - else - tcl_ok=yes - fi - - if test "$tcl_ok" = "yes" ; then - AC_MSG_RESULT([shared]) - SHARED_BUILD=1 - else - AC_MSG_RESULT([static]) - SHARED_BUILD=0 - AC_DEFINE(STATIC_BUILD, 1, [Is this a static build?]) - fi - AC_SUBST(SHARED_BUILD) -]) - -#------------------------------------------------------------------------ -# TEA_ENABLE_THREADS -- -# -# Specify if thread support should be enabled. If "yes" is specified -# as an arg (optional), threads are enabled by default, "no" means -# threads are disabled. "yes" is the default. -# -# TCL_THREADS is checked so that if you are compiling an extension -# against a threaded core, your extension must be compiled threaded -# as well. -# -# Note that it is legal to have a thread enabled extension run in a -# threaded or non-threaded Tcl core, but a non-threaded extension may -# only run in a non-threaded Tcl core. 
-# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --enable-threads -# -# Sets the following vars: -# THREADS_LIBS Thread library(s) -# -# Defines the following vars: -# TCL_THREADS -# _REENTRANT -# _THREAD_SAFE -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_ENABLE_THREADS], [ - AC_ARG_ENABLE(threads, - AC_HELP_STRING([--enable-threads], - [build with threads]), - [tcl_ok=$enableval], [tcl_ok=yes]) - - if test "${enable_threads+set}" = set; then - enableval="$enable_threads" - tcl_ok=$enableval - else - tcl_ok=yes - fi - - if test "$tcl_ok" = "yes" -o "${TCL_THREADS}" = 1; then - TCL_THREADS=1 - - if test "${TEA_PLATFORM}" != "windows" ; then - # We are always OK on Windows, so check what this platform wants: - - # USE_THREAD_ALLOC tells us to try the special thread-based - # allocator that significantly reduces lock contention - AC_DEFINE(USE_THREAD_ALLOC, 1, - [Do we want to use the threaded memory allocator?]) - AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) - if test "`uname -s`" = "SunOS" ; then - AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, - [Do we really want to follow the standard? Yes we do!]) - fi - AC_DEFINE(_THREAD_SAFE, 1, [Do we want the thread-safe OS API?]) - AC_CHECK_LIB(pthread,pthread_mutex_init,tcl_ok=yes,tcl_ok=no) - if test "$tcl_ok" = "no"; then - # Check a little harder for __pthread_mutex_init in the same - # library, as some systems hide it there until pthread.h is - # defined. We could alternatively do an AC_TRY_COMPILE with - # pthread.h, but that will work with libpthread really doesn't - # exist, like AIX 4.2. [Bug: 4359] - AC_CHECK_LIB(pthread, __pthread_mutex_init, - tcl_ok=yes, tcl_ok=no) - fi - - if test "$tcl_ok" = "yes"; then - # The space is needed - THREADS_LIBS=" -lpthread" - else - AC_CHECK_LIB(pthreads, pthread_mutex_init, - tcl_ok=yes, tcl_ok=no) - if test "$tcl_ok" = "yes"; then - # The space is needed - THREADS_LIBS=" -lpthreads" - else - AC_CHECK_LIB(c, pthread_mutex_init, - tcl_ok=yes, tcl_ok=no) - if test "$tcl_ok" = "no"; then - AC_CHECK_LIB(c_r, pthread_mutex_init, - tcl_ok=yes, tcl_ok=no) - if test "$tcl_ok" = "yes"; then - # The space is needed - THREADS_LIBS=" -pthread" - else - TCL_THREADS=0 - AC_MSG_WARN([Do not know how to find pthread lib on your system - thread support disabled]) - fi - fi - fi - fi - fi - else - TCL_THREADS=0 - fi - # Do checking message here to not mess up interleaved configure output - AC_MSG_CHECKING([for building with threads]) - if test "${TCL_THREADS}" = 1; then - AC_DEFINE(TCL_THREADS, 1, [Are we building with threads enabled?]) - AC_MSG_RESULT([yes (default)]) - else - AC_MSG_RESULT([no]) - fi - # TCL_THREADS sanity checking. See if our request for building with - # threads is the same as the way Tcl was built. If not, warn the user. - case ${TCL_DEFS} in - *THREADS=1*) - if test "${TCL_THREADS}" = "0"; then - AC_MSG_WARN([ - Building ${PACKAGE_NAME} without threads enabled, but building against Tcl - that IS thread-enabled. It is recommended to use --enable-threads.]) - fi - ;; - *) - if test "${TCL_THREADS}" = "1"; then - AC_MSG_WARN([ - --enable-threads requested, but building against a Tcl that is NOT - thread-enabled. This is an OK configuration that will also run in - a thread-enabled core.]) - fi - ;; - esac - AC_SUBST(TCL_THREADS) -]) - -#------------------------------------------------------------------------ -# TEA_ENABLE_SYMBOLS -- -# -# Specify if debugging symbols should be used. 
-# Memory (TCL_MEM_DEBUG) debugging can also be enabled. -# -# Arguments: -# none -# -# TEA varies from core Tcl in that C|LDFLAGS_DEFAULT receives -# the value of C|LDFLAGS_OPTIMIZE|DEBUG already substituted. -# Requires the following vars to be set in the Makefile: -# CFLAGS_DEFAULT -# LDFLAGS_DEFAULT -# -# Results: -# -# Adds the following arguments to configure: -# --enable-symbols -# -# Defines the following vars: -# CFLAGS_DEFAULT Sets to $(CFLAGS_DEBUG) if true -# Sets to "$(CFLAGS_OPTIMIZE) -DNDEBUG" if false -# LDFLAGS_DEFAULT Sets to $(LDFLAGS_DEBUG) if true -# Sets to $(LDFLAGS_OPTIMIZE) if false -# DBGX Formerly used as debug library extension; -# always blank now. -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_ENABLE_SYMBOLS], [ - dnl TEA specific: Make sure we are initialized - AC_REQUIRE([TEA_CONFIG_CFLAGS]) - AC_MSG_CHECKING([for build with symbols]) - AC_ARG_ENABLE(symbols, - AC_HELP_STRING([--enable-symbols], - [build with debugging symbols (default: off)]), - [tcl_ok=$enableval], [tcl_ok=no]) - DBGX="" - if test "$tcl_ok" = "no"; then - CFLAGS_DEFAULT="${CFLAGS_OPTIMIZE} -DNDEBUG" - LDFLAGS_DEFAULT="${LDFLAGS_OPTIMIZE}" - AC_MSG_RESULT([no]) - else - CFLAGS_DEFAULT="${CFLAGS_DEBUG}" - LDFLAGS_DEFAULT="${LDFLAGS_DEBUG}" - if test "$tcl_ok" = "yes"; then - AC_MSG_RESULT([yes (standard debugging)]) - fi - fi - # TEA specific: - if test "${TEA_PLATFORM}" != "windows" ; then - LDFLAGS_DEFAULT="${LDFLAGS}" - fi - AC_SUBST(CFLAGS_DEFAULT) - AC_SUBST(LDFLAGS_DEFAULT) - AC_SUBST(TCL_DBGX) - - if test "$tcl_ok" = "mem" -o "$tcl_ok" = "all"; then - AC_DEFINE(TCL_MEM_DEBUG, 1, [Is memory debugging enabled?]) - fi - - if test "$tcl_ok" != "yes" -a "$tcl_ok" != "no"; then - if test "$tcl_ok" = "all"; then - AC_MSG_RESULT([enabled symbols mem debugging]) - else - AC_MSG_RESULT([enabled $tcl_ok debugging]) - fi - fi -]) - -#------------------------------------------------------------------------ -# TEA_ENABLE_LANGINFO -- -# -# Allows use of modern nl_langinfo check for better l10n. -# This is only relevant for Unix. -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --enable-langinfo=yes|no (default is yes) -# -# Defines the following vars: -# HAVE_LANGINFO Triggers use of nl_langinfo if defined. -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_ENABLE_LANGINFO], [ - AC_ARG_ENABLE(langinfo, - AC_HELP_STRING([--enable-langinfo], - [use nl_langinfo if possible to determine encoding at startup, otherwise use old heuristic (default: on)]), - [langinfo_ok=$enableval], [langinfo_ok=yes]) - - HAVE_LANGINFO=0 - if test "$langinfo_ok" = "yes"; then - AC_CHECK_HEADER(langinfo.h,[langinfo_ok=yes],[langinfo_ok=no]) - fi - AC_MSG_CHECKING([whether to use nl_langinfo]) - if test "$langinfo_ok" = "yes"; then - AC_CACHE_VAL(tcl_cv_langinfo_h, [ - AC_TRY_COMPILE([#include ], [nl_langinfo(CODESET);], - [tcl_cv_langinfo_h=yes],[tcl_cv_langinfo_h=no])]) - AC_MSG_RESULT([$tcl_cv_langinfo_h]) - if test $tcl_cv_langinfo_h = yes; then - AC_DEFINE(HAVE_LANGINFO, 1, [Do we have nl_langinfo()?]) - fi - else - AC_MSG_RESULT([$langinfo_ok]) - fi -]) - -#-------------------------------------------------------------------- -# TEA_CONFIG_SYSTEM -# -# Determine what the system is (some things cannot be easily checked -# on a feature-driven basis, alas). This can usually be done via the -# "uname" command. 
-# -# Arguments: -# none -# -# Results: -# Defines the following var: -# -# system - System/platform/version identification code. -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_CONFIG_SYSTEM], [ - AC_CACHE_CHECK([system version], tcl_cv_sys_version, [ - # TEA specific: - if test "${TEA_PLATFORM}" = "windows" ; then - tcl_cv_sys_version=windows - else - tcl_cv_sys_version=`uname -s`-`uname -r` - if test "$?" -ne 0 ; then - AC_MSG_WARN([can't find uname command]) - tcl_cv_sys_version=unknown - else - if test "`uname -s`" = "AIX" ; then - tcl_cv_sys_version=AIX-`uname -v`.`uname -r` - fi - fi - fi - ]) - system=$tcl_cv_sys_version -]) - -#-------------------------------------------------------------------- -# TEA_CONFIG_CFLAGS -# -# Try to determine the proper flags to pass to the compiler -# for building shared libraries and other such nonsense. -# -# Arguments: -# none -# -# Results: -# -# Defines and substitutes the following vars: -# -# DL_OBJS, DL_LIBS - removed for TEA, only needed by core. -# LDFLAGS - Flags to pass to the compiler when linking object -# files into an executable application binary such -# as tclsh. -# LD_SEARCH_FLAGS-Flags to pass to ld, such as "-R /usr/local/tcl/lib", -# that tell the run-time dynamic linker where to look -# for shared libraries such as libtcl.so. Depends on -# the variable LIB_RUNTIME_DIR in the Makefile. Could -# be the same as CC_SEARCH_FLAGS if ${CC} is used to link. -# CC_SEARCH_FLAGS-Flags to pass to ${CC}, such as "-Wl,-rpath,/usr/local/tcl/lib", -# that tell the run-time dynamic linker where to look -# for shared libraries such as libtcl.so. Depends on -# the variable LIB_RUNTIME_DIR in the Makefile. -# SHLIB_CFLAGS - Flags to pass to cc when compiling the components -# of a shared library (may request position-independent -# code, among other things). -# SHLIB_LD - Base command to use for combining object files -# into a shared library. -# SHLIB_LD_LIBS - Dependent libraries for the linker to scan when -# creating shared libraries. This symbol typically -# goes at the end of the "ld" commands that build -# shared libraries. The value of the symbol defaults to -# "${LIBS}" if all of the dependent libraries should -# be specified when creating a shared library. If -# dependent libraries should not be specified (as on -# SunOS 4.x, where they cause the link to fail, or in -# general if Tcl and Tk aren't themselves shared -# libraries), then this symbol has an empty string -# as its value. -# SHLIB_SUFFIX - Suffix to use for the names of dynamically loadable -# extensions. An empty string means we don't know how -# to use shared libraries on this platform. -# LIB_SUFFIX - Specifies everything that comes after the "libfoo" -# in a static or shared library name, using the $PACKAGE_VERSION variable -# to put the version in the right place. This is used -# by platforms that need non-standard library names. -# Examples: ${PACKAGE_VERSION}.so.1.1 on NetBSD, since it needs -# to have a version after the .so, and ${PACKAGE_VERSION}.a -# on AIX, since a shared library needs to have -# a .a extension whereas shared objects for loadable -# extensions have a .so extension. Defaults to -# ${PACKAGE_VERSION}${SHLIB_SUFFIX}. 
-# CFLAGS_DEBUG - -# Flags used when running the compiler in debug mode -# CFLAGS_OPTIMIZE - -# Flags used when running the compiler in optimize mode -# CFLAGS - Additional CFLAGS added as necessary (usually 64-bit) -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_CONFIG_CFLAGS], [ - dnl TEA specific: Make sure we are initialized - AC_REQUIRE([TEA_INIT]) - - # Step 0.a: Enable 64 bit support? - - AC_MSG_CHECKING([if 64bit support is requested]) - AC_ARG_ENABLE(64bit, - AC_HELP_STRING([--enable-64bit], - [enable 64bit support (default: off)]), - [do64bit=$enableval], [do64bit=no]) - AC_MSG_RESULT([$do64bit]) - - # Step 0.b: Enable Solaris 64 bit VIS support? - - AC_MSG_CHECKING([if 64bit Sparc VIS support is requested]) - AC_ARG_ENABLE(64bit-vis, - AC_HELP_STRING([--enable-64bit-vis], - [enable 64bit Sparc VIS support (default: off)]), - [do64bitVIS=$enableval], [do64bitVIS=no]) - AC_MSG_RESULT([$do64bitVIS]) - # Force 64bit on with VIS - AS_IF([test "$do64bitVIS" = "yes"], [do64bit=yes]) - - # Step 0.c: Check if visibility support is available. Do this here so - # that platform specific alternatives can be used below if this fails. - - AC_CACHE_CHECK([if compiler supports visibility "hidden"], - tcl_cv_cc_visibility_hidden, [ - hold_cflags=$CFLAGS; CFLAGS="$CFLAGS -Werror" - AC_TRY_LINK([ - extern __attribute__((__visibility__("hidden"))) void f(void); - void f(void) {}], [f();], tcl_cv_cc_visibility_hidden=yes, - tcl_cv_cc_visibility_hidden=no) - CFLAGS=$hold_cflags]) - AS_IF([test $tcl_cv_cc_visibility_hidden = yes], [ - AC_DEFINE(MODULE_SCOPE, - [extern __attribute__((__visibility__("hidden")))], - [Compiler support for module scope symbols]) - AC_DEFINE(HAVE_HIDDEN, [1], [Compiler support for module scope symbols]) - ]) - - # Step 0.d: Disable -rpath support? - - AC_MSG_CHECKING([if rpath support is requested]) - AC_ARG_ENABLE(rpath, - AC_HELP_STRING([--disable-rpath], - [disable rpath support (default: on)]), - [doRpath=$enableval], [doRpath=yes]) - AC_MSG_RESULT([$doRpath]) - - # TEA specific: Cross-compiling options for Windows/CE builds? - - AS_IF([test "${TEA_PLATFORM}" = windows], [ - AC_MSG_CHECKING([if Windows/CE build is requested]) - AC_ARG_ENABLE(wince, - AC_HELP_STRING([--enable-wince], - [enable Win/CE support (where applicable)]), - [doWince=$enableval], [doWince=no]) - AC_MSG_RESULT([$doWince]) - ]) - - # Set the variable "system" to hold the name and version number - # for the system. - - TEA_CONFIG_SYSTEM - - # Require ranlib early so we can override it in special cases below. - - AC_REQUIRE([AC_PROG_RANLIB]) - - # Set configuration options based on system name and version. - # This is similar to Tcl's unix/tcl.m4 except that we've added a - # "windows" case and removed some core-only vars. - - do64bit_ok=no - # default to '{$LIBS}' and set to "" on per-platform necessary basis - SHLIB_LD_LIBS='${LIBS}' - # When ld needs options to work in 64-bit mode, put them in - # LDFLAGS_ARCH so they eventually end up in LDFLAGS even if [load] - # is disabled by the user. 
[Bug 1016796] - LDFLAGS_ARCH="" - UNSHARED_LIB_SUFFIX="" - # TEA specific: use PACKAGE_VERSION instead of VERSION - TCL_TRIM_DOTS='`echo ${PACKAGE_VERSION} | tr -d .`' - ECHO_VERSION='`echo ${PACKAGE_VERSION}`' - TCL_LIB_VERSIONS_OK=ok - CFLAGS_DEBUG=-g - AS_IF([test "$GCC" = yes], [ - CFLAGS_OPTIMIZE=-O2 - CFLAGS_WARNING="-Wall" - ], [ - CFLAGS_OPTIMIZE=-O - CFLAGS_WARNING="" - ]) - AC_CHECK_TOOL(AR, ar) - STLIB_LD='${AR} cr' - LD_LIBRARY_PATH_VAR="LD_LIBRARY_PATH" - AS_IF([test "x$SHLIB_VERSION" = x],[SHLIB_VERSION="1.0"]) - case $system in - # TEA specific: - windows) - # This is a 2-stage check to make sure we have the 64-bit SDK - # We have to know where the SDK is installed. - # This magic is based on MS Platform SDK for Win2003 SP1 - hobbs - # MACHINE is IX86 for LINK, but this is used by the manifest, - # which requires x86|amd64|ia64. - MACHINE="X86" - if test "$do64bit" != "no" ; then - if test "x${MSSDK}x" = "xx" ; then - MSSDK="C:/Progra~1/Microsoft Platform SDK" - fi - MSSDK=`echo "$MSSDK" | sed -e 's!\\\!/!g'` - PATH64="" - case "$do64bit" in - amd64|x64|yes) - MACHINE="AMD64" ; # default to AMD64 64-bit build - PATH64="${MSSDK}/Bin/Win64/x86/AMD64" - ;; - ia64) - MACHINE="IA64" - PATH64="${MSSDK}/Bin/Win64" - ;; - esac - if test "$GCC" != "yes" -a ! -d "${PATH64}" ; then - AC_MSG_WARN([Could not find 64-bit $MACHINE SDK to enable 64bit mode]) - AC_MSG_WARN([Ensure latest Platform SDK is installed]) - do64bit="no" - else - AC_MSG_RESULT([ Using 64-bit $MACHINE mode]) - do64bit_ok="yes" - fi - fi - - if test "$doWince" != "no" ; then - if test "$do64bit" != "no" ; then - AC_MSG_ERROR([Windows/CE and 64-bit builds incompatible]) - fi - if test "$GCC" = "yes" ; then - AC_MSG_ERROR([Windows/CE and GCC builds incompatible]) - fi - TEA_PATH_CELIB - # Set defaults for common evc4/PPC2003 setup - # Currently Tcl requires 300+, possibly 420+ for sockets - CEVERSION=420; # could be 211 300 301 400 420 ... - TARGETCPU=ARMV4; # could be ARMV4 ARM MIPS SH3 X86 ... - ARCH=ARM; # could be ARM MIPS X86EM ... - PLATFORM="Pocket PC 2003"; # or "Pocket PC 2002" - if test "$doWince" != "yes"; then - # If !yes then the user specified something - # Reset ARCH to allow user to skip specifying it - ARCH= - eval `echo $doWince | awk -F, '{ \ - if (length([$]1)) { printf "CEVERSION=\"%s\"\n", [$]1; \ - if ([$]1 < 400) { printf "PLATFORM=\"Pocket PC 2002\"\n" } }; \ - if (length([$]2)) { printf "TARGETCPU=\"%s\"\n", toupper([$]2) }; \ - if (length([$]3)) { printf "ARCH=\"%s\"\n", toupper([$]3) }; \ - if (length([$]4)) { printf "PLATFORM=\"%s\"\n", [$]4 }; \ - }'` - if test "x${ARCH}" = "x" ; then - ARCH=$TARGETCPU; - fi - fi - OSVERSION=WCE$CEVERSION; - if test "x${WCEROOT}" = "x" ; then - WCEROOT="C:/Program Files/Microsoft eMbedded C++ 4.0" - if test ! -d "${WCEROOT}" ; then - WCEROOT="C:/Program Files/Microsoft eMbedded Tools" - fi - fi - if test "x${SDKROOT}" = "x" ; then - SDKROOT="C:/Program Files/Windows CE Tools" - if test ! -d "${SDKROOT}" ; then - SDKROOT="C:/Windows CE Tools" - fi - fi - WCEROOT=`echo "$WCEROOT" | sed -e 's!\\\!/!g'` - SDKROOT=`echo "$SDKROOT" | sed -e 's!\\\!/!g'` - if test ! -d "${SDKROOT}/${OSVERSION}/${PLATFORM}/Lib/${TARGETCPU}" \ - -o ! -d "${WCEROOT}/EVC/${OSVERSION}/bin"; then - AC_MSG_ERROR([could not find PocketPC SDK or target compiler to enable WinCE mode [$CEVERSION,$TARGETCPU,$ARCH,$PLATFORM]]) - doWince="no" - else - # We could PATH_NOSPACE these, but that's not important, - # as long as we quote them when used. 
- CEINCLUDE="${SDKROOT}/${OSVERSION}/${PLATFORM}/include" - if test -d "${CEINCLUDE}/${TARGETCPU}" ; then - CEINCLUDE="${CEINCLUDE}/${TARGETCPU}" - fi - CELIBPATH="${SDKROOT}/${OSVERSION}/${PLATFORM}/Lib/${TARGETCPU}" - fi - fi - - if test "$GCC" != "yes" ; then - if test "${SHARED_BUILD}" = "0" ; then - runtime=-MT - else - runtime=-MD - fi - - if test "$do64bit" != "no" ; then - # All this magic is necessary for the Win64 SDK RC1 - hobbs - CC="\"${PATH64}/cl.exe\"" - CFLAGS="${CFLAGS} -I\"${MSSDK}/Include\" -I\"${MSSDK}/Include/crt\" -I\"${MSSDK}/Include/crt/sys\"" - RC="\"${MSSDK}/bin/rc.exe\"" - lflags="-nologo -MACHINE:${MACHINE} -LIBPATH:\"${MSSDK}/Lib/${MACHINE}\"" - LINKBIN="\"${PATH64}/link.exe\"" - CFLAGS_DEBUG="-nologo -Zi -Od -W3 ${runtime}d" - CFLAGS_OPTIMIZE="-nologo -O2 -W2 ${runtime}" - # Avoid 'unresolved external symbol __security_cookie' - # errors, c.f. http://support.microsoft.com/?id=894573 - TEA_ADD_LIBS([bufferoverflowU.lib]) - elif test "$doWince" != "no" ; then - CEBINROOT="${WCEROOT}/EVC/${OSVERSION}/bin" - if test "${TARGETCPU}" = "X86"; then - CC="\"${CEBINROOT}/cl.exe\"" - else - CC="\"${CEBINROOT}/cl${ARCH}.exe\"" - fi - CFLAGS="$CFLAGS -I\"${CELIB_DIR}/inc\" -I\"${CEINCLUDE}\"" - RC="\"${WCEROOT}/Common/EVC/bin/rc.exe\"" - arch=`echo ${ARCH} | awk '{print tolower([$]0)}'` - defs="${ARCH} _${ARCH}_ ${arch} PALM_SIZE _MT _WINDOWS" - if test "${SHARED_BUILD}" = "1" ; then - # Static CE builds require static celib as well - defs="${defs} _DLL" - fi - for i in $defs ; do - AC_DEFINE_UNQUOTED($i, 1, [WinCE def ]$i) - done - AC_DEFINE_UNQUOTED(_WIN32_WCE, $CEVERSION, [_WIN32_WCE version]) - AC_DEFINE_UNQUOTED(UNDER_CE, $CEVERSION, [UNDER_CE version]) - CFLAGS_DEBUG="-nologo -Zi -Od" - CFLAGS_OPTIMIZE="-nologo -Ox" - lversion=`echo ${CEVERSION} | sed -e 's/\(.\)\(..\)/\1\.\2/'` - lflags="-MACHINE:${ARCH} -LIBPATH:\"${CELIBPATH}\" -subsystem:windowsce,${lversion} -nologo" - LINKBIN="\"${CEBINROOT}/link.exe\"" - AC_SUBST(CELIB_DIR) - else - RC="rc" - lflags="-nologo" - LINKBIN="link" - CFLAGS_DEBUG="-nologo -Z7 -Od -W3 -WX ${runtime}d" - CFLAGS_OPTIMIZE="-nologo -O2 -W2 ${runtime}" - fi - fi - - if test "$GCC" = "yes"; then - # mingw gcc mode - AC_CHECK_TOOL(RC, windres) - CFLAGS_DEBUG="-g" - CFLAGS_OPTIMIZE="-O2 -fomit-frame-pointer" - SHLIB_LD='${CC} -shared' - UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' - LDFLAGS_CONSOLE="-wl,--subsystem,console ${lflags}" - LDFLAGS_WINDOW="-wl,--subsystem,windows ${lflags}" - - AC_CACHE_CHECK(for cross-compile version of gcc, - ac_cv_cross, - AC_TRY_COMPILE([ - #ifdef _WIN32 - #error cross-compiler - #endif - ], [], - ac_cv_cross=yes, - ac_cv_cross=no) - ) - if test "$ac_cv_cross" = "yes"; then - case "$do64bit" in - amd64|x64|yes) - CC="x86_64-w64-mingw32-gcc" - LD="x86_64-w64-mingw32-ld" - AR="x86_64-w64-mingw32-ar" - RANLIB="x86_64-w64-mingw32-ranlib" - RC="x86_64-w64-mingw32-windres" - ;; - *) - CC="i686-w64-mingw32-gcc" - LD="i686-w64-mingw32-ld" - AR="i686-w64-mingw32-ar" - RANLIB="i686-w64-mingw32-ranlib" - RC="i686-w64-mingw32-windres" - ;; - esac - fi - - else - SHLIB_LD="${LINKBIN} -dll ${lflags}" - # link -lib only works when -lib is the first arg - STLIB_LD="${LINKBIN} -lib ${lflags}" - UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.lib' - PATHTYPE=-w - # For information on what debugtype is most useful, see: - # http://msdn.microsoft.com/library/en-us/dnvc60/html/gendepdebug.asp - # and also - # 
http://msdn2.microsoft.com/en-us/library/y0zzbyt4%28VS.80%29.aspx - # This essentially turns it all on. - LDFLAGS_DEBUG="-debug -debugtype:cv" - LDFLAGS_OPTIMIZE="-release" - if test "$doWince" != "no" ; then - LDFLAGS_CONSOLE="-link ${lflags}" - LDFLAGS_WINDOW=${LDFLAGS_CONSOLE} - else - LDFLAGS_CONSOLE="-link -subsystem:console ${lflags}" - LDFLAGS_WINDOW="-link -subsystem:windows ${lflags}" - fi - fi - - SHLIB_SUFFIX=".dll" - SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.dll' - - TCL_LIB_VERSIONS_OK=nodots - ;; - AIX-*) - AS_IF([test "${TCL_THREADS}" = "1" -a "$GCC" != "yes"], [ - # AIX requires the _r compiler when gcc isn't being used - case "${CC}" in - *_r|*_r\ *) - # ok ... - ;; - *) - # Make sure only first arg gets _r - CC=`echo "$CC" | sed -e 's/^\([[^ ]]*\)/\1_r/'` - ;; - esac - AC_MSG_RESULT([Using $CC for compiling with threads]) - ]) - LIBS="$LIBS -lc" - SHLIB_CFLAGS="" - SHLIB_SUFFIX=".so" - - LD_LIBRARY_PATH_VAR="LIBPATH" - - # Check to enable 64-bit flags for compiler/linker - AS_IF([test "$do64bit" = yes], [ - AS_IF([test "$GCC" = yes], [ - AC_MSG_WARN([64bit mode not supported with GCC on $system]) - ], [ - do64bit_ok=yes - CFLAGS="$CFLAGS -q64" - LDFLAGS_ARCH="-q64" - RANLIB="${RANLIB} -X64" - AR="${AR} -X64" - SHLIB_LD_FLAGS="-b64" - ]) - ]) - - AS_IF([test "`uname -m`" = ia64], [ - # AIX-5 uses ELF style dynamic libraries on IA-64, but not PPC - SHLIB_LD="/usr/ccs/bin/ld -G -z text" - AS_IF([test "$GCC" = yes], [ - CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' - ], [ - CC_SEARCH_FLAGS='-R${LIB_RUNTIME_DIR}' - ]) - LD_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' - ], [ - AS_IF([test "$GCC" = yes], [ - SHLIB_LD='${CC} -shared -Wl,-bexpall' - ], [ - SHLIB_LD="/bin/ld -bhalt:4 -bM:SRE -bexpall -H512 -T512 -bnoentry" - LDFLAGS="$LDFLAGS -brtl" - ]) - SHLIB_LD="${SHLIB_LD} ${SHLIB_LD_FLAGS}" - CC_SEARCH_FLAGS='-L${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - ]) - ;; - BeOS*) - SHLIB_CFLAGS="-fPIC" - SHLIB_LD='${CC} -nostart' - SHLIB_SUFFIX=".so" - - #----------------------------------------------------------- - # Check for inet_ntoa in -lbind, for BeOS (which also needs - # -lsocket, even if the network functions are in -lnet which - # is always linked to, for compatibility. 
- #----------------------------------------------------------- - AC_CHECK_LIB(bind, inet_ntoa, [LIBS="$LIBS -lbind -lsocket"]) - ;; - BSD/OS-4.*) - SHLIB_CFLAGS="-export-dynamic -fPIC" - SHLIB_LD='${CC} -shared' - SHLIB_SUFFIX=".so" - LDFLAGS="$LDFLAGS -export-dynamic" - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - ;; - CYGWIN_*) - SHLIB_CFLAGS="" - SHLIB_LD='${CC} -shared' - SHLIB_SUFFIX=".dll" - EXEEXT=".exe" - do64bit_ok=yes - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - ;; - Haiku*) - LDFLAGS="$LDFLAGS -Wl,--export-dynamic" - SHLIB_CFLAGS="-fPIC" - SHLIB_SUFFIX=".so" - SHLIB_LD='${CC} -shared ${CFLAGS} ${LDFLAGS}' - AC_CHECK_LIB(network, inet_ntoa, [LIBS="$LIBS -lnetwork"]) - ;; - HP-UX-*.11.*) - # Use updated header definitions where possible - AC_DEFINE(_XOPEN_SOURCE_EXTENDED, 1, [Do we want to use the XOPEN network library?]) - # TEA specific: Needed by Tcl, but not most extensions - #AC_DEFINE(_XOPEN_SOURCE, 1, [Do we want to use the XOPEN network library?]) - #LIBS="$LIBS -lxnet" # Use the XOPEN network library - - AS_IF([test "`uname -m`" = ia64], [ - SHLIB_SUFFIX=".so" - # Use newer C++ library for C++ extensions - #if test "$GCC" != "yes" ; then - # CPPFLAGS="-AA" - #fi - ], [ - SHLIB_SUFFIX=".sl" - ]) - AC_CHECK_LIB(dld, shl_load, tcl_ok=yes, tcl_ok=no) - AS_IF([test "$tcl_ok" = yes], [ - LDFLAGS="$LDFLAGS -Wl,-E" - CC_SEARCH_FLAGS='-Wl,+s,+b,${LIB_RUNTIME_DIR}:.' - LD_SEARCH_FLAGS='+s +b ${LIB_RUNTIME_DIR}:.' - LD_LIBRARY_PATH_VAR="SHLIB_PATH" - ]) - AS_IF([test "$GCC" = yes], [ - SHLIB_LD='${CC} -shared' - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - ], [ - CFLAGS="$CFLAGS -z" - # Users may want PA-RISC 1.1/2.0 portable code - needs HP cc - #CFLAGS="$CFLAGS +DAportable" - SHLIB_CFLAGS="+z" - SHLIB_LD="ld -b" - ]) - - # Check to enable 64-bit flags for compiler/linker - AS_IF([test "$do64bit" = "yes"], [ - AS_IF([test "$GCC" = yes], [ - case `${CC} -dumpmachine` in - hppa64*) - # 64-bit gcc in use. Fix flags for GNU ld. - do64bit_ok=yes - SHLIB_LD='${CC} -shared' - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - ;; - *) - AC_MSG_WARN([64bit mode not supported with GCC on $system]) - ;; - esac - ], [ - do64bit_ok=yes - CFLAGS="$CFLAGS +DD64" - LDFLAGS_ARCH="+DD64" - ]) - ]) ;; - IRIX-6.*) - SHLIB_CFLAGS="" - SHLIB_LD="ld -n32 -shared -rdata_shared" - SHLIB_SUFFIX=".so" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) - AS_IF([test "$GCC" = yes], [ - CFLAGS="$CFLAGS -mabi=n32" - LDFLAGS="$LDFLAGS -mabi=n32" - ], [ - case $system in - IRIX-6.3) - # Use to build 6.2 compatible binaries on 6.3. 
- CFLAGS="$CFLAGS -n32 -D_OLD_TERMIOS" - ;; - *) - CFLAGS="$CFLAGS -n32" - ;; - esac - LDFLAGS="$LDFLAGS -n32" - ]) - ;; - IRIX64-6.*) - SHLIB_CFLAGS="" - SHLIB_LD="ld -n32 -shared -rdata_shared" - SHLIB_SUFFIX=".so" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) - - # Check to enable 64-bit flags for compiler/linker - - AS_IF([test "$do64bit" = yes], [ - AS_IF([test "$GCC" = yes], [ - AC_MSG_WARN([64bit mode not supported by gcc]) - ], [ - do64bit_ok=yes - SHLIB_LD="ld -64 -shared -rdata_shared" - CFLAGS="$CFLAGS -64" - LDFLAGS_ARCH="-64" - ]) - ]) - ;; - Linux*|GNU*|NetBSD-Debian) - SHLIB_CFLAGS="-fPIC" - SHLIB_SUFFIX=".so" - - # TEA specific: - CFLAGS_OPTIMIZE="-O2 -fomit-frame-pointer" - - # TEA specific: use LDFLAGS_DEFAULT instead of LDFLAGS - SHLIB_LD='${CC} -shared ${CFLAGS} ${LDFLAGS_DEFAULT}' - LDFLAGS="$LDFLAGS -Wl,--export-dynamic" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - AS_IF([test "`uname -m`" = "alpha"], [CFLAGS="$CFLAGS -mieee"]) - AS_IF([test $do64bit = yes], [ - AC_CACHE_CHECK([if compiler accepts -m64 flag], tcl_cv_cc_m64, [ - hold_cflags=$CFLAGS - CFLAGS="$CFLAGS -m64" - AC_TRY_LINK(,, tcl_cv_cc_m64=yes, tcl_cv_cc_m64=no) - CFLAGS=$hold_cflags]) - AS_IF([test $tcl_cv_cc_m64 = yes], [ - CFLAGS="$CFLAGS -m64" - do64bit_ok=yes - ]) - ]) - - # The combo of gcc + glibc has a bug related to inlining of - # functions like strtod(). The -fno-builtin flag should address - # this problem but it does not work. The -fno-inline flag is kind - # of overkill but it works. Disable inlining only when one of the - # files in compat/*.c is being linked in. - - AS_IF([test x"${USE_COMPAT}" != x],[CFLAGS="$CFLAGS -fno-inline"]) - ;; - Lynx*) - SHLIB_CFLAGS="-fPIC" - SHLIB_SUFFIX=".so" - CFLAGS_OPTIMIZE=-02 - SHLIB_LD='${CC} -shared' - LD_FLAGS="-Wl,--export-dynamic" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - ;; - OpenBSD-*) - arch=`arch -s` - case "$arch" in - vax) - SHLIB_SUFFIX="" - SHARED_LIB_SUFFIX="" - LDFLAGS="" - ;; - *) - SHLIB_CFLAGS="-fPIC" - SHLIB_LD='${CC} -shared ${SHLIB_CFLAGS}' - SHLIB_SUFFIX=".so" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so.${SHLIB_VERSION}' - LDFLAGS="-Wl,-export-dynamic" - ;; - esac - case "$arch" in - vax) - CFLAGS_OPTIMIZE="-O1" - ;; - *) - CFLAGS_OPTIMIZE="-O2" - ;; - esac - AS_IF([test "${TCL_THREADS}" = "1"], [ - # On OpenBSD: Compile with -pthread - # Don't link with -lpthread - LIBS=`echo $LIBS | sed s/-lpthread//` - CFLAGS="$CFLAGS -pthread" - ]) - # OpenBSD doesn't do version numbers with dots. - UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' - TCL_LIB_VERSIONS_OK=nodots - ;; - NetBSD-*) - # NetBSD has ELF and can use 'cc -shared' to build shared libs - SHLIB_CFLAGS="-fPIC" - SHLIB_LD='${CC} -shared ${SHLIB_CFLAGS}' - SHLIB_SUFFIX=".so" - LDFLAGS="$LDFLAGS -export-dynamic" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - AS_IF([test "${TCL_THREADS}" = "1"], [ - # The -pthread needs to go in the CFLAGS, not LIBS - LIBS=`echo $LIBS | sed s/-pthread//` - CFLAGS="$CFLAGS -pthread" - LDFLAGS="$LDFLAGS -pthread" - ]) - ;; - FreeBSD-*) - # This configuration from FreeBSD Ports. 
- SHLIB_CFLAGS="-fPIC" - SHLIB_LD="${CC} -shared" - TCL_SHLIB_LD_EXTRAS="-Wl,-soname=\$[@]" - TK_SHLIB_LD_EXTRAS="-Wl,-soname,\$[@]" - SHLIB_SUFFIX=".so" - LDFLAGS="" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}']) - AS_IF([test "${TCL_THREADS}" = "1"], [ - # The -pthread needs to go in the LDFLAGS, not LIBS - LIBS=`echo $LIBS | sed s/-pthread//` - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - LDFLAGS="$LDFLAGS $PTHREAD_LIBS"]) - case $system in - FreeBSD-3.*) - # Version numbers are dot-stripped by system policy. - TCL_TRIM_DOTS=`echo ${VERSION} | tr -d .` - UNSHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.a' - SHARED_LIB_SUFFIX='${TCL_TRIM_DOTS}.so' - TCL_LIB_VERSIONS_OK=nodots - ;; - esac - ;; - Darwin-*) - CFLAGS_OPTIMIZE="-Os" - SHLIB_CFLAGS="-fno-common" - # To avoid discrepancies between what headers configure sees during - # preprocessing tests and compiling tests, move any -isysroot and - # -mmacosx-version-min flags from CFLAGS to CPPFLAGS: - CPPFLAGS="${CPPFLAGS} `echo " ${CFLAGS}" | \ - awk 'BEGIN {FS=" +-";ORS=" "}; {for (i=2;i<=NF;i++) \ - if ([$]i~/^(isysroot|mmacosx-version-min)/) print "-"[$]i}'`" - CFLAGS="`echo " ${CFLAGS}" | \ - awk 'BEGIN {FS=" +-";ORS=" "}; {for (i=2;i<=NF;i++) \ - if (!([$]i~/^(isysroot|mmacosx-version-min)/)) print "-"[$]i}'`" - AS_IF([test $do64bit = yes], [ - case `arch` in - ppc) - AC_CACHE_CHECK([if compiler accepts -arch ppc64 flag], - tcl_cv_cc_arch_ppc64, [ - hold_cflags=$CFLAGS - CFLAGS="$CFLAGS -arch ppc64 -mpowerpc64 -mcpu=G5" - AC_TRY_LINK(,, tcl_cv_cc_arch_ppc64=yes, - tcl_cv_cc_arch_ppc64=no) - CFLAGS=$hold_cflags]) - AS_IF([test $tcl_cv_cc_arch_ppc64 = yes], [ - CFLAGS="$CFLAGS -arch ppc64 -mpowerpc64 -mcpu=G5" - do64bit_ok=yes - ]);; - i386) - AC_CACHE_CHECK([if compiler accepts -arch x86_64 flag], - tcl_cv_cc_arch_x86_64, [ - hold_cflags=$CFLAGS - CFLAGS="$CFLAGS -arch x86_64" - AC_TRY_LINK(,, tcl_cv_cc_arch_x86_64=yes, - tcl_cv_cc_arch_x86_64=no) - CFLAGS=$hold_cflags]) - AS_IF([test $tcl_cv_cc_arch_x86_64 = yes], [ - CFLAGS="$CFLAGS -arch x86_64" - do64bit_ok=yes - ]);; - *) - AC_MSG_WARN([Don't know how enable 64-bit on architecture `arch`]);; - esac - ], [ - # Check for combined 32-bit and 64-bit fat build - AS_IF([echo "$CFLAGS " |grep -E -q -- '-arch (ppc64|x86_64) ' \ - && echo "$CFLAGS " |grep -E -q -- '-arch (ppc|i386) '], [ - fat_32_64=yes]) - ]) - # TEA specific: use LDFLAGS_DEFAULT instead of LDFLAGS - SHLIB_LD='${CC} -dynamiclib ${CFLAGS} ${LDFLAGS_DEFAULT}' - AC_CACHE_CHECK([if ld accepts -single_module flag], tcl_cv_ld_single_module, [ - hold_ldflags=$LDFLAGS - LDFLAGS="$LDFLAGS -dynamiclib -Wl,-single_module" - AC_TRY_LINK(, [int i;], tcl_cv_ld_single_module=yes, tcl_cv_ld_single_module=no) - LDFLAGS=$hold_ldflags]) - AS_IF([test $tcl_cv_ld_single_module = yes], [ - SHLIB_LD="${SHLIB_LD} -Wl,-single_module" - ]) - # TEA specific: link shlib with current and compatibility version flags - vers=`echo ${PACKAGE_VERSION} | sed -e 's/^\([[0-9]]\{1,5\}\)\(\(\.[[0-9]]\{1,3\}\)\{0,2\}\).*$/\1\2/p' -e d` - SHLIB_LD="${SHLIB_LD} -current_version ${vers:-0} -compatibility_version ${vers:-0}" - SHLIB_SUFFIX=".dylib" - # Don't use -prebind when building for Mac OS X 10.4 or later only: - AS_IF([test "`echo "${MACOSX_DEPLOYMENT_TARGET}" | awk -F '10\\.' '{print int([$]2)}'`" -lt 4 -a \ - "`echo "${CPPFLAGS}" | awk -F '-mmacosx-version-min=10\\.' 
'{print int([$]2)}'`" -lt 4], [ - LDFLAGS="$LDFLAGS -prebind"]) - LDFLAGS="$LDFLAGS -headerpad_max_install_names" - AC_CACHE_CHECK([if ld accepts -search_paths_first flag], - tcl_cv_ld_search_paths_first, [ - hold_ldflags=$LDFLAGS - LDFLAGS="$LDFLAGS -Wl,-search_paths_first" - AC_TRY_LINK(, [int i;], tcl_cv_ld_search_paths_first=yes, - tcl_cv_ld_search_paths_first=no) - LDFLAGS=$hold_ldflags]) - AS_IF([test $tcl_cv_ld_search_paths_first = yes], [ - LDFLAGS="$LDFLAGS -Wl,-search_paths_first" - ]) - AS_IF([test "$tcl_cv_cc_visibility_hidden" != yes], [ - AC_DEFINE(MODULE_SCOPE, [__private_extern__], - [Compiler support for module scope symbols]) - tcl_cv_cc_visibility_hidden=yes - ]) - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - LD_LIBRARY_PATH_VAR="DYLD_LIBRARY_PATH" - # TEA specific: for combined 32 & 64 bit fat builds of Tk - # extensions, verify that 64-bit build is possible. - AS_IF([test "$fat_32_64" = yes && test -n "${TK_BIN_DIR}"], [ - AS_IF([test "${TEA_WINDOWINGSYSTEM}" = x11], [ - AC_CACHE_CHECK([for 64-bit X11], tcl_cv_lib_x11_64, [ - for v in CFLAGS CPPFLAGS LDFLAGS; do - eval 'hold_'$v'="$'$v'";'$v'="`echo "$'$v' "|sed -e "s/-arch ppc / /g" -e "s/-arch i386 / /g"`"' - done - CPPFLAGS="$CPPFLAGS -I/usr/X11R6/include" - LDFLAGS="$LDFLAGS -L/usr/X11R6/lib -lX11" - AC_TRY_LINK([#include ], [XrmInitialize();], - tcl_cv_lib_x11_64=yes, tcl_cv_lib_x11_64=no) - for v in CFLAGS CPPFLAGS LDFLAGS; do - eval $v'="$hold_'$v'"' - done]) - ]) - AS_IF([test "${TEA_WINDOWINGSYSTEM}" = aqua], [ - AC_CACHE_CHECK([for 64-bit Tk], tcl_cv_lib_tk_64, [ - for v in CFLAGS CPPFLAGS LDFLAGS; do - eval 'hold_'$v'="$'$v'";'$v'="`echo "$'$v' "|sed -e "s/-arch ppc / /g" -e "s/-arch i386 / /g"`"' - done - CPPFLAGS="$CPPFLAGS -DUSE_TCL_STUBS=1 -DUSE_TK_STUBS=1 ${TCL_INCLUDES} ${TK_INCLUDES}" - LDFLAGS="$LDFLAGS ${TCL_STUB_LIB_SPEC} ${TK_STUB_LIB_SPEC}" - AC_TRY_LINK([#include ], [Tk_InitStubs(NULL, "", 0);], - tcl_cv_lib_tk_64=yes, tcl_cv_lib_tk_64=no) - for v in CFLAGS CPPFLAGS LDFLAGS; do - eval $v'="$hold_'$v'"' - done]) - ]) - # remove 64-bit arch flags from CFLAGS et al. if configuration - # does not support 64-bit. - AS_IF([test "$tcl_cv_lib_tk_64" = no -o "$tcl_cv_lib_x11_64" = no], [ - AC_MSG_NOTICE([Removing 64-bit architectures from compiler & linker flags]) - for v in CFLAGS CPPFLAGS LDFLAGS; do - eval $v'="`echo "$'$v' "|sed -e "s/-arch ppc64 / /g" -e "s/-arch x86_64 / /g"`"' - done]) - ]) - ;; - OS/390-*) - CFLAGS_OPTIMIZE="" # Optimizer is buggy - AC_DEFINE(_OE_SOCKETS, 1, # needed in sys/socket.h - [Should OS/390 do the right thing with sockets?]) - ;; - OSF1-V*) - # Digital OSF/1 - SHLIB_CFLAGS="" - AS_IF([test "$SHARED_BUILD" = 1], [ - SHLIB_LD='ld -shared -expect_unresolved "*"' - ], [ - SHLIB_LD='ld -non_shared -expect_unresolved "*"' - ]) - SHLIB_SUFFIX=".so" - AS_IF([test $doRpath = yes], [ - CC_SEARCH_FLAGS='-Wl,-rpath,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-rpath ${LIB_RUNTIME_DIR}']) - AS_IF([test "$GCC" = yes], [CFLAGS="$CFLAGS -mieee"], [ - CFLAGS="$CFLAGS -DHAVE_TZSET -std1 -ieee"]) - # see pthread_intro(3) for pthread support on osf1, k.furukawa - AS_IF([test "${TCL_THREADS}" = 1], [ - CFLAGS="$CFLAGS -DHAVE_PTHREAD_ATTR_SETSTACKSIZE" - CFLAGS="$CFLAGS -DTCL_THREAD_STACK_MIN=PTHREAD_STACK_MIN*64" - LIBS=`echo $LIBS | sed s/-lpthreads//` - AS_IF([test "$GCC" = yes], [ - LIBS="$LIBS -lpthread -lmach -lexc" - ], [ - CFLAGS="$CFLAGS -pthread" - LDFLAGS="$LDFLAGS -pthread" - ]) - ]) - ;; - QNX-6*) - # QNX RTP - # This may work for all QNX, but it was only reported for v6. 
- SHLIB_CFLAGS="-fPIC" - SHLIB_LD="ld -Bshareable -x" - SHLIB_LD_LIBS="" - SHLIB_SUFFIX=".so" - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - ;; - SCO_SV-3.2*) - AS_IF([test "$GCC" = yes], [ - SHLIB_CFLAGS="-fPIC -melf" - LDFLAGS="$LDFLAGS -melf -Wl,-Bexport" - ], [ - SHLIB_CFLAGS="-Kpic -belf" - LDFLAGS="$LDFLAGS -belf -Wl,-Bexport" - ]) - SHLIB_LD="ld -G" - SHLIB_LD_LIBS="" - SHLIB_SUFFIX=".so" - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - ;; - SunOS-5.[[0-6]]) - # Careful to not let 5.10+ fall into this case - - # Note: If _REENTRANT isn't defined, then Solaris - # won't define thread-safe library routines. - - AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) - AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, - [Do we really want to follow the standard? Yes we do!]) - - SHLIB_CFLAGS="-KPIC" - SHLIB_SUFFIX=".so" - AS_IF([test "$GCC" = yes], [ - SHLIB_LD='${CC} -shared' - CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - ], [ - SHLIB_LD="/usr/ccs/bin/ld -G -z text" - CC_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - ]) - ;; - SunOS-5*) - # Note: If _REENTRANT isn't defined, then Solaris - # won't define thread-safe library routines. - - AC_DEFINE(_REENTRANT, 1, [Do we want the reentrant OS API?]) - AC_DEFINE(_POSIX_PTHREAD_SEMANTICS, 1, - [Do we really want to follow the standard? Yes we do!]) - - SHLIB_CFLAGS="-KPIC" - - # Check to enable 64-bit flags for compiler/linker - AS_IF([test "$do64bit" = yes], [ - arch=`isainfo` - AS_IF([test "$arch" = "sparcv9 sparc"], [ - AS_IF([test "$GCC" = yes], [ - AS_IF([test "`${CC} -dumpversion | awk -F. '{print [$]1}'`" -lt 3], [ - AC_MSG_WARN([64bit mode not supported with GCC < 3.2 on $system]) - ], [ - do64bit_ok=yes - CFLAGS="$CFLAGS -m64 -mcpu=v9" - LDFLAGS="$LDFLAGS -m64 -mcpu=v9" - SHLIB_CFLAGS="-fPIC" - ]) - ], [ - do64bit_ok=yes - AS_IF([test "$do64bitVIS" = yes], [ - CFLAGS="$CFLAGS -xarch=v9a" - LDFLAGS_ARCH="-xarch=v9a" - ], [ - CFLAGS="$CFLAGS -xarch=v9" - LDFLAGS_ARCH="-xarch=v9" - ]) - # Solaris 64 uses this as well - #LD_LIBRARY_PATH_VAR="LD_LIBRARY_PATH_64" - ]) - ], [AS_IF([test "$arch" = "amd64 i386"], [ - AS_IF([test "$GCC" = yes], [ - case $system in - SunOS-5.1[[1-9]]*|SunOS-5.[[2-9]][[0-9]]*) - do64bit_ok=yes - CFLAGS="$CFLAGS -m64" - LDFLAGS="$LDFLAGS -m64";; - *) - AC_MSG_WARN([64bit mode not supported with GCC on $system]);; - esac - ], [ - do64bit_ok=yes - case $system in - SunOS-5.1[[1-9]]*|SunOS-5.[[2-9]][[0-9]]*) - CFLAGS="$CFLAGS -m64" - LDFLAGS="$LDFLAGS -m64";; - *) - CFLAGS="$CFLAGS -xarch=amd64" - LDFLAGS="$LDFLAGS -xarch=amd64";; - esac - ]) - ], [AC_MSG_WARN([64bit mode not supported for $arch])])]) - ]) - - SHLIB_SUFFIX=".so" - AS_IF([test "$GCC" = yes], [ - SHLIB_LD='${CC} -shared' - CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS=${CC_SEARCH_FLAGS} - AS_IF([test "$do64bit_ok" = yes], [ - AS_IF([test "$arch" = "sparcv9 sparc"], [ - # We need to specify -static-libgcc or we need to - # add the path to the sparv9 libgcc. - # JH: static-libgcc is necessary for core Tcl, but may - # not be necessary for extensions. - SHLIB_LD="$SHLIB_LD -m64 -mcpu=v9 -static-libgcc" - # for finding sparcv9 libgcc, get the regular libgcc - # path, remove so name and append 'sparcv9' - #v9gcclibdir="`gcc -print-file-name=libgcc_s.so` | ..." - #CC_SEARCH_FLAGS="${CC_SEARCH_FLAGS},-R,$v9gcclibdir" - ], [AS_IF([test "$arch" = "amd64 i386"], [ - # JH: static-libgcc is necessary for core Tcl, but may - # not be necessary for extensions. 
- SHLIB_LD="$SHLIB_LD -m64 -static-libgcc" - ])]) - ]) - ], [ - case $system in - SunOS-5.[[1-9]][[0-9]]*) - # TEA specific: use LDFLAGS_DEFAULT instead of LDFLAGS - SHLIB_LD='${CC} -G -z text ${LDFLAGS_DEFAULT}';; - *) - SHLIB_LD='/usr/ccs/bin/ld -G -z text';; - esac - CC_SEARCH_FLAGS='-Wl,-R,${LIB_RUNTIME_DIR}' - LD_SEARCH_FLAGS='-R ${LIB_RUNTIME_DIR}' - ]) - ;; - UNIX_SV* | UnixWare-5*) - SHLIB_CFLAGS="-KPIC" - SHLIB_LD='${CC} -G' - SHLIB_LD_LIBS="" - SHLIB_SUFFIX=".so" - # Some UNIX_SV* systems (unixware 1.1.2 for example) have linkers - # that don't grok the -Bexport option. Test that it does. - AC_CACHE_CHECK([for ld accepts -Bexport flag], tcl_cv_ld_Bexport, [ - hold_ldflags=$LDFLAGS - LDFLAGS="$LDFLAGS -Wl,-Bexport" - AC_TRY_LINK(, [int i;], tcl_cv_ld_Bexport=yes, tcl_cv_ld_Bexport=no) - LDFLAGS=$hold_ldflags]) - AS_IF([test $tcl_cv_ld_Bexport = yes], [ - LDFLAGS="$LDFLAGS -Wl,-Bexport" - ]) - CC_SEARCH_FLAGS="" - LD_SEARCH_FLAGS="" - ;; - esac - - AS_IF([test "$do64bit" = yes -a "$do64bit_ok" = no], [ - AC_MSG_WARN([64bit support being disabled -- don't know magic for this platform]) - ]) - -dnl # Add any CPPFLAGS set in the environment to our CFLAGS, but delay doing so -dnl # until the end of configure, as configure's compile and link tests use -dnl # both CPPFLAGS and CFLAGS (unlike our compile and link) but configure's -dnl # preprocessing tests use only CPPFLAGS. - AC_CONFIG_COMMANDS_PRE([CFLAGS="${CFLAGS} ${CPPFLAGS}"; CPPFLAGS=""]) - - # Add in the arch flags late to ensure it wasn't removed. - # Not necessary in TEA, but this is aligned with core - LDFLAGS="$LDFLAGS $LDFLAGS_ARCH" - - # If we're running gcc, then change the C flags for compiling shared - # libraries to the right flags for gcc, instead of those for the - # standard manufacturer compiler. - - AS_IF([test "$GCC" = yes], [ - case $system in - AIX-*) ;; - BSD/OS*) ;; - CYGWIN_*|MINGW32_*) ;; - IRIX*) ;; - NetBSD-*|FreeBSD-*|OpenBSD-*) ;; - Darwin-*) ;; - SCO_SV-3.2*) ;; - windows) ;; - *) SHLIB_CFLAGS="-fPIC" ;; - esac]) - - AS_IF([test "$tcl_cv_cc_visibility_hidden" != yes], [ - AC_DEFINE(MODULE_SCOPE, [extern], - [No Compiler support for module scope symbols]) - ]) - - AS_IF([test "$SHARED_LIB_SUFFIX" = ""], [ - # TEA specific: use PACKAGE_VERSION instead of VERSION - SHARED_LIB_SUFFIX='${PACKAGE_VERSION}${SHLIB_SUFFIX}']) - AS_IF([test "$UNSHARED_LIB_SUFFIX" = ""], [ - # TEA specific: use PACKAGE_VERSION instead of VERSION - UNSHARED_LIB_SUFFIX='${PACKAGE_VERSION}.a']) - - if test "${GCC}" = "yes" -a ${SHLIB_SUFFIX} = ".dll"; then - AC_CACHE_CHECK(for SEH support in compiler, - tcl_cv_seh, - AC_TRY_RUN([ -#define WIN32_LEAN_AND_MEAN -#include -#undef WIN32_LEAN_AND_MEAN - - int main(int argc, char** argv) { - int a, b = 0; - __try { - a = 666 / b; - } - __except (EXCEPTION_EXECUTE_HANDLER) { - return 0; - } - return 1; - } - ], - tcl_cv_seh=yes, - tcl_cv_seh=no, - tcl_cv_seh=no) - ) - if test "$tcl_cv_seh" = "no" ; then - AC_DEFINE(HAVE_NO_SEH, 1, - [Defined when mingw does not support SEH]) - fi - - # - # Check to see if the excpt.h include file provided contains the - # definition for EXCEPTION_DISPOSITION; if not, which is the case - # with Cygwin's version as of 2002-04-10, define it to be int, - # sufficient for getting the current code to work. 
- # - AC_CACHE_CHECK(for EXCEPTION_DISPOSITION support in include files, - tcl_cv_eh_disposition, - AC_TRY_COMPILE([ -# define WIN32_LEAN_AND_MEAN -# include -# undef WIN32_LEAN_AND_MEAN - ],[ - EXCEPTION_DISPOSITION x; - ], - tcl_cv_eh_disposition=yes, - tcl_cv_eh_disposition=no) - ) - if test "$tcl_cv_eh_disposition" = "no" ; then - AC_DEFINE(EXCEPTION_DISPOSITION, int, - [Defined when cygwin/mingw does not support EXCEPTION DISPOSITION]) - fi - - # Check to see if winnt.h defines CHAR, SHORT, and LONG - # even if VOID has already been #defined. The win32api - # used by mingw and cygwin is known to do this. - - AC_CACHE_CHECK(for winnt.h that ignores VOID define, - tcl_cv_winnt_ignore_void, - AC_TRY_COMPILE([ -#define VOID void -#define WIN32_LEAN_AND_MEAN -#include -#undef WIN32_LEAN_AND_MEAN - ], [ - CHAR c; - SHORT s; - LONG l; - ], - tcl_cv_winnt_ignore_void=yes, - tcl_cv_winnt_ignore_void=no) - ) - if test "$tcl_cv_winnt_ignore_void" = "yes" ; then - AC_DEFINE(HAVE_WINNT_IGNORE_VOID, 1, - [Defined when cygwin/mingw ignores VOID define in winnt.h]) - fi - fi - - # See if the compiler supports casting to a union type. - # This is used to stop gcc from printing a compiler - # warning when initializing a union member. - - AC_CACHE_CHECK(for cast to union support, - tcl_cv_cast_to_union, - AC_TRY_COMPILE([], - [ - union foo { int i; double d; }; - union foo f = (union foo) (int) 0; - ], - tcl_cv_cast_to_union=yes, - tcl_cv_cast_to_union=no) - ) - if test "$tcl_cv_cast_to_union" = "yes"; then - AC_DEFINE(HAVE_CAST_TO_UNION, 1, - [Defined when compiler supports casting to union type.]) - fi - - AC_SUBST(CFLAGS_DEBUG) - AC_SUBST(CFLAGS_OPTIMIZE) - AC_SUBST(CFLAGS_WARNING) - - AC_SUBST(STLIB_LD) - AC_SUBST(SHLIB_LD) - - AC_SUBST(SHLIB_LD_LIBS) - AC_SUBST(SHLIB_CFLAGS) - - AC_SUBST(LD_LIBRARY_PATH_VAR) - - # These must be called after we do the basic CFLAGS checks and - # verify any possible 64-bit or similar switches are necessary - TEA_TCL_EARLY_FLAGS - TEA_TCL_64BIT_FLAGS -]) - -#-------------------------------------------------------------------- -# TEA_SERIAL_PORT -# -# Determine which interface to use to talk to the serial port. -# Note that #include lines must begin in leftmost column for -# some compilers to recognize them as preprocessor directives, -# and some build environments have stdin not pointing at a -# pseudo-terminal (usually /dev/null instead.) -# -# Arguments: -# none -# -# Results: -# -# Defines only one of the following vars: -# HAVE_SYS_MODEM_H -# USE_TERMIOS -# USE_TERMIO -# USE_SGTTY -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_SERIAL_PORT], [ - AC_CHECK_HEADERS(sys/modem.h) - AC_CACHE_CHECK([termios vs. termio vs. 
sgtty], tcl_cv_api_serial, [ - AC_TRY_RUN([ -#include - -int main() { - struct termios t; - if (tcgetattr(0, &t) == 0) { - cfsetospeed(&t, 0); - t.c_cflag |= PARENB | PARODD | CSIZE | CSTOPB; - return 0; - } - return 1; -}], tcl_cv_api_serial=termios, tcl_cv_api_serial=no, tcl_cv_api_serial=no) - if test $tcl_cv_api_serial = no ; then - AC_TRY_RUN([ -#include - -int main() { - struct termio t; - if (ioctl(0, TCGETA, &t) == 0) { - t.c_cflag |= CBAUD | PARENB | PARODD | CSIZE | CSTOPB; - return 0; - } - return 1; -}], tcl_cv_api_serial=termio, tcl_cv_api_serial=no, tcl_cv_api_serial=no) - fi - if test $tcl_cv_api_serial = no ; then - AC_TRY_RUN([ -#include - -int main() { - struct sgttyb t; - if (ioctl(0, TIOCGETP, &t) == 0) { - t.sg_ospeed = 0; - t.sg_flags |= ODDP | EVENP | RAW; - return 0; - } - return 1; -}], tcl_cv_api_serial=sgtty, tcl_cv_api_serial=no, tcl_cv_api_serial=no) - fi - if test $tcl_cv_api_serial = no ; then - AC_TRY_RUN([ -#include -#include - -int main() { - struct termios t; - if (tcgetattr(0, &t) == 0 - || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { - cfsetospeed(&t, 0); - t.c_cflag |= PARENB | PARODD | CSIZE | CSTOPB; - return 0; - } - return 1; -}], tcl_cv_api_serial=termios, tcl_cv_api_serial=no, tcl_cv_api_serial=no) - fi - if test $tcl_cv_api_serial = no; then - AC_TRY_RUN([ -#include -#include - -int main() { - struct termio t; - if (ioctl(0, TCGETA, &t) == 0 - || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { - t.c_cflag |= CBAUD | PARENB | PARODD | CSIZE | CSTOPB; - return 0; - } - return 1; - }], tcl_cv_api_serial=termio, tcl_cv_api_serial=no, tcl_cv_api_serial=no) - fi - if test $tcl_cv_api_serial = no; then - AC_TRY_RUN([ -#include -#include - -int main() { - struct sgttyb t; - if (ioctl(0, TIOCGETP, &t) == 0 - || errno == ENOTTY || errno == ENXIO || errno == EINVAL) { - t.sg_ospeed = 0; - t.sg_flags |= ODDP | EVENP | RAW; - return 0; - } - return 1; -}], tcl_cv_api_serial=sgtty, tcl_cv_api_serial=none, tcl_cv_api_serial=none) - fi]) - case $tcl_cv_api_serial in - termios) AC_DEFINE(USE_TERMIOS, 1, [Use the termios API for serial lines]);; - termio) AC_DEFINE(USE_TERMIO, 1, [Use the termio API for serial lines]);; - sgtty) AC_DEFINE(USE_SGTTY, 1, [Use the sgtty API for serial lines]);; - esac -]) - -#-------------------------------------------------------------------- -# TEA_MISSING_POSIX_HEADERS -# -# Supply substitutes for missing POSIX header files. Special -# notes: -# - stdlib.h doesn't define strtol, strtoul, or -# strtod in some versions of SunOS -# - some versions of string.h don't declare procedures such -# as strstr -# -# Arguments: -# none -# -# Results: -# -# Defines some of the following vars: -# NO_DIRENT_H -# NO_ERRNO_H -# NO_VALUES_H -# HAVE_LIMITS_H or NO_LIMITS_H -# NO_STDLIB_H -# NO_STRING_H -# NO_SYS_WAIT_H -# NO_DLFCN_H -# HAVE_SYS_PARAM_H -# -# HAVE_STRING_H ? -# -# tkUnixPort.h checks for HAVE_LIMITS_H, so do both HAVE and -# CHECK on limits.h -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_MISSING_POSIX_HEADERS], [ - AC_CACHE_CHECK([dirent.h], tcl_cv_dirent_h, [ - AC_TRY_LINK([#include -#include ], [ -#ifndef _POSIX_SOURCE -# ifdef __Lynx__ - /* - * Generate compilation error to make the test fail: Lynx headers - * are only valid if really in the POSIX environment. 
- */ - - missing_procedure(); -# endif -#endif -DIR *d; -struct dirent *entryPtr; -char *p; -d = opendir("foobar"); -entryPtr = readdir(d); -p = entryPtr->d_name; -closedir(d); -], tcl_cv_dirent_h=yes, tcl_cv_dirent_h=no)]) - - if test $tcl_cv_dirent_h = no; then - AC_DEFINE(NO_DIRENT_H, 1, [Do we have ?]) - fi - - # TEA specific: - AC_CHECK_HEADER(errno.h, , [AC_DEFINE(NO_ERRNO_H, 1, [Do we have ?])]) - AC_CHECK_HEADER(float.h, , [AC_DEFINE(NO_FLOAT_H, 1, [Do we have ?])]) - AC_CHECK_HEADER(values.h, , [AC_DEFINE(NO_VALUES_H, 1, [Do we have ?])]) - AC_CHECK_HEADER(limits.h, - [AC_DEFINE(HAVE_LIMITS_H, 1, [Do we have ?])], - [AC_DEFINE(NO_LIMITS_H, 1, [Do we have ?])]) - AC_CHECK_HEADER(stdlib.h, tcl_ok=1, tcl_ok=0) - AC_EGREP_HEADER(strtol, stdlib.h, , tcl_ok=0) - AC_EGREP_HEADER(strtoul, stdlib.h, , tcl_ok=0) - AC_EGREP_HEADER(strtod, stdlib.h, , tcl_ok=0) - if test $tcl_ok = 0; then - AC_DEFINE(NO_STDLIB_H, 1, [Do we have ?]) - fi - AC_CHECK_HEADER(string.h, tcl_ok=1, tcl_ok=0) - AC_EGREP_HEADER(strstr, string.h, , tcl_ok=0) - AC_EGREP_HEADER(strerror, string.h, , tcl_ok=0) - - # See also memmove check below for a place where NO_STRING_H can be - # set and why. - - if test $tcl_ok = 0; then - AC_DEFINE(NO_STRING_H, 1, [Do we have ?]) - fi - - AC_CHECK_HEADER(sys/wait.h, , [AC_DEFINE(NO_SYS_WAIT_H, 1, [Do we have ?])]) - AC_CHECK_HEADER(dlfcn.h, , [AC_DEFINE(NO_DLFCN_H, 1, [Do we have ?])]) - - # OS/390 lacks sys/param.h (and doesn't need it, by chance). - AC_HAVE_HEADERS(sys/param.h) -]) - -#-------------------------------------------------------------------- -# TEA_PATH_X -# -# Locate the X11 header files and the X11 library archive. Try -# the ac_path_x macro first, but if it doesn't find the X stuff -# (e.g. because there's no xmkmf program) then check through -# a list of possible directories. Under some conditions the -# autoconf macro will return an include directory that contains -# no include files, so double-check its result just to be safe. -# -# This should be called after TEA_CONFIG_CFLAGS as setting the -# LIBS line can confuse some configure macro magic. -# -# Arguments: -# none -# -# Results: -# -# Sets the following vars: -# XINCLUDES -# XLIBSW -# PKG_LIBS (appends to) -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_PATH_X], [ - if test "${TEA_WINDOWINGSYSTEM}" = "x11" ; then - TEA_PATH_UNIX_X - fi -]) - -AC_DEFUN([TEA_PATH_UNIX_X], [ - AC_PATH_X - not_really_there="" - if test "$no_x" = ""; then - if test "$x_includes" = ""; then - AC_TRY_CPP([#include ], , not_really_there="yes") - else - if test ! 
-r $x_includes/X11/Xlib.h; then - not_really_there="yes" - fi - fi - fi - if test "$no_x" = "yes" -o "$not_really_there" = "yes"; then - AC_MSG_CHECKING([for X11 header files]) - found_xincludes="no" - AC_TRY_CPP([#include ], found_xincludes="yes", found_xincludes="no") - if test "$found_xincludes" = "no"; then - dirs="/usr/unsupported/include /usr/local/include /usr/X386/include /usr/X11R6/include /usr/X11R5/include /usr/include/X11R5 /usr/include/X11R4 /usr/openwin/include /usr/X11/include /usr/sww/include" - for i in $dirs ; do - if test -r $i/X11/Xlib.h; then - AC_MSG_RESULT([$i]) - XINCLUDES=" -I$i" - found_xincludes="yes" - break - fi - done - fi - else - if test "$x_includes" != ""; then - XINCLUDES="-I$x_includes" - found_xincludes="yes" - fi - fi - if test "$found_xincludes" = "no"; then - AC_MSG_RESULT([couldn't find any!]) - fi - - if test "$no_x" = yes; then - AC_MSG_CHECKING([for X11 libraries]) - XLIBSW=nope - dirs="/usr/unsupported/lib /usr/local/lib /usr/X386/lib /usr/X11R6/lib /usr/X11R5/lib /usr/lib/X11R5 /usr/lib/X11R4 /usr/openwin/lib /usr/X11/lib /usr/sww/X11/lib" - for i in $dirs ; do - if test -r $i/libX11.a -o -r $i/libX11.so -o -r $i/libX11.sl -o -r $i/libX11.dylib; then - AC_MSG_RESULT([$i]) - XLIBSW="-L$i -lX11" - x_libraries="$i" - break - fi - done - else - if test "$x_libraries" = ""; then - XLIBSW=-lX11 - else - XLIBSW="-L$x_libraries -lX11" - fi - fi - if test "$XLIBSW" = nope ; then - AC_CHECK_LIB(Xwindow, XCreateWindow, XLIBSW=-lXwindow) - fi - if test "$XLIBSW" = nope ; then - AC_MSG_RESULT([could not find any! Using -lX11.]) - XLIBSW=-lX11 - fi - # TEA specific: - if test x"${XLIBSW}" != x ; then - PKG_LIBS="${PKG_LIBS} ${XLIBSW}" - fi -]) - -#-------------------------------------------------------------------- -# TEA_BLOCKING_STYLE -# -# The statements below check for systems where POSIX-style -# non-blocking I/O (O_NONBLOCK) doesn't work or is unimplemented. -# On these systems (mostly older ones), use the old BSD-style -# FIONBIO approach instead. -# -# Arguments: -# none -# -# Results: -# -# Defines some of the following vars: -# HAVE_SYS_IOCTL_H -# HAVE_SYS_FILIO_H -# USE_FIONBIO -# O_NONBLOCK -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_BLOCKING_STYLE], [ - AC_CHECK_HEADERS(sys/ioctl.h) - AC_CHECK_HEADERS(sys/filio.h) - TEA_CONFIG_SYSTEM - AC_MSG_CHECKING([FIONBIO vs. O_NONBLOCK for nonblocking I/O]) - case $system in - OSF*) - AC_DEFINE(USE_FIONBIO, 1, [Should we use FIONBIO?]) - AC_MSG_RESULT([FIONBIO]) - ;; - *) - AC_MSG_RESULT([O_NONBLOCK]) - ;; - esac -]) - -#-------------------------------------------------------------------- -# TEA_TIME_HANDLER -# -# Checks how the system deals with time.h, what time structures -# are used on the system, and what fields the structures have. 
-#
-# Arguments:
-# none
-#
-# Results:
-#
-# Defines some of the following vars:
-# USE_DELTA_FOR_TZ
-# HAVE_TM_GMTOFF
-# HAVE_TM_TZADJ
-# HAVE_TIMEZONE_VAR
-#--------------------------------------------------------------------
-
-AC_DEFUN([TEA_TIME_HANDLER], [
- AC_CHECK_HEADERS(sys/time.h)
- AC_HEADER_TIME
- AC_STRUCT_TIMEZONE
-
- AC_CHECK_FUNCS(gmtime_r localtime_r)
-
- AC_CACHE_CHECK([tm_tzadj in struct tm], tcl_cv_member_tm_tzadj, [
- AC_TRY_COMPILE([#include <time.h>], [struct tm tm; tm.tm_tzadj;],
- tcl_cv_member_tm_tzadj=yes, tcl_cv_member_tm_tzadj=no)])
- if test $tcl_cv_member_tm_tzadj = yes ; then
- AC_DEFINE(HAVE_TM_TZADJ, 1, [Should we use the tm_tzadj field of struct tm?])
- fi
-
- AC_CACHE_CHECK([tm_gmtoff in struct tm], tcl_cv_member_tm_gmtoff, [
- AC_TRY_COMPILE([#include <time.h>], [struct tm tm; tm.tm_gmtoff;],
- tcl_cv_member_tm_gmtoff=yes, tcl_cv_member_tm_gmtoff=no)])
- if test $tcl_cv_member_tm_gmtoff = yes ; then
- AC_DEFINE(HAVE_TM_GMTOFF, 1, [Should we use the tm_gmtoff field of struct tm?])
- fi
-
- #
- # It's important to include time.h in this check, as some systems
- # (like convex) have timezone functions, etc.
- #
- AC_CACHE_CHECK([long timezone variable], tcl_cv_timezone_long, [
- AC_TRY_COMPILE([#include <time.h>],
- [extern long timezone;
- timezone += 1;
- exit (0);],
- tcl_cv_timezone_long=yes, tcl_cv_timezone_long=no)])
- if test $tcl_cv_timezone_long = yes ; then
- AC_DEFINE(HAVE_TIMEZONE_VAR, 1, [Should we use the global timezone variable?])
- else
- #
- # On some systems (e.g. IRIX 6.2), timezone is a time_t and not a long.
- #
- AC_CACHE_CHECK([time_t timezone variable], tcl_cv_timezone_time, [
- AC_TRY_COMPILE([#include <time.h>],
- [extern time_t timezone;
- timezone += 1;
- exit (0);],
- tcl_cv_timezone_time=yes, tcl_cv_timezone_time=no)])
- if test $tcl_cv_timezone_time = yes ; then
- AC_DEFINE(HAVE_TIMEZONE_VAR, 1, [Should we use the global timezone variable?])
- fi
- fi
-])
-
-#--------------------------------------------------------------------
-# TEA_BUGGY_STRTOD
-#
-# Under Solaris 2.4, strtod returns the wrong value for the
-# terminating character under some conditions. Check for this
-# and if the problem exists use a substitute procedure
-# "fixstrtod" (provided by Tcl) that corrects the error.
-# Also, on Compaq's Tru64 Unix 5.0,
-# strtod(" ") returns 0.0 instead of a failure to convert.
-# -# Arguments: -# none -# -# Results: -# -# Might defines some of the following vars: -# strtod (=fixstrtod) -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_BUGGY_STRTOD], [ - AC_CHECK_FUNC(strtod, tcl_strtod=1, tcl_strtod=0) - if test "$tcl_strtod" = 1; then - AC_CACHE_CHECK([for Solaris2.4/Tru64 strtod bugs], tcl_cv_strtod_buggy,[ - AC_TRY_RUN([ - extern double strtod(); - int main() { - char *infString="Inf", *nanString="NaN", *spaceString=" "; - char *term; - double value; - value = strtod(infString, &term); - if ((term != infString) && (term[-1] == 0)) { - exit(1); - } - value = strtod(nanString, &term); - if ((term != nanString) && (term[-1] == 0)) { - exit(1); - } - value = strtod(spaceString, &term); - if (term == (spaceString+1)) { - exit(1); - } - exit(0); - }], tcl_cv_strtod_buggy=ok, tcl_cv_strtod_buggy=buggy, - tcl_cv_strtod_buggy=buggy)]) - if test "$tcl_cv_strtod_buggy" = buggy; then - AC_LIBOBJ([fixstrtod]) - USE_COMPAT=1 - AC_DEFINE(strtod, fixstrtod, [Do we want to use the strtod() in compat?]) - fi - fi -]) - -#-------------------------------------------------------------------- -# TEA_TCL_LINK_LIBS -# -# Search for the libraries needed to link the Tcl shell. -# Things like the math library (-lm) and socket stuff (-lsocket vs. -# -lnsl) are dealt with here. -# -# Arguments: -# Requires the following vars to be set in the Makefile: -# DL_LIBS (not in TEA, only needed in core) -# LIBS -# MATH_LIBS -# -# Results: -# -# Substitutes the following vars: -# TCL_LIBS -# MATH_LIBS -# -# Might append to the following vars: -# LIBS -# -# Might define the following vars: -# HAVE_NET_ERRNO_H -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_TCL_LINK_LIBS], [ - #-------------------------------------------------------------------- - # On a few very rare systems, all of the libm.a stuff is - # already in libc.a. Set compiler flags accordingly. - # Also, Linux requires the "ieee" library for math to work - # right (and it must appear before "-lm"). - #-------------------------------------------------------------------- - - AC_CHECK_FUNC(sin, MATH_LIBS="", MATH_LIBS="-lm") - AC_CHECK_LIB(ieee, main, [MATH_LIBS="-lieee $MATH_LIBS"]) - - #-------------------------------------------------------------------- - # Interactive UNIX requires -linet instead of -lsocket, plus it - # needs net/errno.h to define the socket-related error codes. - #-------------------------------------------------------------------- - - AC_CHECK_LIB(inet, main, [LIBS="$LIBS -linet"]) - AC_CHECK_HEADER(net/errno.h, [ - AC_DEFINE(HAVE_NET_ERRNO_H, 1, [Do we have ?])]) - - #-------------------------------------------------------------------- - # Check for the existence of the -lsocket and -lnsl libraries. - # The order here is important, so that they end up in the right - # order in the command line generated by make. Here are some - # special considerations: - # 1. Use "connect" and "accept" to check for -lsocket, and - # "gethostbyname" to check for -lnsl. - # 2. Use each function name only once: can't redo a check because - # autoconf caches the results of the last check and won't redo it. - # 3. Use -lnsl and -lsocket only if they supply procedures that - # aren't already present in the normal libraries. This is because - # IRIX 5.2 has libraries, but they aren't needed and they're - # bogus: they goof up name resolution if used. - # 4. On some SVR4 systems, can't use -lsocket without -lnsl too. 
- # To get around this problem, check for both libraries together - # if -lsocket doesn't work by itself. - #-------------------------------------------------------------------- - - tcl_checkBoth=0 - AC_CHECK_FUNC(connect, tcl_checkSocket=0, tcl_checkSocket=1) - if test "$tcl_checkSocket" = 1; then - AC_CHECK_FUNC(setsockopt, , [AC_CHECK_LIB(socket, setsockopt, - LIBS="$LIBS -lsocket", tcl_checkBoth=1)]) - fi - if test "$tcl_checkBoth" = 1; then - tk_oldLibs=$LIBS - LIBS="$LIBS -lsocket -lnsl" - AC_CHECK_FUNC(accept, tcl_checkNsl=0, [LIBS=$tk_oldLibs]) - fi - AC_CHECK_FUNC(gethostbyname, , [AC_CHECK_LIB(nsl, gethostbyname, - [LIBS="$LIBS -lnsl"])]) - - # TEA specific: Don't perform the eval of the libraries here because - # DL_LIBS won't be set until we call TEA_CONFIG_CFLAGS - - TCL_LIBS='${DL_LIBS} ${LIBS} ${MATH_LIBS}' - AC_SUBST(TCL_LIBS) - AC_SUBST(MATH_LIBS) -]) - -#-------------------------------------------------------------------- -# TEA_TCL_EARLY_FLAGS -# -# Check for what flags are needed to be passed so the correct OS -# features are available. -# -# Arguments: -# None -# -# Results: -# -# Might define the following vars: -# _ISOC99_SOURCE -# _LARGEFILE64_SOURCE -# _LARGEFILE_SOURCE64 -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_TCL_EARLY_FLAG],[ - AC_CACHE_VAL([tcl_cv_flag_]translit($1,[A-Z],[a-z]), - AC_TRY_COMPILE([$2], $3, [tcl_cv_flag_]translit($1,[A-Z],[a-z])=no, - AC_TRY_COMPILE([[#define ]$1[ 1 -]$2], $3, - [tcl_cv_flag_]translit($1,[A-Z],[a-z])=yes, - [tcl_cv_flag_]translit($1,[A-Z],[a-z])=no))) - if test ["x${tcl_cv_flag_]translit($1,[A-Z],[a-z])[}" = "xyes"] ; then - AC_DEFINE($1, 1, [Add the ]$1[ flag when building]) - tcl_flags="$tcl_flags $1" - fi -]) - -AC_DEFUN([TEA_TCL_EARLY_FLAGS],[ - AC_MSG_CHECKING([for required early compiler flags]) - tcl_flags="" - TEA_TCL_EARLY_FLAG(_ISOC99_SOURCE,[#include ], - [char *p = (char *)strtoll; char *q = (char *)strtoull;]) - TEA_TCL_EARLY_FLAG(_LARGEFILE64_SOURCE,[#include ], - [struct stat64 buf; int i = stat64("/", &buf);]) - TEA_TCL_EARLY_FLAG(_LARGEFILE_SOURCE64,[#include ], - [char *p = (char *)open64;]) - if test "x${tcl_flags}" = "x" ; then - AC_MSG_RESULT([none]) - else - AC_MSG_RESULT([${tcl_flags}]) - fi -]) - -#-------------------------------------------------------------------- -# TEA_TCL_64BIT_FLAGS -# -# Check for what is defined in the way of 64-bit features. -# -# Arguments: -# None -# -# Results: -# -# Might define the following vars: -# TCL_WIDE_INT_IS_LONG -# TCL_WIDE_INT_TYPE -# HAVE_STRUCT_DIRENT64 -# HAVE_STRUCT_STAT64 -# HAVE_TYPE_OFF64_T -#-------------------------------------------------------------------- - -AC_DEFUN([TEA_TCL_64BIT_FLAGS], [ - AC_MSG_CHECKING([for 64-bit integer type]) - AC_CACHE_VAL(tcl_cv_type_64bit,[ - tcl_cv_type_64bit=none - # See if the compiler knows natively about __int64 - AC_TRY_COMPILE(,[__int64 value = (__int64) 0;], - tcl_type_64bit=__int64, tcl_type_64bit="long long") - # See if we should use long anyway Note that we substitute in the - # type that is our current guess for a 64-bit type inside this check - # program, so it should be modified only carefully... 
- AC_TRY_COMPILE(,[switch (0) { - case 1: case (sizeof(]${tcl_type_64bit}[)==sizeof(long)): ; - }],tcl_cv_type_64bit=${tcl_type_64bit})]) - if test "${tcl_cv_type_64bit}" = none ; then - AC_DEFINE(TCL_WIDE_INT_IS_LONG, 1, [Are wide integers to be implemented with C 'long's?]) - AC_MSG_RESULT([using long]) - elif test "${tcl_cv_type_64bit}" = "__int64" \ - -a "${TEA_PLATFORM}" = "windows" ; then - # TEA specific: We actually want to use the default tcl.h checks in - # this case to handle both TCL_WIDE_INT_TYPE and TCL_LL_MODIFIER* - AC_MSG_RESULT([using Tcl header defaults]) - else - AC_DEFINE_UNQUOTED(TCL_WIDE_INT_TYPE,${tcl_cv_type_64bit}, - [What type should be used to define wide integers?]) - AC_MSG_RESULT([${tcl_cv_type_64bit}]) - - # Now check for auxiliary declarations - AC_CACHE_CHECK([for struct dirent64], tcl_cv_struct_dirent64,[ - AC_TRY_COMPILE([#include -#include ],[struct dirent64 p;], - tcl_cv_struct_dirent64=yes,tcl_cv_struct_dirent64=no)]) - if test "x${tcl_cv_struct_dirent64}" = "xyes" ; then - AC_DEFINE(HAVE_STRUCT_DIRENT64, 1, [Is 'struct dirent64' in ?]) - fi - - AC_CACHE_CHECK([for struct stat64], tcl_cv_struct_stat64,[ - AC_TRY_COMPILE([#include ],[struct stat64 p; -], - tcl_cv_struct_stat64=yes,tcl_cv_struct_stat64=no)]) - if test "x${tcl_cv_struct_stat64}" = "xyes" ; then - AC_DEFINE(HAVE_STRUCT_STAT64, 1, [Is 'struct stat64' in ?]) - fi - - AC_CHECK_FUNCS(open64 lseek64) - AC_MSG_CHECKING([for off64_t]) - AC_CACHE_VAL(tcl_cv_type_off64_t,[ - AC_TRY_COMPILE([#include ],[off64_t offset; -], - tcl_cv_type_off64_t=yes,tcl_cv_type_off64_t=no)]) - dnl Define HAVE_TYPE_OFF64_T only when the off64_t type and the - dnl functions lseek64 and open64 are defined. - if test "x${tcl_cv_type_off64_t}" = "xyes" && \ - test "x${ac_cv_func_lseek64}" = "xyes" && \ - test "x${ac_cv_func_open64}" = "xyes" ; then - AC_DEFINE(HAVE_TYPE_OFF64_T, 1, [Is off64_t in ?]) - AC_MSG_RESULT([yes]) - else - AC_MSG_RESULT([no]) - fi - fi -]) - -## -## Here ends the standard Tcl configuration bits and starts the -## TEA specific functions -## - -#------------------------------------------------------------------------ -# TEA_INIT -- -# -# Init various Tcl Extension Architecture (TEA) variables. -# This should be the first called TEA_* macro. -# -# Arguments: -# none -# -# Results: -# -# Defines and substs the following vars: -# CYGPATH -# EXEEXT -# Defines only: -# TEA_VERSION -# TEA_INITED -# TEA_PLATFORM (windows or unix) -# -# "cygpath" is used on windows to generate native path names for include -# files. These variables should only be used with the compiler and linker -# since they generate native path names. -# -# EXEEXT -# Select the executable extension based on the host type. This -# is a lightweight replacement for AC_EXEEXT that doesn't require -# a compiler. -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_INIT], [ - # TEA extensions pass this us the version of TEA they think they - # are compatible with. 
- TEA_VERSION="3.9" - - AC_MSG_CHECKING([for correct TEA configuration]) - if test x"${PACKAGE_NAME}" = x ; then - AC_MSG_ERROR([ -The PACKAGE_NAME variable must be defined by your TEA configure.in]) - fi - if test x"$1" = x ; then - AC_MSG_ERROR([ -TEA version not specified.]) - elif test "$1" != "${TEA_VERSION}" ; then - AC_MSG_RESULT([warning: requested TEA version "$1", have "${TEA_VERSION}"]) - else - AC_MSG_RESULT([ok (TEA ${TEA_VERSION})]) - fi - - # If the user did not set CFLAGS, set it now to keep macros - # like AC_PROG_CC and AC_TRY_COMPILE from adding "-g -O2". - if test "${CFLAGS+set}" != "set" ; then - CFLAGS="" - fi - - case "`uname -s`" in - *win32*|*WIN32*|*MINGW32_*) - AC_CHECK_PROG(CYGPATH, cygpath, cygpath -w, echo) - EXEEXT=".exe" - TEA_PLATFORM="windows" - ;; - *CYGWIN_*) - CYGPATH=echo - EXEEXT=".exe" - # TEA_PLATFORM is determined later in LOAD_TCLCONFIG - ;; - *) - CYGPATH=echo - # Maybe we are cross-compiling.... - case ${host_alias} in - *mingw32*) - EXEEXT=".exe" - TEA_PLATFORM="windows" - ;; - *) - EXEEXT="" - TEA_PLATFORM="unix" - ;; - esac - ;; - esac - - # Check if exec_prefix is set. If not use fall back to prefix. - # Note when adjusted, so that TEA_PREFIX can correct for this. - # This is needed for recursive configures, since autoconf propagates - # $prefix, but not $exec_prefix (doh!). - if test x$exec_prefix = xNONE ; then - exec_prefix_default=yes - exec_prefix=$prefix - fi - - AC_MSG_NOTICE([configuring ${PACKAGE_NAME} ${PACKAGE_VERSION}]) - - AC_SUBST(EXEEXT) - AC_SUBST(CYGPATH) - - # This package name must be replaced statically for AC_SUBST to work - AC_SUBST(PKG_LIB_FILE) - # Substitute STUB_LIB_FILE in case package creates a stub library too. - AC_SUBST(PKG_STUB_LIB_FILE) - - # We AC_SUBST these here to ensure they are subst'ed, - # in case the user doesn't call TEA_ADD_... - AC_SUBST(PKG_STUB_SOURCES) - AC_SUBST(PKG_STUB_OBJECTS) - AC_SUBST(PKG_TCL_SOURCES) - AC_SUBST(PKG_HEADERS) - AC_SUBST(PKG_INCLUDES) - AC_SUBST(PKG_LIBS) - AC_SUBST(PKG_CFLAGS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_SOURCES -- -# -# Specify one or more source files. Users should check for -# the right platform before adding to their list. -# It is not important to specify the directory, as long as it is -# in the generic, win or unix subdirectory of $(srcdir). -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_SOURCES -# PKG_OBJECTS -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_SOURCES], [ - vars="$@" - for i in $vars; do - case $i in - [\$]*) - # allow $-var names - PKG_SOURCES="$PKG_SOURCES $i" - PKG_OBJECTS="$PKG_OBJECTS $i" - ;; - *) - # check for existence - allows for generic/win/unix VPATH - # To add more dirs here (like 'src'), you have to update VPATH - # in Makefile.in as well - if test ! -f "${srcdir}/$i" -a ! -f "${srcdir}/generic/$i" \ - -a ! -f "${srcdir}/win/$i" -a ! -f "${srcdir}/unix/$i" \ - -a ! 
-f "${srcdir}/macosx/$i" \ - ; then - AC_MSG_ERROR([could not find source file '$i']) - fi - PKG_SOURCES="$PKG_SOURCES $i" - # this assumes it is in a VPATH dir - i=`basename $i` - # handle user calling this before or after TEA_SETUP_COMPILER - if test x"${OBJEXT}" != x ; then - j="`echo $i | sed -e 's/\.[[^.]]*$//'`.${OBJEXT}" - else - j="`echo $i | sed -e 's/\.[[^.]]*$//'`.\${OBJEXT}" - fi - PKG_OBJECTS="$PKG_OBJECTS $j" - ;; - esac - done - AC_SUBST(PKG_SOURCES) - AC_SUBST(PKG_OBJECTS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_STUB_SOURCES -- -# -# Specify one or more source files. Users should check for -# the right platform before adding to their list. -# It is not important to specify the directory, as long as it is -# in the generic, win or unix subdirectory of $(srcdir). -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_STUB_SOURCES -# PKG_STUB_OBJECTS -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_STUB_SOURCES], [ - vars="$@" - for i in $vars; do - # check for existence - allows for generic/win/unix VPATH - if test ! -f "${srcdir}/$i" -a ! -f "${srcdir}/generic/$i" \ - -a ! -f "${srcdir}/win/$i" -a ! -f "${srcdir}/unix/$i" \ - -a ! -f "${srcdir}/macosx/$i" \ - ; then - AC_MSG_ERROR([could not find stub source file '$i']) - fi - PKG_STUB_SOURCES="$PKG_STUB_SOURCES $i" - # this assumes it is in a VPATH dir - i=`basename $i` - # handle user calling this before or after TEA_SETUP_COMPILER - if test x"${OBJEXT}" != x ; then - j="`echo $i | sed -e 's/\.[[^.]]*$//'`.${OBJEXT}" - else - j="`echo $i | sed -e 's/\.[[^.]]*$//'`.\${OBJEXT}" - fi - PKG_STUB_OBJECTS="$PKG_STUB_OBJECTS $j" - done - AC_SUBST(PKG_STUB_SOURCES) - AC_SUBST(PKG_STUB_OBJECTS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_TCL_SOURCES -- -# -# Specify one or more Tcl source files. These should be platform -# independent runtime files. -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_TCL_SOURCES -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_TCL_SOURCES], [ - vars="$@" - for i in $vars; do - # check for existence, be strict because it is installed - if test ! -f "${srcdir}/$i" ; then - AC_MSG_ERROR([could not find tcl source file '${srcdir}/$i']) - fi - PKG_TCL_SOURCES="$PKG_TCL_SOURCES $i" - done - AC_SUBST(PKG_TCL_SOURCES) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_HEADERS -- -# -# Specify one or more source headers. Users should check for -# the right platform before adding to their list. -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_HEADERS -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_HEADERS], [ - vars="$@" - for i in $vars; do - # check for existence, be strict because it is installed - if test ! -f "${srcdir}/$i" ; then - AC_MSG_ERROR([could not find header file '${srcdir}/$i']) - fi - PKG_HEADERS="$PKG_HEADERS $i" - done - AC_SUBST(PKG_HEADERS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_INCLUDES -- -# -# Specify one or more include dirs. Users should check for -# the right platform before adding to their list. 
-# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_INCLUDES -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_INCLUDES], [ - vars="$@" - for i in $vars; do - PKG_INCLUDES="$PKG_INCLUDES $i" - done - AC_SUBST(PKG_INCLUDES) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_LIBS -- -# -# Specify one or more libraries. Users should check for -# the right platform before adding to their list. For Windows, -# libraries provided in "foo.lib" format will be converted to -# "-lfoo" when using GCC (mingw). -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_LIBS -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_LIBS], [ - vars="$@" - for i in $vars; do - if test "${TEA_PLATFORM}" = "windows" -a "$GCC" = "yes" ; then - # Convert foo.lib to -lfoo for GCC. No-op if not *.lib - i=`echo "$i" | sed -e 's/^\([[^-]].*\)\.lib[$]/-l\1/i'` - fi - PKG_LIBS="$PKG_LIBS $i" - done - AC_SUBST(PKG_LIBS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_CFLAGS -- -# -# Specify one or more CFLAGS. Users should check for -# the right platform before adding to their list. -# -# Arguments: -# one or more file names -# -# Results: -# -# Defines and substs the following vars: -# PKG_CFLAGS -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_CFLAGS], [ - PKG_CFLAGS="$PKG_CFLAGS $@" - AC_SUBST(PKG_CFLAGS) -]) - -#------------------------------------------------------------------------ -# TEA_ADD_CLEANFILES -- -# -# Specify one or more CLEANFILES. -# -# Arguments: -# one or more file names to clean target -# -# Results: -# -# Appends to CLEANFILES, already defined for subst in LOAD_TCLCONFIG -#------------------------------------------------------------------------ -AC_DEFUN([TEA_ADD_CLEANFILES], [ - CLEANFILES="$CLEANFILES $@" -]) - -#------------------------------------------------------------------------ -# TEA_PREFIX -- -# -# Handle the --prefix=... option by defaulting to what Tcl gave -# -# Arguments: -# none -# -# Results: -# -# If --prefix or --exec-prefix was not specified, $prefix and -# $exec_prefix will be set to the values given to Tcl when it was -# configured. -#------------------------------------------------------------------------ -AC_DEFUN([TEA_PREFIX], [ - if test "${prefix}" = "NONE"; then - prefix_default=yes - if test x"${TCL_PREFIX}" != x; then - AC_MSG_NOTICE([--prefix defaulting to TCL_PREFIX ${TCL_PREFIX}]) - prefix=${TCL_PREFIX} - else - AC_MSG_NOTICE([--prefix defaulting to /usr/local]) - prefix=/usr/local - fi - fi - if test "${exec_prefix}" = "NONE" -a x"${prefix_default}" = x"yes" \ - -o x"${exec_prefix_default}" = x"yes" ; then - if test x"${TCL_EXEC_PREFIX}" != x; then - AC_MSG_NOTICE([--exec-prefix defaulting to TCL_EXEC_PREFIX ${TCL_EXEC_PREFIX}]) - exec_prefix=${TCL_EXEC_PREFIX} - else - AC_MSG_NOTICE([--exec-prefix defaulting to ${prefix}]) - exec_prefix=$prefix - fi - fi -]) - -#------------------------------------------------------------------------ -# TEA_SETUP_COMPILER_CC -- -# -# Do compiler checks the way we want. This is just a replacement -# for AC_PROG_CC in TEA configure.in files to make them cleaner. -# -# Arguments: -# none -# -# Results: -# -# Sets up CC var and other standard bits we need to make executables. 
-#------------------------------------------------------------------------ -AC_DEFUN([TEA_SETUP_COMPILER_CC], [ - # Don't put any macros that use the compiler (e.g. AC_TRY_COMPILE) - # in this macro, they need to go into TEA_SETUP_COMPILER instead. - - AC_PROG_CC - AC_PROG_CPP - - INSTALL="\$(SHELL) \$(srcdir)/tclconfig/install-sh -c" - AC_SUBST(INSTALL) - INSTALL_DATA="\${INSTALL} -m 644" - AC_SUBST(INSTALL_DATA) - INSTALL_PROGRAM="\${INSTALL}" - AC_SUBST(INSTALL_PROGRAM) - INSTALL_SCRIPT="\${INSTALL}" - AC_SUBST(INSTALL_SCRIPT) - - #-------------------------------------------------------------------- - # Checks to see if the make program sets the $MAKE variable. - #-------------------------------------------------------------------- - - AC_PROG_MAKE_SET - - #-------------------------------------------------------------------- - # Find ranlib - #-------------------------------------------------------------------- - - AC_CHECK_TOOL(RANLIB, ranlib) - - #-------------------------------------------------------------------- - # Determines the correct binary file extension (.o, .obj, .exe etc.) - #-------------------------------------------------------------------- - - AC_OBJEXT - AC_EXEEXT -]) - -#------------------------------------------------------------------------ -# TEA_SETUP_COMPILER -- -# -# Do compiler checks that use the compiler. This must go after -# TEA_SETUP_COMPILER_CC, which does the actual compiler check. -# -# Arguments: -# none -# -# Results: -# -# Sets up CC var and other standard bits we need to make executables. -#------------------------------------------------------------------------ -AC_DEFUN([TEA_SETUP_COMPILER], [ - # Any macros that use the compiler (e.g. AC_TRY_COMPILE) have to go here. - AC_REQUIRE([TEA_SETUP_COMPILER_CC]) - - #------------------------------------------------------------------------ - # If we're using GCC, see if the compiler understands -pipe. If so, use it. - # It makes compiling go faster. (This is only a performance feature.) - #------------------------------------------------------------------------ - - if test -z "$no_pipe" -a -n "$GCC"; then - AC_CACHE_CHECK([if the compiler understands -pipe], - tcl_cv_cc_pipe, [ - hold_cflags=$CFLAGS; CFLAGS="$CFLAGS -pipe" - AC_TRY_COMPILE(,, tcl_cv_cc_pipe=yes, tcl_cv_cc_pipe=no) - CFLAGS=$hold_cflags]) - if test $tcl_cv_cc_pipe = yes; then - CFLAGS="$CFLAGS -pipe" - fi - fi - - #-------------------------------------------------------------------- - # Common compiler flag setup - #-------------------------------------------------------------------- - - AC_C_BIGENDIAN - if test "${TEA_PLATFORM}" = "unix" ; then - TEA_TCL_LINK_LIBS - TEA_MISSING_POSIX_HEADERS - # Let the user call this, because if it triggers, they will - # need a compat/strtod.c that is correct. Users can also - # use Tcl_GetDouble(FromObj) instead. - #TEA_BUGGY_STRTOD - fi -]) - -#------------------------------------------------------------------------ -# TEA_MAKE_LIB -- -# -# Generate a line that can be used to build a shared/unshared library -# in a platform independent manner. -# -# Arguments: -# none -# -# Requires: -# -# Results: -# -# Defines the following vars: -# CFLAGS - Done late here to note disturb other AC macros -# MAKE_LIB - Command to execute to build the Tcl library; -# differs depending on whether or not Tcl is being -# compiled as a shared library. 
-# MAKE_SHARED_LIB Makefile rule for building a shared library -# MAKE_STATIC_LIB Makefile rule for building a static library -# MAKE_STUB_LIB Makefile rule for building a stub library -# VC_MANIFEST_EMBED_DLL Makefile rule for embedded VC manifest in DLL -# VC_MANIFEST_EMBED_EXE Makefile rule for embedded VC manifest in EXE -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_MAKE_LIB], [ - if test "${TEA_PLATFORM}" = "windows" -a "$GCC" != "yes"; then - MAKE_STATIC_LIB="\${STLIB_LD} -out:\[$]@ \$(PKG_OBJECTS)" - MAKE_SHARED_LIB="\${SHLIB_LD} \${SHLIB_LD_LIBS} \${LDFLAGS_DEFAULT} -out:\[$]@ \$(PKG_OBJECTS)" - AC_EGREP_CPP([manifest needed], [ -#if defined(_MSC_VER) && _MSC_VER >= 1400 -print("manifest needed") -#endif - ], [ - # Could do a CHECK_PROG for mt, but should always be with MSVC8+ - VC_MANIFEST_EMBED_DLL="if test -f \[$]@.manifest ; then mt.exe -nologo -manifest \[$]@.manifest -outputresource:\[$]@\;2 ; fi" - VC_MANIFEST_EMBED_EXE="if test -f \[$]@.manifest ; then mt.exe -nologo -manifest \[$]@.manifest -outputresource:\[$]@\;1 ; fi" - MAKE_SHARED_LIB="${MAKE_SHARED_LIB} ; ${VC_MANIFEST_EMBED_DLL}" - TEA_ADD_CLEANFILES([*.manifest]) - ]) - MAKE_STUB_LIB="\${STLIB_LD} -nodefaultlib -out:\[$]@ \$(PKG_STUB_OBJECTS)" - else - MAKE_STATIC_LIB="\${STLIB_LD} \[$]@ \$(PKG_OBJECTS)" - MAKE_SHARED_LIB="\${SHLIB_LD} -o \[$]@ \$(PKG_OBJECTS) \${SHLIB_LD_LIBS}" - MAKE_STUB_LIB="\${STLIB_LD} \[$]@ \$(PKG_STUB_OBJECTS)" - fi - - if test "${SHARED_BUILD}" = "1" ; then - MAKE_LIB="${MAKE_SHARED_LIB} " - else - MAKE_LIB="${MAKE_STATIC_LIB} " - fi - - #-------------------------------------------------------------------- - # Shared libraries and static libraries have different names. - # Use the double eval to make sure any variables in the suffix is - # substituted. (@@@ Might not be necessary anymore) - #-------------------------------------------------------------------- - - if test "${TEA_PLATFORM}" = "windows" ; then - if test "${SHARED_BUILD}" = "1" ; then - # We force the unresolved linking of symbols that are really in - # the private libraries of Tcl and Tk. - if test x"${TK_BIN_DIR}" != x ; then - SHLIB_LD_LIBS="${SHLIB_LD_LIBS} \"`${CYGPATH} ${TK_BIN_DIR}/${TK_STUB_LIB_FILE}`\"" - fi - SHLIB_LD_LIBS="${SHLIB_LD_LIBS} \"`${CYGPATH} ${TCL_BIN_DIR}/${TCL_STUB_LIB_FILE}`\"" - if test "$GCC" = "yes"; then - SHLIB_LD_LIBS="${SHLIB_LD_LIBS} -static-libgcc" - fi - eval eval "PKG_LIB_FILE=${PACKAGE_NAME}${SHARED_LIB_SUFFIX}" - else - eval eval "PKG_LIB_FILE=${PACKAGE_NAME}${UNSHARED_LIB_SUFFIX}" - if test "$GCC" = "yes"; then - PKG_LIB_FILE=lib${PKG_LIB_FILE} - fi - fi - # Some packages build their own stubs libraries - eval eval "PKG_STUB_LIB_FILE=${PACKAGE_NAME}stub${UNSHARED_LIB_SUFFIX}" - if test "$GCC" = "yes"; then - PKG_STUB_LIB_FILE=lib${PKG_STUB_LIB_FILE} - fi - # These aren't needed on Windows (either MSVC or gcc) - RANLIB=: - RANLIB_STUB=: - else - RANLIB_STUB="${RANLIB}" - if test "${SHARED_BUILD}" = "1" ; then - SHLIB_LD_LIBS="${SHLIB_LD_LIBS} ${TCL_STUB_LIB_SPEC}" - if test x"${TK_BIN_DIR}" != x ; then - SHLIB_LD_LIBS="${SHLIB_LD_LIBS} ${TK_STUB_LIB_SPEC}" - fi - eval eval "PKG_LIB_FILE=lib${PACKAGE_NAME}${SHARED_LIB_SUFFIX}" - RANLIB=: - else - eval eval "PKG_LIB_FILE=lib${PACKAGE_NAME}${UNSHARED_LIB_SUFFIX}" - fi - # Some packages build their own stubs libraries - eval eval "PKG_STUB_LIB_FILE=lib${PACKAGE_NAME}stub${UNSHARED_LIB_SUFFIX}" - fi - - # These are escaped so that only CFLAGS is picked up at configure time. 
- # The other values will be substituted at make time. - CFLAGS="${CFLAGS} \${CFLAGS_DEFAULT} \${CFLAGS_WARNING}" - if test "${SHARED_BUILD}" = "1" ; then - CFLAGS="${CFLAGS} \${SHLIB_CFLAGS}" - fi - - AC_SUBST(MAKE_LIB) - AC_SUBST(MAKE_SHARED_LIB) - AC_SUBST(MAKE_STATIC_LIB) - AC_SUBST(MAKE_STUB_LIB) - AC_SUBST(RANLIB_STUB) - AC_SUBST(VC_MANIFEST_EMBED_DLL) - AC_SUBST(VC_MANIFEST_EMBED_EXE) -]) - -#------------------------------------------------------------------------ -# TEA_LIB_SPEC -- -# -# Compute the name of an existing object library located in libdir -# from the given base name and produce the appropriate linker flags. -# -# Arguments: -# basename The base name of the library without version -# numbers, extensions, or "lib" prefixes. -# extra_dir Extra directory in which to search for the -# library. This location is used first, then -# $prefix/$exec-prefix, then some defaults. -# -# Requires: -# TEA_INIT and TEA_PREFIX must be called first. -# -# Results: -# -# Defines the following vars: -# ${basename}_LIB_NAME The computed library name. -# ${basename}_LIB_SPEC The computed linker flags. -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_LIB_SPEC], [ - AC_MSG_CHECKING([for $1 library]) - - # Look in exec-prefix for the library (defined by TEA_PREFIX). - - tea_lib_name_dir="${exec_prefix}/lib" - - # Or in a user-specified location. - - if test x"$2" != x ; then - tea_extra_lib_dir=$2 - else - tea_extra_lib_dir=NONE - fi - - for i in \ - `ls -dr ${tea_extra_lib_dir}/$1[[0-9]]*.lib 2>/dev/null ` \ - `ls -dr ${tea_extra_lib_dir}/lib$1[[0-9]]* 2>/dev/null ` \ - `ls -dr ${tea_lib_name_dir}/$1[[0-9]]*.lib 2>/dev/null ` \ - `ls -dr ${tea_lib_name_dir}/lib$1[[0-9]]* 2>/dev/null ` \ - `ls -dr /usr/lib/$1[[0-9]]*.lib 2>/dev/null ` \ - `ls -dr /usr/lib/lib$1[[0-9]]* 2>/dev/null ` \ - `ls -dr /usr/lib64/$1[[0-9]]*.lib 2>/dev/null ` \ - `ls -dr /usr/lib64/lib$1[[0-9]]* 2>/dev/null ` \ - `ls -dr /usr/local/lib/$1[[0-9]]*.lib 2>/dev/null ` \ - `ls -dr /usr/local/lib/lib$1[[0-9]]* 2>/dev/null ` ; do - if test -f "$i" ; then - tea_lib_name_dir=`dirname $i` - $1_LIB_NAME=`basename $i` - $1_LIB_PATH_NAME=$i - break - fi - done - - if test "${TEA_PLATFORM}" = "windows"; then - $1_LIB_SPEC=\"`${CYGPATH} ${$1_LIB_PATH_NAME} 2>/dev/null`\" - else - # Strip off the leading "lib" and trailing ".a" or ".so" - - tea_lib_name_lib=`echo ${$1_LIB_NAME}|sed -e 's/^lib//' -e 's/\.[[^.]]*$//' -e 's/\.so.*//'` - $1_LIB_SPEC="-L${tea_lib_name_dir} -l${tea_lib_name_lib}" - fi - - if test "x${$1_LIB_NAME}" = x ; then - AC_MSG_ERROR([not found]) - else - AC_MSG_RESULT([${$1_LIB_SPEC}]) - fi -]) - -#------------------------------------------------------------------------ -# TEA_PRIVATE_TCL_HEADERS -- -# -# Locate the private Tcl include files -# -# Arguments: -# -# Requires: -# TCL_SRC_DIR Assumes that TEA_LOAD_TCLCONFIG has -# already been called. 
-# -# Results: -# -# Substitutes the following vars: -# TCL_TOP_DIR_NATIVE -# TCL_INCLUDES -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PRIVATE_TCL_HEADERS], [ - # Allow for --with-tclinclude to take effect and define ${ac_cv_c_tclh} - AC_REQUIRE([TEA_PUBLIC_TCL_HEADERS]) - AC_MSG_CHECKING([for Tcl private include files]) - - TCL_SRC_DIR_NATIVE=`${CYGPATH} ${TCL_SRC_DIR}` - TCL_TOP_DIR_NATIVE=\"${TCL_SRC_DIR_NATIVE}\" - - # Check to see if tclPort.h isn't already with the public headers - # Don't look for tclInt.h because that resides with tcl.h in the core - # sources, but the Port headers are in a different directory - if test "${TEA_PLATFORM}" = "windows" -a \ - -f "${ac_cv_c_tclh}/tclWinPort.h"; then - result="private headers found with public headers" - elif test "${TEA_PLATFORM}" = "unix" -a \ - -f "${ac_cv_c_tclh}/tclUnixPort.h"; then - result="private headers found with public headers" - else - TCL_GENERIC_DIR_NATIVE=\"${TCL_SRC_DIR_NATIVE}/generic\" - if test "${TEA_PLATFORM}" = "windows"; then - TCL_PLATFORM_DIR_NATIVE=\"${TCL_SRC_DIR_NATIVE}/win\" - else - TCL_PLATFORM_DIR_NATIVE=\"${TCL_SRC_DIR_NATIVE}/unix\" - fi - # Overwrite the previous TCL_INCLUDES as this should capture both - # public and private headers in the same set. - # We want to ensure these are substituted so as not to require - # any *_NATIVE vars be defined in the Makefile - TCL_INCLUDES="-I${TCL_GENERIC_DIR_NATIVE} -I${TCL_PLATFORM_DIR_NATIVE}" - if test "`uname -s`" = "Darwin"; then - # If Tcl was built as a framework, attempt to use - # the framework's Headers and PrivateHeaders directories - case ${TCL_DEFS} in - *TCL_FRAMEWORK*) - if test -d "${TCL_BIN_DIR}/Headers" -a \ - -d "${TCL_BIN_DIR}/PrivateHeaders"; then - TCL_INCLUDES="-I\"${TCL_BIN_DIR}/Headers\" -I\"${TCL_BIN_DIR}/PrivateHeaders\" ${TCL_INCLUDES}" - else - TCL_INCLUDES="${TCL_INCLUDES} ${TCL_INCLUDE_SPEC} `echo "${TCL_INCLUDE_SPEC}" | sed -e 's/Headers/PrivateHeaders/'`" - fi - ;; - esac - result="Using ${TCL_INCLUDES}" - else - if test ! -f "${TCL_SRC_DIR}/generic/tclInt.h" ; then - AC_MSG_ERROR([Cannot find private header tclInt.h in ${TCL_SRC_DIR}]) - fi - result="Using srcdir found in tclConfig.sh: ${TCL_SRC_DIR}" - fi - fi - - AC_SUBST(TCL_TOP_DIR_NATIVE) - - AC_SUBST(TCL_INCLUDES) - AC_MSG_RESULT([${result}]) -]) - -#------------------------------------------------------------------------ -# TEA_PUBLIC_TCL_HEADERS -- -# -# Locate the installed public Tcl header files -# -# Arguments: -# None. -# -# Requires: -# CYGPATH must be set -# -# Results: -# -# Adds a --with-tclinclude switch to configure. -# Result is cached. 
-# -# Substitutes the following vars: -# TCL_INCLUDES -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PUBLIC_TCL_HEADERS], [ - AC_MSG_CHECKING([for Tcl public headers]) - - AC_ARG_WITH(tclinclude, [ --with-tclinclude directory containing the public Tcl header files], with_tclinclude=${withval}) - - AC_CACHE_VAL(ac_cv_c_tclh, [ - # Use the value from --with-tclinclude, if it was given - - if test x"${with_tclinclude}" != x ; then - if test -f "${with_tclinclude}/tcl.h" ; then - ac_cv_c_tclh=${with_tclinclude} - else - AC_MSG_ERROR([${with_tclinclude} directory does not contain tcl.h]) - fi - else - list="" - if test "`uname -s`" = "Darwin"; then - # If Tcl was built as a framework, attempt to use - # the framework's Headers directory - case ${TCL_DEFS} in - *TCL_FRAMEWORK*) - list="`ls -d ${TCL_BIN_DIR}/Headers 2>/dev/null`" - ;; - esac - fi - - # Look in the source dir only if Tcl is not installed, - # and in that situation, look there before installed locations. - if test -f "${TCL_BIN_DIR}/Makefile" ; then - list="$list `ls -d ${TCL_SRC_DIR}/generic 2>/dev/null`" - fi - - # Check order: pkg --prefix location, Tcl's --prefix location, - # relative to directory of tclConfig.sh. - - eval "temp_includedir=${includedir}" - list="$list \ - `ls -d ${temp_includedir} 2>/dev/null` \ - `ls -d ${TCL_PREFIX}/include 2>/dev/null` \ - `ls -d ${TCL_BIN_DIR}/../include 2>/dev/null`" - if test "${TEA_PLATFORM}" != "windows" -o "$GCC" = "yes"; then - list="$list /usr/local/include /usr/include" - if test x"${TCL_INCLUDE_SPEC}" != x ; then - d=`echo "${TCL_INCLUDE_SPEC}" | sed -e 's/^-I//'` - list="$list `ls -d ${d} 2>/dev/null`" - fi - fi - for i in $list ; do - if test -f "$i/tcl.h" ; then - ac_cv_c_tclh=$i - break - fi - done - fi - ]) - - # Print a message based on how we determined the include path - - if test x"${ac_cv_c_tclh}" = x ; then - AC_MSG_ERROR([tcl.h not found. Please specify its location with --with-tclinclude]) - else - AC_MSG_RESULT([${ac_cv_c_tclh}]) - fi - - # Convert to a native path and substitute into the output files. - - INCLUDE_DIR_NATIVE=`${CYGPATH} ${ac_cv_c_tclh}` - - TCL_INCLUDES=-I\"${INCLUDE_DIR_NATIVE}\" - - AC_SUBST(TCL_INCLUDES) -]) - -#------------------------------------------------------------------------ -# TEA_PRIVATE_TK_HEADERS -- -# -# Locate the private Tk include files -# -# Arguments: -# -# Requires: -# TK_SRC_DIR Assumes that TEA_LOAD_TKCONFIG has -# already been called. 
-# -# Results: -# -# Substitutes the following vars: -# TK_INCLUDES -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PRIVATE_TK_HEADERS], [ - # Allow for --with-tkinclude to take effect and define ${ac_cv_c_tkh} - AC_REQUIRE([TEA_PUBLIC_TK_HEADERS]) - AC_MSG_CHECKING([for Tk private include files]) - - TK_SRC_DIR_NATIVE=`${CYGPATH} ${TK_SRC_DIR}` - TK_TOP_DIR_NATIVE=\"${TK_SRC_DIR_NATIVE}\" - - # Check to see if tkPort.h isn't already with the public headers - # Don't look for tkInt.h because that resides with tk.h in the core - # sources, but the Port headers are in a different directory - if test "${TEA_PLATFORM}" = "windows" -a \ - -f "${ac_cv_c_tkh}/tkWinPort.h"; then - result="private headers found with public headers" - elif test "${TEA_PLATFORM}" = "unix" -a \ - -f "${ac_cv_c_tkh}/tkUnixPort.h"; then - result="private headers found with public headers" - else - TK_GENERIC_DIR_NATIVE=\"${TK_SRC_DIR_NATIVE}/generic\" - TK_XLIB_DIR_NATIVE=\"${TK_SRC_DIR_NATIVE}/xlib\" - if test "${TEA_PLATFORM}" = "windows"; then - TK_PLATFORM_DIR_NATIVE=\"${TK_SRC_DIR_NATIVE}/win\" - else - TK_PLATFORM_DIR_NATIVE=\"${TK_SRC_DIR_NATIVE}/unix\" - fi - # Overwrite the previous TK_INCLUDES as this should capture both - # public and private headers in the same set. - # We want to ensure these are substituted so as not to require - # any *_NATIVE vars be defined in the Makefile - TK_INCLUDES="-I${TK_GENERIC_DIR_NATIVE} -I${TK_PLATFORM_DIR_NATIVE}" - # Detect and add ttk subdir - if test -d "${TK_SRC_DIR}/generic/ttk"; then - TK_INCLUDES="${TK_INCLUDES} -I\"${TK_SRC_DIR_NATIVE}/generic/ttk\"" - fi - if test "${TEA_WINDOWINGSYSTEM}" != "x11"; then - TK_INCLUDES="${TK_INCLUDES} -I\"${TK_XLIB_DIR_NATIVE}\"" - fi - if test "${TEA_WINDOWINGSYSTEM}" = "aqua"; then - TK_INCLUDES="${TK_INCLUDES} -I\"${TK_SRC_DIR_NATIVE}/macosx\"" - fi - if test "`uname -s`" = "Darwin"; then - # If Tk was built as a framework, attempt to use - # the framework's Headers and PrivateHeaders directories - case ${TK_DEFS} in - *TK_FRAMEWORK*) - if test -d "${TK_BIN_DIR}/Headers" -a \ - -d "${TK_BIN_DIR}/PrivateHeaders"; then - TK_INCLUDES="-I\"${TK_BIN_DIR}/Headers\" -I\"${TK_BIN_DIR}/PrivateHeaders\" ${TK_INCLUDES}" - else - TK_INCLUDES="${TK_INCLUDES} ${TK_INCLUDE_SPEC} `echo "${TK_INCLUDE_SPEC}" | sed -e 's/Headers/PrivateHeaders/'`" - fi - ;; - esac - result="Using ${TK_INCLUDES}" - else - if test ! -f "${TK_SRC_DIR}/generic/tkInt.h" ; then - AC_MSG_ERROR([Cannot find private header tkInt.h in ${TK_SRC_DIR}]) - fi - result="Using srcdir found in tkConfig.sh: ${TK_SRC_DIR}" - fi - fi - - AC_SUBST(TK_TOP_DIR_NATIVE) - AC_SUBST(TK_XLIB_DIR_NATIVE) - - AC_SUBST(TK_INCLUDES) - AC_MSG_RESULT([${result}]) -]) - -#------------------------------------------------------------------------ -# TEA_PUBLIC_TK_HEADERS -- -# -# Locate the installed public Tk header files -# -# Arguments: -# None. -# -# Requires: -# CYGPATH must be set -# -# Results: -# -# Adds a --with-tkinclude switch to configure. -# Result is cached. 
-# -# Substitutes the following vars: -# TK_INCLUDES -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PUBLIC_TK_HEADERS], [ - AC_MSG_CHECKING([for Tk public headers]) - - AC_ARG_WITH(tkinclude, [ --with-tkinclude directory containing the public Tk header files], with_tkinclude=${withval}) - - AC_CACHE_VAL(ac_cv_c_tkh, [ - # Use the value from --with-tkinclude, if it was given - - if test x"${with_tkinclude}" != x ; then - if test -f "${with_tkinclude}/tk.h" ; then - ac_cv_c_tkh=${with_tkinclude} - else - AC_MSG_ERROR([${with_tkinclude} directory does not contain tk.h]) - fi - else - list="" - if test "`uname -s`" = "Darwin"; then - # If Tk was built as a framework, attempt to use - # the framework's Headers directory. - case ${TK_DEFS} in - *TK_FRAMEWORK*) - list="`ls -d ${TK_BIN_DIR}/Headers 2>/dev/null`" - ;; - esac - fi - - # Look in the source dir only if Tk is not installed, - # and in that situation, look there before installed locations. - if test -f "${TK_BIN_DIR}/Makefile" ; then - list="$list `ls -d ${TK_SRC_DIR}/generic 2>/dev/null`" - fi - - # Check order: pkg --prefix location, Tk's --prefix location, - # relative to directory of tkConfig.sh, Tcl's --prefix location, - # relative to directory of tclConfig.sh. - - eval "temp_includedir=${includedir}" - list="$list \ - `ls -d ${temp_includedir} 2>/dev/null` \ - `ls -d ${TK_PREFIX}/include 2>/dev/null` \ - `ls -d ${TK_BIN_DIR}/../include 2>/dev/null` \ - `ls -d ${TCL_PREFIX}/include 2>/dev/null` \ - `ls -d ${TCL_BIN_DIR}/../include 2>/dev/null`" - if test "${TEA_PLATFORM}" != "windows" -o "$GCC" = "yes"; then - list="$list /usr/local/include /usr/include" - if test x"${TK_INCLUDE_SPEC}" != x ; then - d=`echo "${TK_INCLUDE_SPEC}" | sed -e 's/^-I//'` - list="$list `ls -d ${d} 2>/dev/null`" - fi - fi - for i in $list ; do - if test -f "$i/tk.h" ; then - ac_cv_c_tkh=$i - break - fi - done - fi - ]) - - # Print a message based on how we determined the include path - - if test x"${ac_cv_c_tkh}" = x ; then - AC_MSG_ERROR([tk.h not found. Please specify its location with --with-tkinclude]) - else - AC_MSG_RESULT([${ac_cv_c_tkh}]) - fi - - # Convert to a native path and substitute into the output files. - - INCLUDE_DIR_NATIVE=`${CYGPATH} ${ac_cv_c_tkh}` - - TK_INCLUDES=-I\"${INCLUDE_DIR_NATIVE}\" - - AC_SUBST(TK_INCLUDES) - - if test "${TEA_WINDOWINGSYSTEM}" != "x11"; then - # On Windows and Aqua, we need the X compat headers - AC_MSG_CHECKING([for X11 header files]) - if test ! -r "${INCLUDE_DIR_NATIVE}/X11/Xlib.h"; then - INCLUDE_DIR_NATIVE="`${CYGPATH} ${TK_SRC_DIR}/xlib`" - TK_XINCLUDES=-I\"${INCLUDE_DIR_NATIVE}\" - AC_SUBST(TK_XINCLUDES) - fi - AC_MSG_RESULT([${INCLUDE_DIR_NATIVE}]) - fi -]) - -#------------------------------------------------------------------------ -# TEA_PATH_CONFIG -- -# -# Locate the ${1}Config.sh file and perform a sanity check on -# the ${1} compile flags. These are used by packages like -# [incr Tk] that load *Config.sh files from more than Tcl and Tk. -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --with-$1=... -# -# Defines the following vars: -# $1_BIN_DIR Full path to the directory containing -# the $1Config.sh file -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PATH_CONFIG], [ - # - # Ok, lets find the $1 configuration - # First, look for one uninstalled. 
- # the alternative search directory is invoked by --with-$1 - # - - if test x"${no_$1}" = x ; then - # we reset no_$1 in case something fails here - no_$1=true - AC_ARG_WITH($1, [ --with-$1 directory containing $1 configuration ($1Config.sh)], with_$1config=${withval}) - AC_MSG_CHECKING([for $1 configuration]) - AC_CACHE_VAL(ac_cv_c_$1config,[ - - # First check to see if --with-$1 was specified. - if test x"${with_$1config}" != x ; then - case ${with_$1config} in - */$1Config.sh ) - if test -f ${with_$1config}; then - AC_MSG_WARN([--with-$1 argument should refer to directory containing $1Config.sh, not to $1Config.sh itself]) - with_$1config=`echo ${with_$1config} | sed 's!/$1Config\.sh$!!'` - fi;; - esac - if test -f "${with_$1config}/$1Config.sh" ; then - ac_cv_c_$1config=`(cd ${with_$1config}; pwd)` - else - AC_MSG_ERROR([${with_$1config} directory doesn't contain $1Config.sh]) - fi - fi - - # then check for a private $1 installation - if test x"${ac_cv_c_$1config}" = x ; then - for i in \ - ../$1 \ - `ls -dr ../$1*[[0-9]].[[0-9]]*.[[0-9]]* 2>/dev/null` \ - `ls -dr ../$1*[[0-9]].[[0-9]][[0-9]] 2>/dev/null` \ - `ls -dr ../$1*[[0-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../$1*[[0-9]].[[0-9]]* 2>/dev/null` \ - ../../$1 \ - `ls -dr ../../$1*[[0-9]].[[0-9]]*.[[0-9]]* 2>/dev/null` \ - `ls -dr ../../$1*[[0-9]].[[0-9]][[0-9]] 2>/dev/null` \ - `ls -dr ../../$1*[[0-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../$1*[[0-9]].[[0-9]]* 2>/dev/null` \ - ../../../$1 \ - `ls -dr ../../../$1*[[0-9]].[[0-9]]*.[[0-9]]* 2>/dev/null` \ - `ls -dr ../../../$1*[[0-9]].[[0-9]][[0-9]] 2>/dev/null` \ - `ls -dr ../../../$1*[[0-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../../$1*[[0-9]].[[0-9]]* 2>/dev/null` \ - ${srcdir}/../$1 \ - `ls -dr ${srcdir}/../$1*[[0-9]].[[0-9]]*.[[0-9]]* 2>/dev/null` \ - `ls -dr ${srcdir}/../$1*[[0-9]].[[0-9]][[0-9]] 2>/dev/null` \ - `ls -dr ${srcdir}/../$1*[[0-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ${srcdir}/../$1*[[0-9]].[[0-9]]* 2>/dev/null` \ - ; do - if test -f "$i/$1Config.sh" ; then - ac_cv_c_$1config=`(cd $i; pwd)` - break - fi - if test -f "$i/unix/$1Config.sh" ; then - ac_cv_c_$1config=`(cd $i/unix; pwd)` - break - fi - done - fi - - # check in a few common install locations - if test x"${ac_cv_c_$1config}" = x ; then - for i in `ls -d ${libdir} 2>/dev/null` \ - `ls -d ${exec_prefix}/lib 2>/dev/null` \ - `ls -d ${prefix}/lib 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` \ - `ls -d /usr/lib64 2>/dev/null` \ - ; do - if test -f "$i/$1Config.sh" ; then - ac_cv_c_$1config=`(cd $i; pwd)` - break - fi - done - fi - ]) - - if test x"${ac_cv_c_$1config}" = x ; then - $1_BIN_DIR="# no $1 configs found" - AC_MSG_WARN([Cannot find $1 configuration definitions]) - exit 0 - else - no_$1= - $1_BIN_DIR=${ac_cv_c_$1config} - AC_MSG_RESULT([found $$1_BIN_DIR/$1Config.sh]) - fi - fi -]) - -#------------------------------------------------------------------------ -# TEA_LOAD_CONFIG -- -# -# Load the $1Config.sh file -# -# Arguments: -# -# Requires the following vars to be set: -# $1_BIN_DIR -# -# Results: -# -# Substitutes the following vars: -# $1_SRC_DIR -# $1_LIB_FILE -# $1_LIB_SPEC -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_LOAD_CONFIG], [ - AC_MSG_CHECKING([for existence of ${$1_BIN_DIR}/$1Config.sh]) - - if test -f "${$1_BIN_DIR}/$1Config.sh" ; then - AC_MSG_RESULT([loading]) - . 
"${$1_BIN_DIR}/$1Config.sh" - else - AC_MSG_RESULT([file not found]) - fi - - # - # If the $1_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. - # For example, the variable $1_LIB_SPEC will be set to the value - # of $1_BUILD_LIB_SPEC. An extension should make use of $1_LIB_SPEC - # instead of $1_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. - # - - if test -f "${$1_BIN_DIR}/Makefile" ; then - AC_MSG_WARN([Found Makefile - using build library specs for $1]) - $1_LIB_SPEC=${$1_BUILD_LIB_SPEC} - $1_STUB_LIB_SPEC=${$1_BUILD_STUB_LIB_SPEC} - $1_STUB_LIB_PATH=${$1_BUILD_STUB_LIB_PATH} - $1_INCLUDE_SPEC=${$1_BUILD_INCLUDE_SPEC} - $1_LIBRARY_PATH=${$1_LIBRARY_PATH} - fi - - AC_SUBST($1_VERSION) - AC_SUBST($1_BIN_DIR) - AC_SUBST($1_SRC_DIR) - - AC_SUBST($1_LIB_FILE) - AC_SUBST($1_LIB_SPEC) - - AC_SUBST($1_STUB_LIB_FILE) - AC_SUBST($1_STUB_LIB_SPEC) - AC_SUBST($1_STUB_LIB_PATH) - - # Allow the caller to prevent this auto-check by specifying any 2nd arg - AS_IF([test "x$2" = x], [ - # Check both upper and lower-case variants - # If a dev wanted non-stubs libs, this function could take an option - # to not use _STUB in the paths below - AS_IF([test "x${$1_STUB_LIB_SPEC}" = x], - [TEA_LOAD_CONFIG_LIB(translit($1,[a-z],[A-Z])_STUB)], - [TEA_LOAD_CONFIG_LIB($1_STUB)]) - ]) -]) - -#------------------------------------------------------------------------ -# TEA_LOAD_CONFIG_LIB -- -# -# Helper function to load correct library from another extension's -# ${PACKAGE}Config.sh. -# -# Results: -# Adds to LIBS the appropriate extension library -#------------------------------------------------------------------------ -AC_DEFUN([TEA_LOAD_CONFIG_LIB], [ - AC_MSG_CHECKING([For $1 library for LIBS]) - # This simplifies the use of stub libraries by automatically adding - # the stub lib to your path. Normally this would add to SHLIB_LD_LIBS, - # but this is called before CONFIG_CFLAGS. More importantly, this adds - # to PKG_LIBS, which becomes LIBS, and that is only used by SHLIB_LD. 
- if test "x${$1_LIB_SPEC}" != "x" ; then - if test "${TEA_PLATFORM}" = "windows" -a "$GCC" != "yes" ; then - TEA_ADD_LIBS([\"`${CYGPATH} ${$1_LIB_PATH}`\"]) - AC_MSG_RESULT([using $1_LIB_PATH ${$1_LIB_PATH}]) - else - TEA_ADD_LIBS([${$1_LIB_SPEC}]) - AC_MSG_RESULT([using $1_LIB_SPEC ${$1_LIB_SPEC}]) - fi - else - AC_MSG_RESULT([file not found]) - fi -]) - -#------------------------------------------------------------------------ -# TEA_EXPORT_CONFIG -- -# -# Define the data to insert into the ${PACKAGE}Config.sh file -# -# Arguments: -# -# Requires the following vars to be set: -# $1 -# -# Results: -# Substitutes the following vars: -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_EXPORT_CONFIG], [ - #-------------------------------------------------------------------- - # These are for $1Config.sh - #-------------------------------------------------------------------- - - # pkglibdir must be a fully qualified path and (not ${exec_prefix}/lib) - eval pkglibdir="[$]{libdir}/$1${PACKAGE_VERSION}" - if test "${TCL_LIB_VERSIONS_OK}" = "ok"; then - eval $1_LIB_FLAG="-l$1${PACKAGE_VERSION}${DBGX}" - eval $1_STUB_LIB_FLAG="-l$1stub${PACKAGE_VERSION}${DBGX}" - else - eval $1_LIB_FLAG="-l$1`echo ${PACKAGE_VERSION} | tr -d .`${DBGX}" - eval $1_STUB_LIB_FLAG="-l$1stub`echo ${PACKAGE_VERSION} | tr -d .`${DBGX}" - fi - $1_BUILD_LIB_SPEC="-L`pwd` ${$1_LIB_FLAG}" - $1_LIB_SPEC="-L${pkglibdir} ${$1_LIB_FLAG}" - $1_BUILD_STUB_LIB_SPEC="-L`pwd` [$]{$1_STUB_LIB_FLAG}" - $1_STUB_LIB_SPEC="-L${pkglibdir} [$]{$1_STUB_LIB_FLAG}" - $1_BUILD_STUB_LIB_PATH="`pwd`/[$]{PKG_STUB_LIB_FILE}" - $1_STUB_LIB_PATH="${pkglibdir}/[$]{PKG_STUB_LIB_FILE}" - - AC_SUBST($1_BUILD_LIB_SPEC) - AC_SUBST($1_LIB_SPEC) - AC_SUBST($1_BUILD_STUB_LIB_SPEC) - AC_SUBST($1_STUB_LIB_SPEC) - AC_SUBST($1_BUILD_STUB_LIB_PATH) - AC_SUBST($1_STUB_LIB_PATH) - - AC_SUBST(MAJOR_VERSION) - AC_SUBST(MINOR_VERSION) - AC_SUBST(PATCHLEVEL) -]) - - -#------------------------------------------------------------------------ -# TEA_PATH_CELIB -- -# -# Locate Keuchel's celib emulation layer for targeting Win/CE -# -# Arguments: -# none -# -# Results: -# -# Adds the following arguments to configure: -# --with-celib=... -# -# Defines the following vars: -# CELIB_DIR Full path to the directory containing -# the include and platform lib files -#------------------------------------------------------------------------ - -AC_DEFUN([TEA_PATH_CELIB], [ - # First, look for one uninstalled. - # the alternative search directory is invoked by --with-celib - - if test x"${no_celib}" = x ; then - # we reset no_celib in case something fails here - no_celib=true - AC_ARG_WITH(celib,[ --with-celib=DIR use Windows/CE support library from DIR], with_celibconfig=${withval}) - AC_MSG_CHECKING([for Windows/CE celib directory]) - AC_CACHE_VAL(ac_cv_c_celibconfig,[ - # First check to see if --with-celibconfig was specified. 
- if test x"${with_celibconfig}" != x ; then - if test -d "${with_celibconfig}/inc" ; then - ac_cv_c_celibconfig=`(cd ${with_celibconfig}; pwd)` - else - AC_MSG_ERROR([${with_celibconfig} directory doesn't contain inc directory]) - fi - fi - - # then check for a celib library - if test x"${ac_cv_c_celibconfig}" = x ; then - for i in \ - ../celib-palm-3.0 \ - ../celib \ - ../../celib-palm-3.0 \ - ../../celib \ - `ls -dr ../celib-*3.[[0-9]]* 2>/dev/null` \ - ${srcdir}/../celib-palm-3.0 \ - ${srcdir}/../celib \ - `ls -dr ${srcdir}/../celib-*3.[[0-9]]* 2>/dev/null` \ - ; do - if test -d "$i/inc" ; then - ac_cv_c_celibconfig=`(cd $i; pwd)` - break - fi - done - fi - ]) - if test x"${ac_cv_c_celibconfig}" = x ; then - AC_MSG_ERROR([Cannot find celib support library directory]) - else - no_celib= - CELIB_DIR=${ac_cv_c_celibconfig} - CELIB_DIR=`echo "$CELIB_DIR" | sed -e 's!\\\!/!g'` - AC_MSG_RESULT([found $CELIB_DIR]) - fi - fi -]) -# Local Variables: -# mode: autoconf -# End: diff --git a/autoconf/tea/teaish.tcl b/autoconf/tea/teaish.tcl new file mode 100644 index 0000000000..47e0ea7013 --- /dev/null +++ b/autoconf/tea/teaish.tcl @@ -0,0 +1,569 @@ +# Teaish configure script for the SQLite Tcl extension + +# +# State for disparate config-time pieces. +# +array set sqlite__Config [proj-strip-hash-comments { + # + # The list of feature --flags which the --all flag implies. This + # requires special handling in a few places. + # + all-flag-enables {fts3 fts4 fts5 rtree geopoly} + + # >0 if building in the canonical tree. -1=undetermined + is-canonical -1 +}] + +# +# Set up the package info for teaish... +# +apply {{dir} { + # Figure out the version number... + set version "" + if {[file exists $dir/../VERSION]} { + # The canonical SQLite TEA(ish) build + set version [proj-file-content -trim $dir/../VERSION] + set ::sqlite__Config(is-canonical) 1 + set distname sqlite-tcl + } elseif {[file exists $dir/generic/tclsqlite3.c]} { + # The copy from the teaish tree, used as a dev/test bed before + # updating SQLite's tree. + set ::sqlite__Config(is-canonical) 0 + set fd [open $dir/generic/tclsqlite3.c rb] + while {[gets $fd line] >=0} { + if {[regexp {^#define[ ]+SQLITE_VERSION[ ]+"(3.+)"} \ + $line - version]} { + set distname sqlite-teaish + break + } + } + close $fd + } + + if {"" eq $version} { + proj-fatal "Cannot determine the SQLite version number" + } + + proj-assert {$::sqlite__Config(is-canonical) > -1} + proj-assert {[string match 3.*.* $version]} \ + "Unexpected SQLite version: $version" + + set pragmas {} + if {$::sqlite__Config(is-canonical)} { + # Disable "make dist" in the canonical tree. That tree is + # generated from several pieces and creating/testing working + # "dist" rules for that sub-build currently feels unnecessary. The + # copy in the teaish tree, though, should be able to "make dist". + lappend pragmas no-dist + } else { + lappend pragmas full-dist + } + + teaish-pkginfo-set -vars { + -name sqlite + -name.pkg sqlite3 + -version $version + -name.dist $distname + -libDir sqlite$version + -pragmas $pragmas + -src generic/tclsqlite3.c + } + # We should also have: + # -vsatisfies 8.6- + # But at least one platform is failing this vsatisfies check + # for no apparent reason: + # https://sqlite.org/forum/forumpost/fde857fb8101a4be +}} [teaish-get -dir] + + +# +# Must return either an empty string or a list in the form accepted by +# autosetup's [options] function. 
+# +proc teaish-options {} { + # These flags and defaults mostly derive from the historical TEA + # build. Some, like ICU, are taken from the canonical SQLite tree. + return [subst -nocommands -nobackslashes { + with-system-sqlite=0 + => {Use the system-level SQLite instead of the copy in this tree. + Also requires use of --override-sqlite-version so that the build + knows what version number to associate with the system-level SQLite.} + override-sqlite-version:VERSION + => {For use with --with-system-sqlite to set the version number.} + threadsafe=1 => {Disable mutexing} + with-tempstore:=no => {Use an in-RAM database for temporary tables: never,no,yes,always} + load-extension=0 => {Enable loading of external extensions} + math=1 => {Disable math functions} + json=1 => {Disable JSON functions} + fts3 => {Enable the FTS3 extension} + fts4 => {Enable the FTS4 extension} + fts5 => {Enable the FTS5 extension} + update-limit => {Enable the UPDATE/DELETE LIMIT clause} + geopoly => {Enable the GEOPOLY extension} + rtree => {Enable the RTREE extension} + session => {Enable the SESSION extension} + all=1 => {Disable $::sqlite__Config(all-flag-enables)} + with-icu-ldflags:LDFLAGS + => {Enable SQLITE_ENABLE_ICU and add the given linker flags for the + ICU libraries. e.g. on Ubuntu systems, try '-licui18n -licuuc -licudata'.} + with-icu-cflags:CFLAGS + => {Apply extra CFLAGS/CPPFLAGS necessary for building with ICU. + e.g. -I/usr/local/include} + with-icu-config:=auto + => {Enable SQLITE_ENABLE_ICU. Value must be one of: auto, pkg-config, + /path/to/icu-config} + icu-collations=0 + => {Enable SQLITE_ENABLE_ICU_COLLATIONS. Requires --with-icu-ldflags=... + or --with-icu-config} + }] +} + +# +# Gets called by tea-configure-core. Must perform any configuration +# work needed for this extension. +# +proc teaish-configure {} { + use teaish/feature + + if {[proj-opt-was-provided override-sqlite-version]} { + teaish-pkginfo-set -version [opt-val override-sqlite-version] + proj-warn "overriding sqlite version number:" [teaish-pkginfo-get -version] + } elseif {[proj-opt-was-provided with-system-sqlite] + && [opt-val with-system-sqlite] ne "0"} { + proj-fatal "when using --with-system-sqlite also use" \ + "--override-sqlite-version to specify a library version number." + } + + define CFLAGS [proj-get-env CFLAGS {-O2}] + sqlite-munge-cflags + + # + # Add feature flags from legacy configure.ac which are not covered by + # --flags. + # + sqlite-add-feature-flag { + -DSQLITE_3_SUFFIX_ONLY=1 + -DSQLITE_ENABLE_DESERIALIZE=1 + -DSQLITE_ENABLE_DBPAGE_VTAB=1 + -DSQLITE_ENABLE_BYTECODE_VTAB=1 + -DSQLITE_ENABLE_DBSTAT_VTAB=1 + } + + if {[opt-bool with-system-sqlite]} { + msg-result "Using system-level sqlite3." + teaish-cflags-add -DUSE_SYSTEM_SQLITE + teaish-ldflags-add -lsqlite3 + } elseif {$::sqlite__Config(is-canonical)} { + teaish-cflags-add -I[teaish-get -dir]/.. + } + + teaish-check-librt + teaish-check-libz + sqlite-handle-threadsafe + sqlite-handle-tempstore + sqlite-handle-load-extension + sqlite-handle-math + sqlite-handle-icu + + sqlite-handle-common-feature-flags; # must be late in the process +}; # teaish-configure + +define OPT_FEATURE_FLAGS {} ; # -DSQLITE_OMIT/ENABLE flags. +# +# Adds $args, if not empty, to OPT_FEATURE_FLAGS. This is intended only for holding +# -DSQLITE_ENABLE/OMIT/... flags, but that is not enforced here. 
+proc sqlite-add-feature-flag {args} { + if {"" ne $args} { + define-append OPT_FEATURE_FLAGS {*}$args + } +} + +# +# Check for log(3) in libm and die with an error if it is not +# found. $featureName should be the feature name which requires that +# function (it's used only in error messages). defines LDFLAGS_MATH to +# the required linker flags (which may be empty even if the math APIs +# are found, depending on the OS). +proc sqlite-affirm-have-math {featureName} { + if {"" eq [get-define LDFLAGS_MATH ""]} { + if {![msg-quiet proj-check-function-in-lib log m]} { + user-error "Missing math APIs for $featureName" + } + set lfl [get-define lib_log ""] + undefine lib_log + if {"" ne $lfl} { + user-notice "Forcing requirement of $lfl for $featureName" + } + define LDFLAGS_MATH $lfl + teaish-ldflags-prepend $lfl + } +} + +# +# Handle various SQLITE_ENABLE/OMIT_... feature flags. +proc sqlite-handle-common-feature-flags {} { + msg-result "Feature flags..." + if {![opt-bool all]} { + # Special handling for --disable-all + foreach flag $::sqlite__Config(all-flag-enables) { + if {![proj-opt-was-provided $flag]} { + proj-opt-set $flag 0 + } + } + } + foreach {boolFlag featureFlag ifSetEvalThis} [proj-strip-hash-comments { + all {} { + # The 'all' option must be first in this list. This impl makes + # an effort to only apply flags which the user did not already + # apply, so that combinations like (--all --disable-geopoly) + # will indeed disable geopoly. There are corner cases where + # flags which depend on each other will behave in non-intuitive + # ways: + # + # --all --disable-rtree + # + # Will NOT disable geopoly, though geopoly depends on rtree. + # The --geopoly flag, though, will automatically re-enable + # --rtree, so --disable-rtree won't actually disable anything in + # that case. + foreach k $::sqlite__Config(all-flag-enables) { + if {![proj-opt-was-provided $k]} { + proj-opt-set $k 1 + } + } + } + fts3 -DSQLITE_ENABLE_FTS3 {sqlite-affirm-have-math fts3} + fts4 -DSQLITE_ENABLE_FTS4 {sqlite-affirm-have-math fts4} + fts5 -DSQLITE_ENABLE_FTS5 {sqlite-affirm-have-math fts5} + geopoly -DSQLITE_ENABLE_GEOPOLY {proj-opt-set rtree} + rtree -DSQLITE_ENABLE_RTREE {} + session {-DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_PREUPDATE_HOOK} {} + update-limit -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT {} + scanstatus -DSQLITE_ENABLE_STMT_SCANSTATUS {} + }] { + if {$boolFlag ni $::autosetup(options)} { + # Skip flags which are in the canonical build but not + # the autoconf bundle. + continue + } + proj-if-opt-truthy $boolFlag { + sqlite-add-feature-flag $featureFlag + if {0 != [eval $ifSetEvalThis] && "all" ne $boolFlag} { + msg-result " + $boolFlag" + } + } { + if {"all" ne $boolFlag} { + msg-result " - $boolFlag" + } + } + } + # + # Invert the above loop's logic for some SQLITE_OMIT_... cases. If + # config option $boolFlag is false, [sqlite-add-feature-flag + # $featureFlag], where $featureFlag is intended to be + # -DSQLITE_OMIT_... + foreach {boolFlag featureFlag} { + json -DSQLITE_OMIT_JSON + } { + if {[proj-opt-truthy $boolFlag]} { + msg-result " + $boolFlag" + } else { + sqlite-add-feature-flag $featureFlag + msg-result " - $boolFlag" + } + } + + ## + # Remove duplicates from the final feature flag sets and show them + # to the user. 
+ set oFF [get-define OPT_FEATURE_FLAGS] + if {"" ne $oFF} { + define OPT_FEATURE_FLAGS [lsort -unique $oFF] + msg-result "Library feature flags: [get-define OPT_FEATURE_FLAGS]" + } + if {[lsearch [get-define TARGET_DEBUG ""] -DSQLITE_DEBUG=1] > -1} { + msg-result "Note: this is a debug build, so performance will suffer." + } + teaish-cflags-add -define OPT_FEATURE_FLAGS +}; # sqlite-handle-common-feature-flags + +# +# If --enable-threadsafe is set, this adds -DSQLITE_THREADSAFE=1 to +# OPT_FEATURE_FLAGS and sets LDFLAGS_PTHREAD to the linker flags +# needed for linking pthread (possibly an empty string). If +# --enable-threadsafe is not set, adds -DSQLITE_THREADSAFE=0 to +# OPT_FEATURE_FLAGS and sets LDFLAGS_PTHREAD to an empty string. +# +# It prepends the flags to the global LDFLAGS. +proc sqlite-handle-threadsafe {} { + msg-checking "Support threadsafe operation? " + define LDFLAGS_PTHREAD "" + set enable 0 + if {[proj-opt-was-provided threadsafe]} { + proj-if-opt-truthy threadsafe { + if {[proj-check-function-in-lib pthread_create pthread] + && [proj-check-function-in-lib pthread_mutexattr_init pthread]} { + incr enable + set ldf [get-define lib_pthread_create] + define LDFLAGS_PTHREAD $ldf + teaish-ldflags-prepend $ldf + undefine lib_pthread_create + undefine lib_pthread_mutexattr_init + } else { + user-error "Missing required pthread libraries. Use --disable-threadsafe to disable this check." + } + # Recall that LDFLAGS_PTHREAD might be empty even if pthreads if + # found because it's in -lc on some platforms. + } { + msg-result "Disabled using --disable-threadsafe" + } + } else { + # + # If user does not specify --[disable-]threadsafe then select a + # default based on whether it looks like Tcl has threading + # support. + # + catch { + scan [exec echo {puts [tcl::pkgconfig get threaded]} | [get-define TCLSH_CMD]] \ + %d enable + } + if {$enable} { + set flagName "--threadsafe" + set lblAbled "enabled" + msg-result yes + } else { + set flagName "--disable-threadsafe" + set lblAbled "disabled" + msg-result no + } + msg-result "Defaulting to ${flagName} because Tcl has threading ${lblAbled}." + # ^^^ We (probably) don't need to link against -lpthread in the + # is-enabled case. We might in the case of static linking. Unsure. + } + sqlite-add-feature-flag -DSQLITE_THREADSAFE=${enable} + return $enable +} + +# +# Handles the --enable-load-extension flag. Returns 1 if the support +# is enabled, else 0. If support for that feature is not found, a +# fatal error is triggered if --enable-load-extension is explicitly +# provided, else a loud warning is instead emitted. If +# --disable-load-extension is used, no check is performed. +# +# Makes the following environment changes: +# +# - defines LDFLAGS_DLOPEN to any linker flags needed for this +# feature. It may legally be empty on some systems where dlopen() +# is in libc. +# +# - If the feature is not available, adds +# -DSQLITE_OMIT_LOAD_EXTENSION=1 to the feature flags list. +proc sqlite-handle-load-extension {} { + define LDFLAGS_DLOPEN "" + set found 0 + proj-if-opt-truthy load-extension { + set found [proj-check-function-in-lib dlopen dl] + if {$found} { + set ldf [get-define lib_dlopen] + define LDFLAGS_DLOPEN $ldf + teaish-ldflags-prepend $ldf + undefine lib_dlopen + } else { + if {[proj-opt-was-provided load-extension]} { + # Explicit --enable-load-extension: fail if not found + proj-indented-notice -error { + --enable-load-extension was provided but dlopen() + not found. Use --disable-load-extension to bypass this + check. 
+ } + } else { + # It was implicitly enabled: warn if not found + proj-indented-notice { + WARNING: dlopen() not found, so loadable module support will + be disabled. Use --disable-load-extension to bypass this + check. + } + } + } + } + if {$found} { + msg-result "Loadable extension support enabled." + } else { + msg-result "Disabling loadable extension support. Use --enable-load-extension to enable them." + sqlite-add-feature-flag -DSQLITE_OMIT_LOAD_EXTENSION=1 + } + return $found +} + +# +# ICU - International Components for Unicode +# +# Handles these flags: +# +# --with-icu-ldflags=LDFLAGS +# --with-icu-cflags=CFLAGS +# --with-icu-config[=auto | pkg-config | /path/to/icu-config] +# --enable-icu-collations +# +# --with-icu-config values: +# +# - auto: use the first one of (pkg-config, icu-config) found on the +# system. +# - pkg-config: use only pkg-config to determine flags +# - /path/to/icu-config: use that to determine flags +# +# If --with-icu-config is used as neither pkg-config nor icu-config +# are found, fail fatally. +# +# If both --with-icu-ldflags and --with-icu-config are provided, they +# are cumulative. If neither are provided, icu-collations is not +# honored and a warning is emitted if it is provided. +# +# Design note: though we could automatically enable ICU if the +# icu-config binary or (pkg-config icu-io) are found, we specifically +# do not. ICU is always an opt-in feature. +proc sqlite-handle-icu {} { + define LDFLAGS_LIBICU [join [opt-val with-icu-ldflags ""]] + define CFLAGS_LIBICU [join [opt-val with-icu-cflags ""]] + if {[proj-opt-was-provided with-icu-config]} { + msg-result "Checking for ICU support..." + set icuConfigBin [opt-val with-icu-config] + set tryIcuConfigBin 1; # set to 0 if we end up using pkg-config + if {$icuConfigBin in {auto pkg-config}} { + uplevel 3 { use pkg-config } + if {[pkg-config-init 0] && [pkg-config icu-io]} { + # Maintenance reminder: historical docs say to use both of + # (icu-io, icu-uc). icu-uc lacks a required lib and icu-io has + # all of them on tested OSes. + set tryIcuConfigBin 0 + define LDFLAGS_LIBICU [get-define PKG_ICU_IO_LDFLAGS] + define-append LDFLAGS_LIBICU [get-define PKG_ICU_IO_LIBS] + define CFLAGS_LIBICU [get-define PKG_ICU_IO_CFLAGS] + } elseif {"pkg-config" eq $icuConfigBin} { + proj-fatal "pkg-config cannot find package icu-io" + } else { + proj-assert {"auto" eq $icuConfigBin} + } + } + if {$tryIcuConfigBin} { + if {"auto" eq $icuConfigBin} { + set icuConfigBin [proj-first-bin-of \ + /usr/local/bin/icu-config \ + /usr/bin/icu-config] + if {"" eq $icuConfigBin} { + proj-indented-notice -error { + --with-icu-config=auto cannot find (pkg-config icu-io) or icu-config binary. + On Ubuntu-like systems try: + --with-icu-ldflags='-licui18n -licuuc -licudata' + } + } + } + if {[file-isexec $icuConfigBin]} { + set x [exec $icuConfigBin --ldflags] + if {"" eq $x} { + proj-indented-notice -error \ + [subst { + $icuConfigBin --ldflags returned no data. 
+ On Ubuntu-like systems try: + --with-icu-ldflags='-licui18n -licuuc -licudata' + }] + } + define-append LDFLAGS_LIBICU $x + set x [exec $icuConfigBin --cppflags] + define-append CFLAGS_LIBICU $x + } else { + proj-fatal "--with-icu-config=$icuConfigBin does not refer to an executable" + } + } + } + set ldflags [define LDFLAGS_LIBICU [string trim [get-define LDFLAGS_LIBICU]]] + set cflags [define CFLAGS_LIBICU [string trim [get-define CFLAGS_LIBICU]]] + if {"" ne $ldflags} { + sqlite-add-feature-flag -DSQLITE_ENABLE_ICU + msg-result "Enabling ICU support with flags: $ldflags $cflags" + if {[opt-bool icu-collations]} { + msg-result "Enabling ICU collations." + sqlite-add-feature-flag -DSQLITE_ENABLE_ICU_COLLATIONS + } + teaish-ldflags-prepend $ldflags + teaish-cflags-add $cflags + } elseif {[opt-bool icu-collations]} { + proj-warn "ignoring --enable-icu-collations because neither --with-icu-ldflags nor --with-icu-config provided any linker flags" + } else { + msg-result "ICU support is disabled." + } +}; # sqlite-handle-icu + + +# +# Handles the --with-tempstore flag. +# +# The test fixture likes to set SQLITE_TEMP_STORE on its own, so do +# not set that feature flag unless it was explicitly provided to the +# configure script. +proc sqlite-handle-tempstore {} { + if {[proj-opt-was-provided with-tempstore]} { + set ts [opt-val with-tempstore no] + set tsn 1 + msg-checking "Use an in-RAM database for temporary tables? " + switch -exact -- $ts { + never { set tsn 0 } + no { set tsn 1 } + yes { set tsn 2 } + always { set tsn 3 } + default { + user-error "Invalid --with-tempstore value '$ts'. Use one of: never, no, yes, always" + } + } + msg-result $ts + sqlite-add-feature-flag -DSQLITE_TEMP_STORE=$tsn + } +} + +# +# Handles the --enable-math flag. +proc sqlite-handle-math {} { + proj-if-opt-truthy math { + if {![proj-check-function-in-lib ceil m]} { + user-error "Cannot find libm functions. Use --disable-math to bypass this." + } + set lfl [get-define lib_ceil] + undefine lib_ceil + define LDFLAGS_MATH $lfl + teaish-ldflags-prepend $lfl + sqlite-add-feature-flag -DSQLITE_ENABLE_MATH_FUNCTIONS + msg-result "Enabling math SQL functions" + } { + define LDFLAGS_MATH "" + msg-result "Disabling math SQL functions" + } +} + +# +# Move -DSQLITE_OMIT... and -DSQLITE_ENABLE... flags from CFLAGS and +# CPPFLAGS to OPT_FEATURE_FLAGS and remove them from BUILD_CFLAGS. +proc sqlite-munge-cflags {} { + # Move CFLAGS and CPPFLAGS entries matching -DSQLITE_OMIT* and + # -DSQLITE_ENABLE* to OPT_FEATURE_FLAGS. This behavior is derived + # from the pre-3.48 build. + # + # If any configure flags for features are in conflict with + # CFLAGS/CPPFLAGS-specified feature flags, all bets are off. There + # are no guarantees about which one will take precedence. 
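To make the relocation rule concrete, here is a small standalone sketch of the same filter using plain Tcl lists and hypothetical input flags; the actual proc below operates on the CFLAGS and CPPFLAGS defines through `get-define` and `define` rather than local variables.

```tcl
# Standalone sketch with assumed inputs; not part of the patch.
set cflags  {-O2 -DSQLITE_ENABLE_FTS5 -DSQLITE_OMIT_JSON}
set feature {}
set keep    {}
foreach cf $cflags {
  switch -glob -- $cf {
    -DSQLITE_OMIT* -
    -DSQLITE_ENABLE* { lappend feature $cf }
    default          { lappend keep $cf }
  }
}
# Result: keep    = {-O2}
#         feature = {-DSQLITE_ENABLE_FTS5 -DSQLITE_OMIT_JSON}
```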
+ foreach flagDef {CFLAGS CPPFLAGS} { + set tmp "" + foreach cf [get-define $flagDef ""] { + switch -glob -- $cf { + -DSQLITE_OMIT* - + -DSQLITE_ENABLE* { + sqlite-add-feature-flag $cf + } + default { + lappend tmp $cf + } + } + } + define $flagDef $tmp + } +} diff --git a/autoconf/tea/teaish.test.tcl b/autoconf/tea/teaish.test.tcl new file mode 100644 index 0000000000..b63c9426e3 --- /dev/null +++ b/autoconf/tea/teaish.test.tcl @@ -0,0 +1,14 @@ +test-expect 1.0-open { + sqlite3 db :memory: +} {} + +test-assert 1.1-version-3.x { + [string match 3.* [db eval {select sqlite_version()}]] +} + +test-expect 1.2-select { + db eval {select 'hi, world',1,2,3} +} {{hi, world} 1 2 3} + + +test-expect 99.0-db-close {db close} {} diff --git a/autoconf/tea/win/makefile.vc b/autoconf/tea/win/makefile.vc deleted file mode 100644 index d92a8428bf..0000000000 --- a/autoconf/tea/win/makefile.vc +++ /dev/null @@ -1,419 +0,0 @@ -# makefile.vc -- -*- Makefile -*- -# -# Microsoft Visual C++ makefile for use with nmake.exe v1.62+ (VC++ 5.0+) -# -# This makefile is based upon the Tcl 8.4 Makefile.vc and modified to -# make it suitable as a general package makefile. Look for the word EDIT -# which marks sections that may need modification. As a minumum you will -# need to change the PROJECT, DOTVERSION and DLLOBJS variables to values -# relevant to your package. -# -# See the file "license.terms" for information on usage and redistribution -# of this file, and for a DISCLAIMER OF ALL WARRANTIES. -# -# Copyright (c) 1995-1996 Sun Microsystems, Inc. -# Copyright (c) 1998-2000 Ajuba Solutions. -# Copyright (c) 2001 ActiveState Corporation. -# Copyright (c) 2001-2002 David Gravereaux. -# Copyright (c) 2003 Pat Thoyts -# -#------------------------------------------------------------------------- -# RCS: @(#)$Id: makefile.vc,v 1.4 2004/07/26 08:22:05 patthoyts Exp $ -#------------------------------------------------------------------------- - -!if !defined(MSDEVDIR) && !defined(MSVCDIR) && !defined(VCINSTALLDIR) && !defined(MSSDK) && !defined(WINDOWSSDKDIR) -MSG = ^ -You will need to run vcvars32.bat from Developer Studio, first, to setup^ -the environment. Jump to this line to read the new instructions. -!error $(MSG) -!endif - -#------------------------------------------------------------------------------ -# HOW TO USE this makefile: -# -# 1) It is now necessary to have %MSVCDir% set in the environment. This is -# used as a check to see if vcvars32.bat had been run prior to running -# nmake or during the installation of Microsoft Visual C++, MSVCDir had -# been set globally and the PATH adjusted. Either way is valid. -# -# You'll need to run vcvars32.bat contained in the MsDev's vc(98)/bin -# directory to setup the proper environment, if needed, for your current -# setup. This is a needed bootstrap requirement and allows the swapping of -# different environments to be easier. -# -# 2) To use the Platform SDK (not expressly needed), run setenv.bat after -# vcvars32.bat according to the instructions for it. This can also turn on -# the 64-bit compiler, if your SDK has it. -# -# 3) Targets are: -# all -- Builds everything. -# -- Builds the project (eg: nmake sample) -# test -- Builds and runs the test suite. -# install -- Installs the built binaries and libraries to $(INSTALLDIR) -# in an appropriate subdirectory. -# clean/realclean/distclean -- varying levels of cleaning. -# -# 4) Macros usable on the commandline: -# INSTALLDIR= -# Sets where to install Tcl from the built binaries. 
-# C:\Progra~1\Tcl is assumed when not specified. -# -# OPTS=static,msvcrt,staticpkg,threads,symbols,profile,loimpact,none -# Sets special options for the core. The default is for none. -# Any combination of the above may be used (comma separated). -# 'none' will over-ride everything to nothing. -# -# static = Builds a static library of the core instead of a -# dll. The shell will be static (and large), as well. -# msvcrt = Effects the static option only to switch it from -# using libcmt(d) as the C runtime [by default] to -# msvcrt(d). This is useful for static embedding -# support. -# staticpkg = Effects the static option only to switch -# tclshXX.exe to have the dde and reg extension linked -# inside it. -# threads = Turns on full multithreading support. -# thrdalloc = Use the thread allocator (shared global free pool). -# symbols = Adds symbols for step debugging. -# profile = Adds profiling hooks. Map file is assumed. -# loimpact = Adds a flag for how NT treats the heap to keep memory -# in use, low. This is said to impact alloc performance. -# -# STATS=memdbg,compdbg,none -# Sets optional memory and bytecode compiler debugging code added -# to the core. The default is for none. Any combination of the -# above may be used (comma separated). 'none' will over-ride -# everything to nothing. -# -# memdbg = Enables the debugging memory allocator. -# compdbg = Enables byte compilation logging. -# -# MACHINE=(IX86|IA64|ALPHA) -# Set the machine type used for the compiler, linker, and -# resource compiler. This hook is needed to tell the tools -# when alternate platforms are requested. IX86 is the default -# when not specified. -# -# TMP_DIR= -# OUT_DIR= -# Hooks to allow the intermediate and output directories to be -# changed. $(OUT_DIR) is assumed to be -# $(BINROOT)\(Release|Debug) based on if symbols are requested. -# $(TMP_DIR) will de $(OUT_DIR)\ by default. -# -# TESTPAT= -# Reads the tests requested to be run from this file. -# -# CFG_ENCODING=encoding -# name of encoding for configuration information. Defaults -# to cp1252 -# -# 5) Examples: -# -# Basic syntax of calling nmake looks like this: -# nmake [-nologo] -f makefile.vc [target|macrodef [target|macrodef] [...]] -# -# Standard (no frills) -# c:\tcl_src\win\>c:\progra~1\micros~1\vc98\bin\vcvars32.bat -# Setting environment for using Microsoft Visual C++ tools. -# c:\tcl_src\win\>nmake -f makefile.vc all -# c:\tcl_src\win\>nmake -f makefile.vc install INSTALLDIR=c:\progra~1\tcl -# -# Building for Win64 -# c:\tcl_src\win\>c:\progra~1\micros~1\vc98\bin\vcvars32.bat -# Setting environment for using Microsoft Visual C++ tools. -# c:\tcl_src\win\>c:\progra~1\platfo~1\setenv.bat /pre64 /RETAIL -# Targeting Windows pre64 RETAIL -# c:\tcl_src\win\>nmake -f makefile.vc MACHINE=IA64 -# -#------------------------------------------------------------------------------ -#============================================================================== -############################################################################### -#------------------------------------------------------------------------------ - -!if !exist("makefile.vc") -MSG = ^ -You must run this makefile only from the directory it is in.^ -Please `cd` to its location first. -!error $(MSG) -!endif - -#------------------------------------------------------------------------- -# Project specific information (EDIT) -# -# You should edit this with the name and version of your project. 
This -# information is used to generate the name of the package library and -# it's install location. -# -# For example, the sample extension is going to build sample04.dll and -# would install it into $(INSTALLDIR)\lib\sample04 -# -# You need to specify the object files that need to be linked into your -# binary here. -# -#------------------------------------------------------------------------- - -PROJECT = sqlite3 -!include "rules.vc" - -# nmakehelp -V will search the file for tag, skips until a -# number and returns all character until a character not in [0-9.ab] -# is read. - -!if [echo REM = This file is generated from Makefile.vc > versions.vc] -!endif -# get project version from row "AC_INIT([sqlite], [3.x.y])" -!if [echo DOTVERSION = \>> versions.vc] \ - && [nmakehlp -V ..\configure.ac AC_INIT >> versions.vc] -!endif -!include "versions.vc" - -VERSION = $(DOTVERSION:.=) -STUBPREFIX = $(PROJECT)stub - -#------------------------------------------------------------------------- -# Target names and paths ( shouldn't need changing ) -#------------------------------------------------------------------------- - -BINROOT = . -ROOT = .. - -PRJIMPLIB = $(OUT_DIR)\$(PROJECT)$(VERSION)$(SUFX).lib -PRJLIBNAME = $(PROJECT).$(EXT) -PRJLIB = $(OUT_DIR)\$(PRJLIBNAME) - -PRJSTUBLIBNAME = $(STUBPREFIX)$(VERSION).lib -PRJSTUBLIB = $(OUT_DIR)\$(PRJSTUBLIBNAME) - -### Make sure we use backslash only. -PRJ_INSTALL_DIR = $(_INSTALLDIR)\$(PROJECT)$(DOTVERSION) -LIB_INSTALL_DIR = $(PRJ_INSTALL_DIR) -BIN_INSTALL_DIR = $(PRJ_INSTALL_DIR) -DOC_INSTALL_DIR = $(PRJ_INSTALL_DIR) -SCRIPT_INSTALL_DIR = $(PRJ_INSTALL_DIR) -INCLUDE_INSTALL_DIR = $(_TCLDIR)\include - -### The following paths CANNOT have spaces in them. -GENERICDIR = $(ROOT)\generic -WINDIR = $(ROOT)\win -LIBDIR = $(ROOT)\library -DOCDIR = $(ROOT)\doc -TOOLSDIR = $(ROOT)\tools -COMPATDIR = $(ROOT)\compat - -### Figure out where the primary source code file(s) is/are. -!if exist("$(ROOT)\..\..\sqlite3.c") && exist("$(ROOT)\..\..\src\tclsqlite.c") -SQL_INCLUDES = -I"$(ROOT)\..\.." -SQLITE_SRCDIR = $(ROOT)\..\.. -TCLSQLITE_SRCDIR = $(ROOT)\..\..\src -DLLOBJS = $(TMP_DIR)\sqlite3.obj $(TMP_DIR)\tclsqlite.obj -!else -TCLSQLITE_SRCDIR = $(ROOT)\generic -DLLOBJS = $(TMP_DIR)\tclsqlite3.obj -!endif - -#--------------------------------------------------------------------- -# Compile flags -#--------------------------------------------------------------------- - -!if !$(DEBUG) -!if $(OPTIMIZING) -### This cranks the optimization level to maximize speed -cdebug = -O2 -Op -Gs -!else -cdebug = -!endif -!else if "$(MACHINE)" == "IA64" -### Warnings are too many, can't support warnings into errors. -cdebug = -Z7 -Od -GZ -!else -cdebug = -Z7 -WX -Od -GZ -!endif - -### Declarations common to all compiler options -cflags = -nologo -c -W3 -D_CRT_SECURE_NO_WARNINGS -YX -Fp$(TMP_DIR)^\ - -!if $(MSVCRT) -!if $(DEBUG) -crt = -MDd -!else -crt = -MD -!endif -!else -!if $(DEBUG) -crt = -MTd -!else -crt = -MT -!endif -!endif - -INCLUDES = $(SQL_INCLUDES) $(TCL_INCLUDES) -I"$(WINDIR)" \ - -I"$(GENERICDIR)" -I"$(ROOT)\.." 
-BASE_CLFAGS = $(cflags) $(cdebug) $(crt) $(INCLUDES) \ - -DSQLITE_3_SUFFIX_ONLY=1 -DSQLITE_ENABLE_RTREE=1 \ - -DSQLITE_ENABLE_FTS3=1 -DSQLITE_OMIT_DEPRECATED=1 -CON_CFLAGS = $(cflags) $(cdebug) $(crt) -DCONSOLE -DSQLITE_ENABLE_FTS3=1 -TCL_CFLAGS = -DBUILD_sqlite -DUSE_TCL_STUBS \ - -DPACKAGE_VERSION="\"$(DOTVERSION)\"" $(BASE_CLFAGS) \ - $(OPTDEFINES) - -#--------------------------------------------------------------------- -# Link flags -#--------------------------------------------------------------------- - -!if $(DEBUG) -ldebug = -debug:full -debugtype:cv -!else -ldebug = -release -opt:ref -opt:icf,3 -!endif - -### Declarations common to all linker options -lflags = -nologo -machine:$(MACHINE) $(ldebug) - -!if $(PROFILE) -lflags = $(lflags) -profile -!endif - -!if $(ALIGN98_HACK) && !$(STATIC_BUILD) -### Align sections for PE size savings. -lflags = $(lflags) -opt:nowin98 -!else if !$(ALIGN98_HACK) && $(STATIC_BUILD) -### Align sections for speed in loading by choosing the virtual page size. -lflags = $(lflags) -align:4096 -!endif - -!if $(LOIMPACT) -lflags = $(lflags) -ws:aggressive -!endif - -dlllflags = $(lflags) -dll -conlflags = $(lflags) -subsystem:console -guilflags = $(lflags) -subsystem:windows -baselibs = $(TCLSTUBLIB) - -#--------------------------------------------------------------------- -# TclTest flags -#--------------------------------------------------------------------- - -!IF "$(TESTPAT)" != "" -TESTFLAGS = $(TESTFLAGS) -file $(TESTPAT) -!ENDIF - -#--------------------------------------------------------------------- -# Project specific targets (EDIT) -#--------------------------------------------------------------------- - -all: setup $(PROJECT) -$(PROJECT): setup $(PRJLIB) -install: install-binaries install-libraries install-docs - -# Tests need to ensure we load the right dll file we -# have to handle the output differently on Win9x. 
-# -!if "$(OS)" == "Windows_NT" || "$(MSVCDIR)" == "IDE" -test: setup $(PROJECT) - set TCL_LIBRARY=$(ROOT)/library - $(TCLSH) << -load $(PRJLIB:\=/) -cd "$(ROOT)/tests" -set argv "$(TESTFLAGS)" -source all.tcl -<< -!else -test: setup $(PROJECT) - echo Please wait while the test results are collected - set TCL_LIBRARY=$(ROOT)/library - $(TCLSH) << >tests.log -load $(PRJLIB:\=/) -cd "$(ROOT)/tests" -set argv "$(TESTFLAGS)" -source all.tcl -<< - type tests.log | more -!endif - -setup: - @if not exist $(OUT_DIR)\nul mkdir $(OUT_DIR) - @if not exist $(TMP_DIR)\nul mkdir $(TMP_DIR) - -$(PRJLIB): $(DLLOBJS) - $(link32) $(dlllflags) -out:$@ $(baselibs) @<< -$** -<< - -@del $*.exp - -$(PRJSTUBLIB): $(PRJSTUBOBJS) - $(lib32) -nologo -out:$@ $(PRJSTUBOBJS) - -#--------------------------------------------------------------------- -# Implicit rules -#--------------------------------------------------------------------- - -$(TMP_DIR)\sqlite3.obj: $(SQLITE_SRCDIR)\sqlite3.c - $(cc32) $(TCL_CFLAGS) -DBUILD_$(PROJECT) -Fo$(TMP_DIR)\ \ - -c $(SQLITE_SRCDIR)\sqlite3.c - -$(TMP_DIR)\tclsqlite.obj: $(TCLSQLITE_SRCDIR)\tclsqlite.c - $(cc32) $(TCL_CFLAGS) -DBUILD_$(PROJECT) -Fo$(TMP_DIR)\ \ - -c $(TCLSQLITE_SRCDIR)\tclsqlite.c - -$(TMP_DIR)\tclsqlite3.obj: $(TCLSQLITE_SRCDIR)\tclsqlite3.c - $(cc32) $(TCL_CFLAGS) -DBUILD_$(PROJECT) -Fo$(TMP_DIR)\ \ - -c $(TCLSQLITE_SRCDIR)\tclsqlite3.c - -{$(WINDIR)}.rc{$(TMP_DIR)}.res: - $(rc32) -fo $@ -r -i "$(GENERICDIR)" -D__WIN32__ \ -!if $(DEBUG) - -d DEBUG \ -!endif -!if $(TCL_THREADS) - -d TCL_THREADS \ -!endif -!if $(STATIC_BUILD) - -d STATIC_BUILD \ -!endif - $< - -.SUFFIXES: -.SUFFIXES:.c .rc - -#--------------------------------------------------------------------- -# Installation. (EDIT) -# -# You may need to modify this section to reflect the final distribution -# of your files and possibly to generate documentation. -# -#--------------------------------------------------------------------- - -install-binaries: - @echo Installing binaries to '$(SCRIPT_INSTALL_DIR)' - @if not exist "$(SCRIPT_INSTALL_DIR)" mkdir "$(SCRIPT_INSTALL_DIR)" - @$(CPY) $(PRJLIB) "$(SCRIPT_INSTALL_DIR)" >NUL - -install-libraries: - @echo Installing libraries to '$(SCRIPT_INSTALL_DIR)' - @if exist $(LIBDIR) $(CPY) $(LIBDIR)\*.tcl "$(SCRIPT_INSTALL_DIR)" - @echo Installing package index in '$(SCRIPT_INSTALL_DIR)' - @type << >"$(SCRIPT_INSTALL_DIR)\pkgIndex.tcl" -package ifneeded $(PROJECT) $(DOTVERSION) \ - [list load [file join $$dir $(PRJLIBNAME)] sqlite3] -<< - -install-docs: - @echo Installing documentation files to '$(DOC_INSTALL_DIR)' - @if exist $(DOCDIR) $(CPY) $(DOCDIR)\*.n "$(DOC_INSTALL_DIR)" - -#--------------------------------------------------------------------- -# Clean up -#--------------------------------------------------------------------- - -clean: - @if exist $(TMP_DIR)\nul $(RMDIR) $(TMP_DIR) - @if exist $(WINDIR)\version.vc del $(WINDIR)\version.vc - -realclean: clean - @if exist $(OUT_DIR)\nul $(RMDIR) $(OUT_DIR) - -distclean: realclean - @if exist $(WINDIR)\nmakehlp.exe del $(WINDIR)\nmakehlp.exe - @if exist $(WINDIR)\nmakehlp.obj del $(WINDIR)\nmakehlp.obj diff --git a/autoconf/tea/win/nmakehlp.c b/autoconf/tea/win/nmakehlp.c deleted file mode 100644 index 2dc33cc657..0000000000 --- a/autoconf/tea/win/nmakehlp.c +++ /dev/null @@ -1,815 +0,0 @@ -/* - * ---------------------------------------------------------------------------- - * nmakehlp.c -- - * - * This is used to fix limitations within nmake and the environment. - * - * Copyright (c) 2002 by David Gravereaux. 
- * Copyright (c) 2006 by Pat Thoyts - * - * See the file "license.terms" for information on usage and redistribution of - * this file, and for a DISCLAIMER OF ALL WARRANTIES. - * ---------------------------------------------------------------------------- - */ - -#define _CRT_SECURE_NO_DEPRECATE -#include -#ifdef _MSC_VER -#pragma comment (lib, "user32.lib") -#pragma comment (lib, "kernel32.lib") -#endif -#include -#include - -/* - * This library is required for x64 builds with _some_ versions of MSVC - */ -#if defined(_M_IA64) || defined(_M_AMD64) -#if _MSC_VER >= 1400 && _MSC_VER < 1500 -#pragma comment(lib, "bufferoverflowU") -#endif -#endif - -/* ISO hack for dumb VC++ */ -#ifdef _MSC_VER -#define snprintf _snprintf -#endif - - -/* protos */ - -static int CheckForCompilerFeature(const char *option); -static int CheckForLinkerFeature(char **options, int count); -static int IsIn(const char *string, const char *substring); -static int SubstituteFile(const char *substs, const char *filename); -static int QualifyPath(const char *path); -static int LocateDependency(const char *keyfile); -static const char *GetVersionFromFile(const char *filename, const char *match, int numdots); -static DWORD WINAPI ReadFromPipe(LPVOID args); - -/* globals */ - -#define CHUNK 25 -#define STATICBUFFERSIZE 1000 -typedef struct { - HANDLE pipe; - char buffer[STATICBUFFERSIZE]; -} pipeinfo; - -pipeinfo Out = {INVALID_HANDLE_VALUE, ""}; -pipeinfo Err = {INVALID_HANDLE_VALUE, ""}; - -/* - * exitcodes: 0 == no, 1 == yes, 2 == error - */ - -int -main( - int argc, - char *argv[]) -{ - char msg[300]; - DWORD dwWritten; - int chars; - const char *s; - - /* - * Make sure children (cl.exe and link.exe) are kept quiet. - */ - - SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOOPENFILEERRORBOX); - - /* - * Make sure the compiler and linker aren't effected by the outside world. - */ - - SetEnvironmentVariable("CL", ""); - SetEnvironmentVariable("LINK", ""); - - if (argc > 1 && *argv[1] == '-') { - switch (*(argv[1]+1)) { - case 'c': - if (argc != 3) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -c \n" - "Tests for whether cl.exe supports an option\n" - "exitcodes: 0 == no, 1 == yes, 2 == error\n", argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } - return CheckForCompilerFeature(argv[2]); - case 'l': - if (argc < 3) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -l ? ...?\n" - "Tests for whether link.exe supports an option\n" - "exitcodes: 0 == no, 1 == yes, 2 == error\n", argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } - return CheckForLinkerFeature(&argv[2], argc-2); - case 'f': - if (argc == 2) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -f \n" - "Find a substring within another\n" - "exitcodes: 0 == no, 1 == yes, 2 == error\n", argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } else if (argc == 3) { - /* - * If the string is blank, there is no match. 
- */ - - return 0; - } else { - return IsIn(argv[2], argv[3]); - } - case 's': - if (argc == 2) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -s \n" - "Perform a set of string map type substutitions on a file\n" - "exitcodes: 0\n", - argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } - return SubstituteFile(argv[2], argv[3]); - case 'V': - if (argc != 4) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -V filename matchstring\n" - "Extract a version from a file:\n" - "eg: pkgIndex.tcl \"package ifneeded http\"", - argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 0; - } - s = GetVersionFromFile(argv[2], argv[3], *(argv[1]+2) - '0'); - if (s && *s) { - printf("%s\n", s); - return 0; - } else - return 1; /* Version not found. Return non-0 exit code */ - - case 'Q': - if (argc != 3) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -Q path\n" - "Emit the fully qualified path\n" - "exitcodes: 0 == no, 1 == yes, 2 == error\n", argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } - return QualifyPath(argv[2]); - - case 'L': - if (argc != 3) { - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -L keypath\n" - "Emit the fully qualified path of directory containing keypath\n" - "exitcodes: 0 == success, 1 == not found, 2 == error\n", argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, - &dwWritten, NULL); - return 2; - } - return LocateDependency(argv[2]); - } - } - chars = snprintf(msg, sizeof(msg) - 1, - "usage: %s -c|-f|-l|-Q|-s|-V ...\n" - "This is a little helper app to equalize shell differences between WinNT and\n" - "Win9x and get nmake.exe to accomplish its job.\n", - argv[0]); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, chars, &dwWritten, NULL); - return 2; -} - -static int -CheckForCompilerFeature( - const char *option) -{ - STARTUPINFO si; - PROCESS_INFORMATION pi; - SECURITY_ATTRIBUTES sa; - DWORD threadID; - char msg[300]; - BOOL ok; - HANDLE hProcess, h, pipeThreads[2]; - char cmdline[100]; - - hProcess = GetCurrentProcess(); - - ZeroMemory(&pi, sizeof(PROCESS_INFORMATION)); - ZeroMemory(&si, sizeof(STARTUPINFO)); - si.cb = sizeof(STARTUPINFO); - si.dwFlags = STARTF_USESTDHANDLES; - si.hStdInput = INVALID_HANDLE_VALUE; - - ZeroMemory(&sa, sizeof(SECURITY_ATTRIBUTES)); - sa.nLength = sizeof(SECURITY_ATTRIBUTES); - sa.lpSecurityDescriptor = NULL; - sa.bInheritHandle = FALSE; - - /* - * Create a non-inheritible pipe. - */ - - CreatePipe(&Out.pipe, &h, &sa, 0); - - /* - * Dupe the write side, make it inheritible, and close the original. - */ - - DuplicateHandle(hProcess, h, hProcess, &si.hStdOutput, 0, TRUE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - - /* - * Same as above, but for the error side. - */ - - CreatePipe(&Err.pipe, &h, &sa, 0); - DuplicateHandle(hProcess, h, hProcess, &si.hStdError, 0, TRUE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - - /* - * Base command line. - */ - - lstrcpy(cmdline, "cl.exe -nologo -c -TC -Zs -X -Fp.\\_junk.pch "); - - /* - * Append our option for testing - */ - - lstrcat(cmdline, option); - - /* - * Filename to compile, which exists, but is nothing and empty. - */ - - lstrcat(cmdline, " .\\nul"); - - ok = CreateProcess( - NULL, /* Module name. */ - cmdline, /* Command line. */ - NULL, /* Process handle not inheritable. */ - NULL, /* Thread handle not inheritable. */ - TRUE, /* yes, inherit handles. */ - DETACHED_PROCESS, /* No console for you. 
*/ - NULL, /* Use parent's environment block. */ - NULL, /* Use parent's starting directory. */ - &si, /* Pointer to STARTUPINFO structure. */ - &pi); /* Pointer to PROCESS_INFORMATION structure. */ - - if (!ok) { - DWORD err = GetLastError(); - int chars = snprintf(msg, sizeof(msg) - 1, - "Tried to launch: \"%s\", but got error [%u]: ", cmdline, err); - - FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS| - FORMAT_MESSAGE_MAX_WIDTH_MASK, 0L, err, 0, (LPSTR)&msg[chars], - (300-chars), 0); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, lstrlen(msg), &err,NULL); - return 2; - } - - /* - * Close our references to the write handles that have now been inherited. - */ - - CloseHandle(si.hStdOutput); - CloseHandle(si.hStdError); - - WaitForInputIdle(pi.hProcess, 5000); - CloseHandle(pi.hThread); - - /* - * Start the pipe reader threads. - */ - - pipeThreads[0] = CreateThread(NULL, 0, ReadFromPipe, &Out, 0, &threadID); - pipeThreads[1] = CreateThread(NULL, 0, ReadFromPipe, &Err, 0, &threadID); - - /* - * Block waiting for the process to end. - */ - - WaitForSingleObject(pi.hProcess, INFINITE); - CloseHandle(pi.hProcess); - - /* - * Wait for our pipe to get done reading, should it be a little slow. - */ - - WaitForMultipleObjects(2, pipeThreads, TRUE, 500); - CloseHandle(pipeThreads[0]); - CloseHandle(pipeThreads[1]); - - /* - * Look for the commandline warning code in both streams. - * - in MSVC 6 & 7 we get D4002, in MSVC 8 we get D9002. - */ - - return !(strstr(Out.buffer, "D4002") != NULL - || strstr(Err.buffer, "D4002") != NULL - || strstr(Out.buffer, "D9002") != NULL - || strstr(Err.buffer, "D9002") != NULL - || strstr(Out.buffer, "D2021") != NULL - || strstr(Err.buffer, "D2021") != NULL); -} - -static int -CheckForLinkerFeature( - char **options, - int count) -{ - STARTUPINFO si; - PROCESS_INFORMATION pi; - SECURITY_ATTRIBUTES sa; - DWORD threadID; - char msg[300]; - BOOL ok; - HANDLE hProcess, h, pipeThreads[2]; - int i; - char cmdline[255]; - - hProcess = GetCurrentProcess(); - - ZeroMemory(&pi, sizeof(PROCESS_INFORMATION)); - ZeroMemory(&si, sizeof(STARTUPINFO)); - si.cb = sizeof(STARTUPINFO); - si.dwFlags = STARTF_USESTDHANDLES; - si.hStdInput = INVALID_HANDLE_VALUE; - - ZeroMemory(&sa, sizeof(SECURITY_ATTRIBUTES)); - sa.nLength = sizeof(SECURITY_ATTRIBUTES); - sa.lpSecurityDescriptor = NULL; - sa.bInheritHandle = TRUE; - - /* - * Create a non-inheritible pipe. - */ - - CreatePipe(&Out.pipe, &h, &sa, 0); - - /* - * Dupe the write side, make it inheritible, and close the original. - */ - - DuplicateHandle(hProcess, h, hProcess, &si.hStdOutput, 0, TRUE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - - /* - * Same as above, but for the error side. - */ - - CreatePipe(&Err.pipe, &h, &sa, 0); - DuplicateHandle(hProcess, h, hProcess, &si.hStdError, 0, TRUE, - DUPLICATE_SAME_ACCESS | DUPLICATE_CLOSE_SOURCE); - - /* - * Base command line. - */ - - lstrcpy(cmdline, "link.exe -nologo "); - - /* - * Append our option for testing. - */ - - for (i = 0; i < count; i++) { - lstrcat(cmdline, " \""); - lstrcat(cmdline, options[i]); - lstrcat(cmdline, "\""); - } - - ok = CreateProcess( - NULL, /* Module name. */ - cmdline, /* Command line. */ - NULL, /* Process handle not inheritable. */ - NULL, /* Thread handle not inheritable. */ - TRUE, /* yes, inherit handles. */ - DETACHED_PROCESS, /* No console for you. */ - NULL, /* Use parent's environment block. */ - NULL, /* Use parent's starting directory. */ - &si, /* Pointer to STARTUPINFO structure. 
*/ - &pi); /* Pointer to PROCESS_INFORMATION structure. */ - - if (!ok) { - DWORD err = GetLastError(); - int chars = snprintf(msg, sizeof(msg) - 1, - "Tried to launch: \"%s\", but got error [%u]: ", cmdline, err); - - FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM|FORMAT_MESSAGE_IGNORE_INSERTS| - FORMAT_MESSAGE_MAX_WIDTH_MASK, 0L, err, 0, (LPSTR)&msg[chars], - (300-chars), 0); - WriteFile(GetStdHandle(STD_ERROR_HANDLE), msg, lstrlen(msg), &err,NULL); - return 2; - } - - /* - * Close our references to the write handles that have now been inherited. - */ - - CloseHandle(si.hStdOutput); - CloseHandle(si.hStdError); - - WaitForInputIdle(pi.hProcess, 5000); - CloseHandle(pi.hThread); - - /* - * Start the pipe reader threads. - */ - - pipeThreads[0] = CreateThread(NULL, 0, ReadFromPipe, &Out, 0, &threadID); - pipeThreads[1] = CreateThread(NULL, 0, ReadFromPipe, &Err, 0, &threadID); - - /* - * Block waiting for the process to end. - */ - - WaitForSingleObject(pi.hProcess, INFINITE); - CloseHandle(pi.hProcess); - - /* - * Wait for our pipe to get done reading, should it be a little slow. - */ - - WaitForMultipleObjects(2, pipeThreads, TRUE, 500); - CloseHandle(pipeThreads[0]); - CloseHandle(pipeThreads[1]); - - /* - * Look for the commandline warning code in the stderr stream. - */ - - return !(strstr(Out.buffer, "LNK1117") != NULL || - strstr(Err.buffer, "LNK1117") != NULL || - strstr(Out.buffer, "LNK4044") != NULL || - strstr(Err.buffer, "LNK4044") != NULL || - strstr(Out.buffer, "LNK4224") != NULL || - strstr(Err.buffer, "LNK4224") != NULL); -} - -static DWORD WINAPI -ReadFromPipe( - LPVOID args) -{ - pipeinfo *pi = (pipeinfo *) args; - char *lastBuf = pi->buffer; - DWORD dwRead; - BOOL ok; - - again: - if (lastBuf - pi->buffer + CHUNK > STATICBUFFERSIZE) { - CloseHandle(pi->pipe); - return (DWORD)-1; - } - ok = ReadFile(pi->pipe, lastBuf, CHUNK, &dwRead, 0L); - if (!ok || dwRead == 0) { - CloseHandle(pi->pipe); - return 0; - } - lastBuf += dwRead; - goto again; - - return 0; /* makes the compiler happy */ -} - -static int -IsIn( - const char *string, - const char *substring) -{ - return (strstr(string, substring) != NULL); -} - -/* - * GetVersionFromFile -- - * Looks for a match string in a file and then returns the version - * following the match where a version is anything acceptable to - * package provide or package ifneeded. - */ - -static const char * -GetVersionFromFile( - const char *filename, - const char *match, - int numdots) -{ - static char szBuffer[100]; - char *szResult = NULL; - FILE *fp = fopen(filename, "rt"); - - if (fp != NULL) { - /* - * Read data until we see our match string. - */ - - while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) { - LPSTR p, q; - - p = strstr(szBuffer, match); - if (p != NULL) { - /* - * Skip to first digit after the match. - */ - - p += strlen(match); - while (*p && !isdigit((unsigned char)*p)) { - ++p; - } - - /* - * Find ending whitespace. 
- */ - - q = p; - while (*q && (strchr("0123456789.ab", *q)) && (((!strchr(".ab", *q) - && !strchr("ab", q[-1])) || --numdots))) { - ++q; - } - - *q = 0; - szResult = p; - break; - } - } - fclose(fp); - } - return szResult; -} - -/* - * List helpers for the SubstituteFile function - */ - -typedef struct list_item_t { - struct list_item_t *nextPtr; - char * key; - char * value; -} list_item_t; - -/* insert a list item into the list (list may be null) */ -static list_item_t * -list_insert(list_item_t **listPtrPtr, const char *key, const char *value) -{ - list_item_t *itemPtr = (list_item_t *)malloc(sizeof(list_item_t)); - if (itemPtr) { - itemPtr->key = strdup(key); - itemPtr->value = strdup(value); - itemPtr->nextPtr = NULL; - - while(*listPtrPtr) { - listPtrPtr = &(*listPtrPtr)->nextPtr; - } - *listPtrPtr = itemPtr; - } - return itemPtr; -} - -static void -list_free(list_item_t **listPtrPtr) -{ - list_item_t *tmpPtr, *listPtr = *listPtrPtr; - while (listPtr) { - tmpPtr = listPtr; - listPtr = listPtr->nextPtr; - free(tmpPtr->key); - free(tmpPtr->value); - free(tmpPtr); - } -} - -/* - * SubstituteFile -- - * As windows doesn't provide anything useful like sed and it's unreliable - * to use the tclsh you are building against (consider x-platform builds - - * eg compiling AMD64 target from IX86) we provide a simple substitution - * option here to handle autoconf style substitutions. - * The substitution file is whitespace and line delimited. The file should - * consist of lines matching the regular expression: - * \s*\S+\s+\S*$ - * - * Usage is something like: - * nmakehlp -S << $** > $@ - * @PACKAGE_NAME@ $(PACKAGE_NAME) - * @PACKAGE_VERSION@ $(PACKAGE_VERSION) - * << - */ - -static int -SubstituteFile( - const char *substitutions, - const char *filename) -{ - static char szBuffer[1024], szCopy[1024]; - list_item_t *substPtr = NULL; - FILE *fp, *sp; - - fp = fopen(filename, "rt"); - if (fp != NULL) { - - /* - * Build a list of substutitions from the first filename - */ - - sp = fopen(substitutions, "rt"); - if (sp != NULL) { - while (fgets(szBuffer, sizeof(szBuffer), sp) != NULL) { - unsigned char *ks, *ke, *vs, *ve; - ks = (unsigned char*)szBuffer; - while (ks && *ks && isspace(*ks)) ++ks; - ke = ks; - while (ke && *ke && !isspace(*ke)) ++ke; - vs = ke; - while (vs && *vs && isspace(*vs)) ++vs; - ve = vs; - while (ve && *ve && !(*ve == '\r' || *ve == '\n')) ++ve; - *ke = 0, *ve = 0; - list_insert(&substPtr, (char*)ks, (char*)vs); - } - fclose(sp); - } - - /* debug: dump the list */ -#ifndef NDEBUG - { - int n = 0; - list_item_t *p = NULL; - for (p = substPtr; p != NULL; p = p->nextPtr, ++n) { - fprintf(stderr, "% 3d '%s' => '%s'\n", n, p->key, p->value); - } - } -#endif - - /* - * Run the substitutions over each line of the input - */ - - while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) { - list_item_t *p = NULL; - for (p = substPtr; p != NULL; p = p->nextPtr) { - char *m = strstr(szBuffer, p->key); - if (m) { - char *cp, *op, *sp; - cp = szCopy; - op = szBuffer; - while (op != m) *cp++ = *op++; - sp = p->value; - while (sp && *sp) *cp++ = *sp++; - op += strlen(p->key); - while (*op) *cp++ = *op++; - *cp = 0; - memcpy(szBuffer, szCopy, sizeof(szCopy)); - } - } - printf("%s", szBuffer); - } - - list_free(&substPtr); - } - fclose(fp); - return 0; -} - -BOOL FileExists(LPCTSTR szPath) -{ -#ifndef INVALID_FILE_ATTRIBUTES - #define INVALID_FILE_ATTRIBUTES ((DWORD)-1) -#endif - DWORD pathAttr = GetFileAttributes(szPath); - return (pathAttr != INVALID_FILE_ATTRIBUTES && - !(pathAttr & 
FILE_ATTRIBUTE_DIRECTORY)); -} - - -/* - * QualifyPath -- - * - * This composes the current working directory with a provided path - * and returns the fully qualified and normalized path. - * Mostly needed to setup paths for testing. - */ - -static int -QualifyPath( - const char *szPath) -{ - char szCwd[MAX_PATH + 1]; - - GetFullPathName(szPath, sizeof(szCwd)-1, szCwd, NULL); - printf("%s\n", szCwd); - return 0; -} - -/* - * Implements LocateDependency for a single directory. See that command - * for an explanation. - * Returns 0 if found after printing the directory. - * Returns 1 if not found but no errors. - * Returns 2 on any kind of error - * Basically, these are used as exit codes for the process. - */ -static int LocateDependencyHelper(const char *dir, const char *keypath) -{ - HANDLE hSearch; - char path[MAX_PATH+1]; - size_t dirlen; - int keylen, ret; - WIN32_FIND_DATA finfo; - - if (dir == NULL || keypath == NULL) - return 2; /* Have no real error reporting mechanism into nmake */ - dirlen = strlen(dir); - if ((dirlen + 3) > sizeof(path)) - return 2; - strncpy(path, dir, dirlen); - strncpy(path+dirlen, "\\*", 3); /* Including terminating \0 */ - keylen = strlen(keypath); - -#if 0 /* This function is not available in Visual C++ 6 */ - /* - * Use numerics 0 -> FindExInfoStandard, - * 1 -> FindExSearchLimitToDirectories, - * as these are not defined in Visual C++ 6 - */ - hSearch = FindFirstFileEx(path, 0, &finfo, 1, NULL, 0); -#else - hSearch = FindFirstFile(path, &finfo); -#endif - if (hSearch == INVALID_HANDLE_VALUE) - return 1; /* Not found */ - - /* Loop through all subdirs checking if the keypath is under there */ - ret = 1; /* Assume not found */ - do { - int sublen; - /* - * We need to check it is a directory despite the - * FindExSearchLimitToDirectories in the above call. See SDK docs - */ - if ((finfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) == 0) - continue; - sublen = strlen(finfo.cFileName); - if ((dirlen+1+sublen+1+keylen+1) > sizeof(path)) - continue; /* Path does not fit, assume not matched */ - strncpy(path+dirlen+1, finfo.cFileName, sublen); - path[dirlen+1+sublen] = '\\'; - strncpy(path+dirlen+1+sublen+1, keypath, keylen+1); - if (FileExists(path)) { - /* Found a match, print to stdout */ - path[dirlen+1+sublen] = '\0'; - QualifyPath(path); - ret = 0; - break; - } - } while (FindNextFile(hSearch, &finfo)); - FindClose(hSearch); - return ret; -} - -/* - * LocateDependency -- - * - * Locates a dependency for a package. - * keypath - a relative path within the package directory - * that is used to confirm it is the correct directory. - * The search path for the package directory is currently only - * the parent and grandparent of the current working directory. - * If found, the command prints - * name_DIRPATH= - * and returns 0. If not found, does not print anything and returns 1. 
- */ -static int LocateDependency(const char *keypath) -{ - size_t i; - int ret; - static const char *paths[] = {"..", "..\\..", "..\\..\\.."}; - - for (i = 0; i < (sizeof(paths)/sizeof(paths[0])); ++i) { - ret = LocateDependencyHelper(paths[i], keypath); - if (ret == 0) - return ret; - } - return ret; -} - - -/* - * Local variables: - * mode: c - * c-basic-offset: 4 - * fill-column: 78 - * indent-tabs-mode: t - * tab-width: 8 - * End: - */ diff --git a/autoconf/tea/win/rules.vc b/autoconf/tea/win/rules.vc deleted file mode 100644 index 99471053c8..0000000000 --- a/autoconf/tea/win/rules.vc +++ /dev/null @@ -1,711 +0,0 @@ -#------------------------------------------------------------------------------ -# rules.vc -- -# -# Microsoft Visual C++ makefile include for decoding the commandline -# macros. This file does not need editing to build Tcl. -# -# See the file "license.terms" for information on usage and redistribution -# of this file, and for a DISCLAIMER OF ALL WARRANTIES. -# -# Copyright (c) 2001-2003 David Gravereaux. -# Copyright (c) 2003-2008 Patrick Thoyts -#------------------------------------------------------------------------------ - -!ifndef _RULES_VC -_RULES_VC = 1 - -cc32 = $(CC) # built-in default. -link32 = link -lib32 = lib -rc32 = $(RC) # built-in default. - -!ifndef INSTALLDIR -### Assume the normal default. -_INSTALLDIR = C:\Program Files\Tcl -!else -### Fix the path separators. -_INSTALLDIR = $(INSTALLDIR:/=\) -!endif - -#---------------------------------------------------------- -# Set the proper copy method to avoid overwrite questions -# to the user when copying files and selecting the right -# "delete all" method. -#---------------------------------------------------------- - -!if "$(OS)" == "Windows_NT" -RMDIR = rmdir /S /Q -ERRNULL = 2>NUL -!if ![ver | find "4.0" > nul] -CPY = echo y | xcopy /i >NUL -COPY = copy >NUL -!else -CPY = xcopy /i /y >NUL -COPY = copy /y >NUL -!endif -!else # "$(OS)" != "Windows_NT" -CPY = xcopy /i >_JUNK.OUT # On Win98 NUL does not work here. -COPY = copy >_JUNK.OUT # On Win98 NUL does not work here. -RMDIR = deltree /Y -NULL = \NUL # Used in testing directory existence -ERRNULL = >NUL # Win9x shell cannot redirect stderr -!endif -MKDIR = mkdir - -#------------------------------------------------------------------------------ -# Determine the host and target architectures and compiler version. -#------------------------------------------------------------------------------ - -_HASH=^# -_VC_MANIFEST_EMBED_EXE= -_VC_MANIFEST_EMBED_DLL= -VCVER=0 -!if ![echo VCVERSION=_MSC_VER > vercl.x] \ - && ![echo $(_HASH)if defined(_M_IX86) >> vercl.x] \ - && ![echo ARCH=IX86 >> vercl.x] \ - && ![echo $(_HASH)elif defined(_M_AMD64) >> vercl.x] \ - && ![echo ARCH=AMD64 >> vercl.x] \ - && ![echo $(_HASH)endif >> vercl.x] \ - && ![cl -nologo -TC -P vercl.x $(ERRNULL)] -!include vercl.i -!if ![echo VCVER= ^\> vercl.vc] \ - && ![set /a $(VCVERSION) / 100 - 6 >> vercl.vc] -!include vercl.vc -!endif -!endif -!if ![del $(ERRNUL) /q/f vercl.x vercl.i vercl.vc] -!endif - -!if ![reg query HKLM\Hardware\Description\System\CentralProcessor\0 /v Identifier | findstr /i x86] -NATIVE_ARCH=IX86 -!else -NATIVE_ARCH=AMD64 -!endif - -# Since MSVC8 we must deal with manifest resources. 
-!if $(VCVERSION) >= 1400 -_VC_MANIFEST_EMBED_EXE=if exist $@.manifest mt -nologo -manifest $@.manifest -outputresource:$@;1 -_VC_MANIFEST_EMBED_DLL=if exist $@.manifest mt -nologo -manifest $@.manifest -outputresource:$@;2 -!endif - -!ifndef MACHINE -MACHINE=$(ARCH) -!endif - -!ifndef CFG_ENCODING -CFG_ENCODING = \"cp1252\" -!endif - -!message =============================================================================== - -#---------------------------------------------------------- -# build the helper app we need to overcome nmake's limiting -# environment. -#---------------------------------------------------------- - -!if !exist(nmakehlp.exe) -!if [$(cc32) -nologo nmakehlp.c -link -subsystem:console > nul] -!endif -!endif - -#---------------------------------------------------------- -# Test for compiler features -#---------------------------------------------------------- - -### test for optimizations -!if [nmakehlp -c -Ot] -!message *** Compiler has 'Optimizations' -OPTIMIZING = 1 -!else -!message *** Compiler does not have 'Optimizations' -OPTIMIZING = 0 -!endif - -OPTIMIZATIONS = - -!if [nmakehlp -c -Ot] -OPTIMIZATIONS = $(OPTIMIZATIONS) -Ot -!endif - -!if [nmakehlp -c -Oi] -OPTIMIZATIONS = $(OPTIMIZATIONS) -Oi -!endif - -!if [nmakehlp -c -Op] -OPTIMIZATIONS = $(OPTIMIZATIONS) -Op -!endif - -!if [nmakehlp -c -fp:strict] -OPTIMIZATIONS = $(OPTIMIZATIONS) -fp:strict -!endif - -!if [nmakehlp -c -Gs] -OPTIMIZATIONS = $(OPTIMIZATIONS) -Gs -!endif - -!if [nmakehlp -c -GS] -OPTIMIZATIONS = $(OPTIMIZATIONS) -GS -!endif - -!if [nmakehlp -c -GL] -OPTIMIZATIONS = $(OPTIMIZATIONS) -GL -!endif - -DEBUGFLAGS = - -!if [nmakehlp -c -RTC1] -DEBUGFLAGS = $(DEBUGFLAGS) -RTC1 -!elseif [nmakehlp -c -GZ] -DEBUGFLAGS = $(DEBUGFLAGS) -GZ -!endif - -COMPILERFLAGS =-W3 -DUNICODE -D_UNICODE - -# In v13 -GL and -YX are incompatible. -!if [nmakehlp -c -YX] -!if ![nmakehlp -c -GL] -OPTIMIZATIONS = $(OPTIMIZATIONS) -YX -!endif -!endif - -!if "$(MACHINE)" == "IX86" -### test for pentium errata -!if [nmakehlp -c -QI0f] -!message *** Compiler has 'Pentium 0x0f fix' -COMPILERFLAGS = $(COMPILERFLAGS) -QI0f -!else -!message *** Compiler does not have 'Pentium 0x0f fix' -!endif -!endif - -!if "$(MACHINE)" == "IA64" -### test for Itanium errata -!if [nmakehlp -c -QIA64_Bx] -!message *** Compiler has 'B-stepping errata workarounds' -COMPILERFLAGS = $(COMPILERFLAGS) -QIA64_Bx -!else -!message *** Compiler does not have 'B-stepping errata workarounds' -!endif -!endif - -!if "$(MACHINE)" == "IX86" -### test for -align:4096, when align:512 will do. -!if [nmakehlp -l -opt:nowin98] -!message *** Linker has 'Win98 alignment problem' -ALIGN98_HACK = 1 -!else -!message *** Linker does not have 'Win98 alignment problem' -ALIGN98_HACK = 0 -!endif -!else -ALIGN98_HACK = 0 -!endif - -LINKERFLAGS = - -!if [nmakehlp -l -ltcg] -LINKERFLAGS =-ltcg -!endif - -#---------------------------------------------------------- -# Decode the options requested. 
-#---------------------------------------------------------- - -!if "$(OPTS)" == "" || [nmakehlp -f "$(OPTS)" "none"] -STATIC_BUILD = 0 -TCL_THREADS = 1 -DEBUG = 0 -SYMBOLS = 0 -PROFILE = 0 -PGO = 0 -MSVCRT = 0 -LOIMPACT = 0 -TCL_USE_STATIC_PACKAGES = 0 -USE_THREAD_ALLOC = 1 -UNCHECKED = 0 -!else -!if [nmakehlp -f $(OPTS) "static"] -!message *** Doing static -STATIC_BUILD = 1 -!else -STATIC_BUILD = 0 -!endif -!if [nmakehlp -f $(OPTS) "msvcrt"] -!message *** Doing msvcrt -MSVCRT = 1 -!else -MSVCRT = 0 -!endif -!if [nmakehlp -f $(OPTS) "staticpkg"] -!message *** Doing staticpkg -TCL_USE_STATIC_PACKAGES = 1 -!else -TCL_USE_STATIC_PACKAGES = 0 -!endif -!if [nmakehlp -f $(OPTS) "nothreads"] -!message *** Compile explicitly for non-threaded tcl -TCL_THREADS = 0 -!else -TCL_THREADS = 1 -USE_THREAD_ALLOC= 1 -!endif -!if [nmakehlp -f $(OPTS) "symbols"] -!message *** Doing symbols -DEBUG = 1 -!else -DEBUG = 0 -!endif -!if [nmakehlp -f $(OPTS) "pdbs"] -!message *** Doing pdbs -SYMBOLS = 1 -!else -SYMBOLS = 0 -!endif -!if [nmakehlp -f $(OPTS) "profile"] -!message *** Doing profile -PROFILE = 1 -!else -PROFILE = 0 -!endif -!if [nmakehlp -f $(OPTS) "pgi"] -!message *** Doing profile guided optimization instrumentation -PGO = 1 -!elseif [nmakehlp -f $(OPTS) "pgo"] -!message *** Doing profile guided optimization -PGO = 2 -!else -PGO = 0 -!endif -!if [nmakehlp -f $(OPTS) "loimpact"] -!message *** Doing loimpact -LOIMPACT = 1 -!else -LOIMPACT = 0 -!endif -!if [nmakehlp -f $(OPTS) "thrdalloc"] -!message *** Doing thrdalloc -USE_THREAD_ALLOC = 1 -!endif -!if [nmakehlp -f $(OPTS) "tclalloc"] -!message *** Doing tclalloc -USE_THREAD_ALLOC = 0 -!endif -!if [nmakehlp -f $(OPTS) "unchecked"] -!message *** Doing unchecked -UNCHECKED = 1 -!else -UNCHECKED = 0 -!endif -!endif - - -!if !$(STATIC_BUILD) -# Make sure we don't build overly fat DLLs. -MSVCRT = 1 -# We shouldn't statically put the extensions inside the shell when dynamic. -TCL_USE_STATIC_PACKAGES = 0 -!endif - - -#---------------------------------------------------------- -# Figure-out how to name our intermediate and output directories. -# We wouldn't want different builds to use the same .obj files -# by accident. -#---------------------------------------------------------- - -#---------------------------------------- -# Naming convention: -# t = full thread support. -# s = static library (as opposed to an -# import library) -# g = linked to the debug enabled C -# run-time. -# x = special static build when it -# links to the dynamic C run-time. 
-#---------------------------------------- -SUFX = tsgx - -!if $(DEBUG) -BUILDDIRTOP = Debug -!else -BUILDDIRTOP = Release -!endif - -!if "$(MACHINE)" != "IX86" -BUILDDIRTOP =$(BUILDDIRTOP)_$(MACHINE) -!endif -!if $(VCVER) > 6 -BUILDDIRTOP =$(BUILDDIRTOP)_VC$(VCVER) -!endif - -!if !$(DEBUG) || $(DEBUG) && $(UNCHECKED) -SUFX = $(SUFX:g=) -!endif - -TMP_DIRFULL = .\$(BUILDDIRTOP)\$(PROJECT)_ThreadedDynamicStaticX - -!if !$(STATIC_BUILD) -TMP_DIRFULL = $(TMP_DIRFULL:Static=) -SUFX = $(SUFX:s=) -EXT = dll -!if $(MSVCRT) -TMP_DIRFULL = $(TMP_DIRFULL:X=) -SUFX = $(SUFX:x=) -!endif -!else -TMP_DIRFULL = $(TMP_DIRFULL:Dynamic=) -EXT = lib -!if !$(MSVCRT) -TMP_DIRFULL = $(TMP_DIRFULL:X=) -SUFX = $(SUFX:x=) -!endif -!endif - -!if !$(TCL_THREADS) -TMP_DIRFULL = $(TMP_DIRFULL:Threaded=) -SUFX = $(SUFX:t=) -!endif - -!ifndef TMP_DIR -TMP_DIR = $(TMP_DIRFULL) -!ifndef OUT_DIR -OUT_DIR = .\$(BUILDDIRTOP) -!endif -!else -!ifndef OUT_DIR -OUT_DIR = $(TMP_DIR) -!endif -!endif - - -#---------------------------------------------------------- -# Decode the statistics requested. -#---------------------------------------------------------- - -!if "$(STATS)" == "" || [nmakehlp -f "$(STATS)" "none"] -TCL_MEM_DEBUG = 0 -TCL_COMPILE_DEBUG = 0 -!else -!if [nmakehlp -f $(STATS) "memdbg"] -!message *** Doing memdbg -TCL_MEM_DEBUG = 1 -!else -TCL_MEM_DEBUG = 0 -!endif -!if [nmakehlp -f $(STATS) "compdbg"] -!message *** Doing compdbg -TCL_COMPILE_DEBUG = 1 -!else -TCL_COMPILE_DEBUG = 0 -!endif -!endif - - -#---------------------------------------------------------- -# Decode the checks requested. -#---------------------------------------------------------- - -!if "$(CHECKS)" == "" || [nmakehlp -f "$(CHECKS)" "none"] -TCL_NO_DEPRECATED = 0 -WARNINGS = -W3 -!else -!if [nmakehlp -f $(CHECKS) "nodep"] -!message *** Doing nodep check -TCL_NO_DEPRECATED = 1 -!else -TCL_NO_DEPRECATED = 0 -!endif -!if [nmakehlp -f $(CHECKS) "fullwarn"] -!message *** Doing full warnings check -WARNINGS = -W4 -!if [nmakehlp -l -warn:3] -LINKERFLAGS = $(LINKERFLAGS) -warn:3 -!endif -!else -WARNINGS = -W3 -!endif -!if [nmakehlp -f $(CHECKS) "64bit"] && [nmakehlp -c -Wp64] -!message *** Doing 64bit portability warnings -WARNINGS = $(WARNINGS) -Wp64 -!endif -!endif - -!if $(PGO) > 1 -!if [nmakehlp -l -ltcg:pgoptimize] -LINKERFLAGS = $(LINKERFLAGS:-ltcg=) -ltcg:pgoptimize -!else -MSG=^ -This compiler does not support profile guided optimization. -!error $(MSG) -!endif -!elseif $(PGO) > 0 -!if [nmakehlp -l -ltcg:pginstrument] -LINKERFLAGS = $(LINKERFLAGS:-ltcg=) -ltcg:pginstrument -!else -MSG=^ -This compiler does not support profile guided optimization. -!error $(MSG) -!endif -!endif - -#---------------------------------------------------------- -# Set our defines now armed with our options. 
-#---------------------------------------------------------- - -OPTDEFINES = -DTCL_CFGVAL_ENCODING=$(CFG_ENCODING) -DSTDC_HEADERS - -!if $(TCL_MEM_DEBUG) -OPTDEFINES = $(OPTDEFINES) -DTCL_MEM_DEBUG -!endif -!if $(TCL_COMPILE_DEBUG) -OPTDEFINES = $(OPTDEFINES) -DTCL_COMPILE_DEBUG -DTCL_COMPILE_STATS -!endif -!if $(TCL_THREADS) -OPTDEFINES = $(OPTDEFINES) -DTCL_THREADS=1 -!if $(USE_THREAD_ALLOC) -OPTDEFINES = $(OPTDEFINES) -DUSE_THREAD_ALLOC=1 -!endif -!endif -!if $(STATIC_BUILD) -OPTDEFINES = $(OPTDEFINES) -DSTATIC_BUILD -!endif -!if $(TCL_NO_DEPRECATED) -OPTDEFINES = $(OPTDEFINES) -DTCL_NO_DEPRECATED -!endif - -!if !$(DEBUG) -OPTDEFINES = $(OPTDEFINES) -DNDEBUG -!if $(OPTIMIZING) -OPTDEFINES = $(OPTDEFINES) -DTCL_CFG_OPTIMIZED -!endif -!endif -!if $(PROFILE) -OPTDEFINES = $(OPTDEFINES) -DTCL_CFG_PROFILED -!endif -!if "$(MACHINE)" == "IA64" || "$(MACHINE)" == "AMD64" -OPTDEFINES = $(OPTDEFINES) -DTCL_CFG_DO64BIT -!endif -!if $(VCVERSION) < 1300 -OPTDEFINES = $(OPTDEFINES) -DNO_STRTOI64 -!endif - -#---------------------------------------------------------- -# Locate the Tcl headers to build against -#---------------------------------------------------------- - -!if "$(PROJECT)" == "tcl" - -_TCL_H = ..\generic\tcl.h - -!else - -# If INSTALLDIR set to tcl root dir then reset to the lib dir. -!if exist("$(_INSTALLDIR)\include\tcl.h") -_INSTALLDIR=$(_INSTALLDIR)\lib -!endif - -!if !defined(TCLDIR) -!if exist("$(_INSTALLDIR)\..\include\tcl.h") -TCLINSTALL = 1 -_TCLDIR = $(_INSTALLDIR)\.. -_TCL_H = $(_INSTALLDIR)\..\include\tcl.h -TCLDIR = $(_INSTALLDIR)\.. -!else -MSG=^ -Failed to find tcl.h. Set the TCLDIR macro. -!error $(MSG) -!endif -!else -_TCLDIR = $(TCLDIR:/=\) -!if exist("$(_TCLDIR)\include\tcl.h") -TCLINSTALL = 1 -_TCL_H = $(_TCLDIR)\include\tcl.h -!elseif exist("$(_TCLDIR)\generic\tcl.h") -TCLINSTALL = 0 -_TCL_H = $(_TCLDIR)\generic\tcl.h -!else -MSG =^ -Failed to find tcl.h. The TCLDIR macro does not appear correct. -!error $(MSG) -!endif -!endif -!endif - -#-------------------------------------------------------------- -# Extract various version numbers from tcl headers -# The generated file is then included in the makefile. 
-#-------------------------------------------------------------- - -!if [echo REM = This file is generated from rules.vc > versions.vc] -!endif -!if [echo TCL_MAJOR_VERSION = \>> versions.vc] \ - && [nmakehlp -V "$(_TCL_H)" TCL_MAJOR_VERSION >> versions.vc] -!endif -!if [echo TCL_MINOR_VERSION = \>> versions.vc] \ - && [nmakehlp -V "$(_TCL_H)" TCL_MINOR_VERSION >> versions.vc] -!endif -!if [echo TCL_PATCH_LEVEL = \>> versions.vc] \ - && [nmakehlp -V "$(_TCL_H)" TCL_PATCH_LEVEL >> versions.vc] -!endif - -# If building the tcl core then we need additional package versions -!if "$(PROJECT)" == "tcl" -!if [echo PKG_HTTP_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\http\pkgIndex.tcl http >> versions.vc] -!endif -!if [echo PKG_TCLTEST_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\tcltest\pkgIndex.tcl tcltest >> versions.vc] -!endif -!if [echo PKG_MSGCAT_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\msgcat\pkgIndex.tcl msgcat >> versions.vc] -!endif -!if [echo PKG_PLATFORM_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\platform\pkgIndex.tcl "platform " >> versions.vc] -!endif -!if [echo PKG_SHELL_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\platform\pkgIndex.tcl "platform::shell" >> versions.vc] -!endif -!if [echo PKG_DDE_VER = \>> versions.vc] \ - && [nmakehlp -V ..\library\dde\pkgIndex.tcl "dde " >> versions.vc] -!endif -!if [echo PKG_REG_VER =\>> versions.vc] \ - && [nmakehlp -V ..\library\reg\pkgIndex.tcl registry >> versions.vc] -!endif -!endif - -!include versions.vc - -#-------------------------------------------------------------- -# Setup tcl version dependent stuff headers -#-------------------------------------------------------------- - -!if "$(PROJECT)" != "tcl" - -TCL_VERSION = $(TCL_MAJOR_VERSION)$(TCL_MINOR_VERSION) - -!if $(TCL_VERSION) < 81 -TCL_DOES_STUBS = 0 -!else -TCL_DOES_STUBS = 1 -!endif - -!if $(TCLINSTALL) -TCLSH = "$(_TCLDIR)\bin\tclsh$(TCL_VERSION)$(SUFX).exe" -!if !exist($(TCLSH)) && $(TCL_THREADS) -TCLSH = "$(_TCLDIR)\bin\tclsh$(TCL_VERSION)t$(SUFX).exe" -!endif -TCLSTUBLIB = "$(_TCLDIR)\lib\tclstub$(TCL_VERSION).lib" -TCLIMPLIB = "$(_TCLDIR)\lib\tcl$(TCL_VERSION)$(SUFX).lib" -TCL_LIBRARY = $(_TCLDIR)\lib -TCLREGLIB = "$(_TCLDIR)\lib\tclreg13$(SUFX:t=).lib" -TCLDDELIB = "$(_TCLDIR)\lib\tcldde14$(SUFX:t=).lib" -COFFBASE = \must\have\tcl\sources\to\build\this\target -TCLTOOLSDIR = \must\have\tcl\sources\to\build\this\target -TCL_INCLUDES = -I"$(_TCLDIR)\include" -!else -TCLSH = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tclsh$(TCL_VERSION)$(SUFX).exe" -!if !exist($(TCLSH)) && $(TCL_THREADS) -TCLSH = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tclsh$(TCL_VERSION)t$(SUFX).exe" -!endif -TCLSTUBLIB = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tclstub$(TCL_VERSION).lib" -TCLIMPLIB = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tcl$(TCL_VERSION)$(SUFX).lib" -TCL_LIBRARY = $(_TCLDIR)\library -TCLREGLIB = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tclreg13$(SUFX:t=).lib" -TCLDDELIB = "$(_TCLDIR)\win\$(BUILDDIRTOP)\tcldde14$(SUFX:t=).lib" -COFFBASE = "$(_TCLDIR)\win\coffbase.txt" -TCLTOOLSDIR = $(_TCLDIR)\tools -TCL_INCLUDES = -I"$(_TCLDIR)\generic" -I"$(_TCLDIR)\win" -!endif - -!endif - -#------------------------------------------------------------------------- -# Locate the Tk headers to build against -#------------------------------------------------------------------------- - -!if "$(PROJECT)" == "tk" -_TK_H = ..\generic\tk.h -_INSTALLDIR = $(_INSTALLDIR)\.. 
-!endif - -!ifdef PROJECT_REQUIRES_TK -!if !defined(TKDIR) -!if exist("$(_INSTALLDIR)\..\include\tk.h") -TKINSTALL = 1 -_TKDIR = $(_INSTALLDIR)\.. -_TK_H = $(_TKDIR)\include\tk.h -TKDIR = $(_TKDIR) -!elseif exist("$(_TCLDIR)\include\tk.h") -TKINSTALL = 1 -_TKDIR = $(_TCLDIR) -_TK_H = $(_TKDIR)\include\tk.h -TKDIR = $(_TKDIR) -!endif -!else -_TKDIR = $(TKDIR:/=\) -!if exist("$(_TKDIR)\include\tk.h") -TKINSTALL = 1 -_TK_H = $(_TKDIR)\include\tk.h -!elseif exist("$(_TKDIR)\generic\tk.h") -TKINSTALL = 0 -_TK_H = $(_TKDIR)\generic\tk.h -!else -MSG =^ -Failed to find tk.h. The TKDIR macro does not appear correct. -!error $(MSG) -!endif -!endif -!endif - -#------------------------------------------------------------------------- -# Extract Tk version numbers -#------------------------------------------------------------------------- - -!if defined(PROJECT_REQUIRES_TK) || "$(PROJECT)" == "tk" - -!if [echo TK_MAJOR_VERSION = \>> versions.vc] \ - && [nmakehlp -V $(_TK_H) TK_MAJOR_VERSION >> versions.vc] -!endif -!if [echo TK_MINOR_VERSION = \>> versions.vc] \ - && [nmakehlp -V $(_TK_H) TK_MINOR_VERSION >> versions.vc] -!endif -!if [echo TK_PATCH_LEVEL = \>> versions.vc] \ - && [nmakehlp -V $(_TK_H) TK_PATCH_LEVEL >> versions.vc] -!endif - -!include versions.vc - -TK_DOTVERSION = $(TK_MAJOR_VERSION).$(TK_MINOR_VERSION) -TK_VERSION = $(TK_MAJOR_VERSION)$(TK_MINOR_VERSION) - -!if "$(PROJECT)" != "tk" -!if $(TKINSTALL) -WISH = "$(_TKDIR)\bin\wish$(TK_VERSION)$(SUFX).exe" -TKSTUBLIB = "$(_TKDIR)\lib\tkstub$(TK_VERSION).lib" -TKIMPLIB = "$(_TKDIR)\lib\tk$(TK_VERSION)$(SUFX).lib" -TK_INCLUDES = -I"$(_TKDIR)\include" -!else -WISH = "$(_TKDIR)\win\$(BUILDDIRTOP)\wish$(TCL_VERSION)$(SUFX).exe" -TKSTUBLIB = "$(_TKDIR)\win\$(BUILDDIRTOP)\tkstub$(TCL_VERSION).lib" -TKIMPLIB = "$(_TKDIR)\win\$(BUILDDIRTOP)\tk$(TCL_VERSION)$(SUFX).lib" -TK_INCLUDES = -I"$(_TKDIR)\generic" -I"$(_TKDIR)\win" -I"$(_TKDIR)\xlib" -!endif -!endif - -!endif - -#---------------------------------------------------------- -# Display stats being used. -#---------------------------------------------------------- - -!message *** Intermediate directory will be '$(TMP_DIR)' -!message *** Output directory will be '$(OUT_DIR)' -!message *** Suffix for binaries will be '$(SUFX)' -!message *** Optional defines are '$(OPTDEFINES)' -!message *** Compiler version $(VCVER). Target machine is $(MACHINE) -!message *** Host architecture is $(NATIVE_ARCH) -!message *** Compiler options '$(COMPILERFLAGS) $(OPTIMIZATIONS) $(DEBUGFLAGS) $(WARNINGS)' -!message *** Link options '$(LINKERFLAGS)' - -!endif - diff --git a/autosetup/LICENSE b/autosetup/LICENSE new file mode 100644 index 0000000000..4fe636c9d9 --- /dev/null +++ b/autosetup/LICENSE @@ -0,0 +1,35 @@ +Unless explicitly stated, all files which form part of autosetup +are released under the following license: + +--------------------------------------------------------------------- +autosetup - A build environment "autoconfigurator" + +Copyright (c) 2010-2011, WorkWare Systems + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. +2. Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials + provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE WORKWARE SYSTEMS ``AS IS'' AND ANY +EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WORKWARE +SYSTEMS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +The views and conclusions contained in the software and documentation +are those of the authors and should not be interpreted as representing +official policies, either expressed or implied, of WorkWare Systems. diff --git a/autosetup/README.autosetup b/autosetup/README.autosetup new file mode 100644 index 0000000000..3952980480 --- /dev/null +++ b/autosetup/README.autosetup @@ -0,0 +1,11 @@ +README.autosetup created by autosetup v0.7.2 + +This is the autosetup directory for a local install of autosetup. +It contains autosetup, support files and loadable modules. + +*.tcl files in this directory are optional modules which +can be loaded with the 'use' directive. + +*.auto files in this directory are auto-loaded. + +For more information, see https://msteveb.github.io/autosetup/ diff --git a/autosetup/README.md b/autosetup/README.md new file mode 100644 index 0000000000..c8da5c643a --- /dev/null +++ b/autosetup/README.md @@ -0,0 +1,454 @@ +Maintaining Autosetup in the SQLite Tree +======================================================================== + +This document provides some tips and reminders for the SQLite +developers regarding using and maintaining the [Autosetup][]-based +build infrastructure. It is not an [Autosetup][] reference. + +**Table of Contents**: + +- [Autosetup API Reference](#apiref) +- [API Tips](#apitips) +- [Ensuring TCL Compatibility](#tclcompat) +- [Design Conventions](#conventions) + - Symbolic Names of Feature Flags + - Do Not Update Global Shared State +- [Updating Autosetup](#updating) + - ***[Patching Autosetup for Project-local changes](#patching)*** +- [Branch-specific Customization](#branch-customization) + + +------------------------------------------------------------------------ + + +Autosetup API Reference +======================================================================== + +The Autosetup API is quite extensive and can be read either in +the [files in the `autosetup` dir](/dir/autosetup) or using: + +> +``` +$ ./configure --reference | less +``` + +That will include any docs from any TCL files in the `./autosetup` dir +which contain certain (simple) markup defined by autosetup. + +This project's own configuration-related TCL code is spread across the +following files: + +- [proj.tcl][]: project-agnostic utility code for autosetup-driven + projects. This file is designed to be shared between this project, + other projects managed under the SQLite/Hwaci umbrella + (e.g. Fossil), and personal projects of SQLite's developers. It is + essentially an amalgamation of a decade's worth of autosetup-related + utility code. +- [sqlite-config.tcl][]: utility code which is too project-specific + for `proj.tcl`. 
We split this out of `auto.def` so that it can be + used by both `auto.def` and... +- [auto.def][]: the primary driver for the `./configure` process. + When we talk about "the configure script," we're technically + referring to this file, though it actually contains very little + of the TCL code. +- [autoconf/auto.def][]: the main driver script for the "autoconf" + bundle's configure script. It is essentially a slightly trimmed-down + version of the main `auto.def` file. The `autoconf` dir was ported + from the Autotools to Autosetup in the 3.49.0 dev cycle but retains + the "autoconf" name to minimize downstream disruption. + + + +Autosetup API Tips +======================================================================== + +This section briefly covers only APIs which are frequently useful in +day-to-day maintenance and might not be immediately recognized as such +from a casual perusal of the relevant TCL files. The complete docs of +those with `proj-` prefix can be found in [proj.tcl][] and those with +an `sqlite-` prefix are in [sqlite-config.tcl][]. The others are part +of Autosetup's core packages and are scattered around [the TCL files +in ./autosetup](/dir/autosetup). + +In (mostly) alphabetical order: + +- **`file-isexec filename`**\ + Should be used in place of `[file executable]`, as it will also + check for `${filename}.exe` on Windows platforms. However, on such + platforms it also assumes that _any_ existing file is executable. + +- **`get-env VAR ?default?`**\ + Will fetch an "environment variable" from the first of either: (1) a + KEY=VALUE passed to the configure script or (2) the system's + environment variables. Not to be confused with `getenv`, which only + does the latter and is rarely, if ever, useful in this tree. + - **`proj-get-env VAR ?default?`**\ + Works like `get-env` but will, if that function finds no match, + look for a file named `./.env-$VAR` and, if found, return its + trimmed contents. This can be used, e.g., to set a developer's + local preferences for the default `CFLAGS`.\ + Tip: adding `-O0` to `.env-CFLAGS` reduces rebuild times + considerably at the cost of performance in `make devtest` and the + like. + +- **`proj-fatal msg`**\ + Emits `$msg` to stderr and exits with non-zero. Its differences from + autosetup's `user-error` are purely cosmetic. + +- **`proj-if-opt-truthy flag thenScript ?elseScript?`**\ + Evals `thenScript` if the given `--flag` is truthy, else it + evals the optional `elseScript`. + +- **`proj-indented-notice ?-error? ?-notice? msg`**\ + Breaks its `msg` argument into lines, trims them, and emits them + with consistent indentation. Exactly how it emits depends on the + flags passed to it (or not), as covered in its docs. This will stick + out starkly from normal output and is intended to be used only for + important notices. + +- **`proj-opt-truthy flag`**\ + Returns 1 if `--flag`'s value is "truthy," i.e. one of (1, on, + enabled, yes, true). + +- **`proj-opt-was-provided FLAG`**\ + Returns 1 if `--FLAG` was explicitly provided to configure, + else 0. This distinction can be used to determine, e.g., whether + `--with-readline` was provided or whether we're searching for + readline by default. In the former case, failure to find it should + be treated as fatal, where in the latter case it's not.\ + Unlike most functions which deal with `--flags`, this one does not + validate that `$FLAG` is a registered flag so will not fail fatally + if `$FLAG` is not registered as an Autosetup option. 
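
  For example, a feature probe might use it like this (a minimal
  sketch; `find-readline-lib` is a hypothetical helper used only for
  illustration, while `proj-fatal` and `proj-warn` are the real helpers
  described elsewhere in this list):

  ```
  if {![find-readline-lib]} {
    if {[proj-opt-was-provided with-readline]} {
      # The user explicitly requested readline, so failing to find it is fatal.
      proj-fatal "--with-readline was given but readline was not found"
    } else {
      # We were only searching by default: warn and continue without it.
      proj-warn "readline not found, building without readline support"
    }
  }
  ```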
+ +- **`proj-val-truthy value`**\ + Returns 1 if `$value` is "truthy," See `proj-opt-truthy` for the definition + of "truthy." + +- **`proj-warn msg`**\ + Emits `$msg` to stderr. Closely-related is autosetup's `user-notice` + (described below). + +- **`sqlite-add-feature-flag ?-shell? FLAG...`**\ + Adds the given feature flag to the CFLAGS which are specific to + building libsqlite3. It's intended to be passed one or more + `-DSQLITE_ENABLE_...`, or similar, flags. If the `-shell` flag is + used then it also passes its arguments to + `sqlite-add-shell-opt`. This is a no-op if `FLAG` is not provided or + is empty. + +- **`sqlite-add-shell-opt FLAG...`**\ + The shell-specific counterpart of `sqlite-add-feature-flag` which + only adds the given flag(s) to the CLI-shell-specific CFLAGS. + +- **`sqlite-configure BUILD-NAME {script}`**\ + This is where all configure `--flags` are defined for all known + build modes ("canonical" or "autoconf"). After processing all flags, + this function runs `$script`, which contains the build-mode-specific + configuration bits, and then runs any finalization bits which are + common to all build modes. The `auto.def` files are intended to contain + exactly two commands: + `use sqlite-config; sqlite-configure BUILD-NAME {script}` + +- **`user-notice msg`**\ + Queues `$msg` to be sent to stderr, but does not emit it until + either `show-notices` is called or the next time autosetup would + output something (it internally calls `show-notices`). This can be + used to generate warnings between a "checking for..." message and + its resulting "yes/no/whatever" message in such a way as to not + spoil the layout of such messages. + + + +Ensuring TCL Compatibility +======================================================================== + +One of the significant benefits of using Autosetup is that (A) this +project uses many TCL scripts in the build process and (B) Autosetup +comes with a TCL interpreter named [JimTCL][]. + +It is important that any TCL files used by the configure process and +makefiles remain compatible with both [JimTCL][] and the canonical +TCL. Though JimTCL has outstanding compatibility with canonical TCL, +it does have a few corners with incompatibilities, e.g. regular +expressions. If a script runs in JimTCL without using any +JimTCL-specific features, then it's a certainty that it will run in +canonical TCL as well. The opposite, however, is not _always_ the +case. + +When [`./configure`](/file/configure) is run, it goes through a +bootstrapping process to find a suitable TCL with which to run the +autosetup framework. The first step involves [finding or building a +TCL shell](/file/autosetup/autosetup-find-tclsh). That will first +search for an available `tclsh` (under several common names, +e.g. `tclsh8.6`) before falling back to compiling the copy of +`jimsh0.c` included in the source tree. i.e. it will prefer to use a +system-installed TCL for running the configure script. Once it finds +(or builds) a TCL shell, it then runs [a sanity test to ensure that +the shell is suitable](/file/autosetup/autosetup-test-tclsh) before +using it to run the main autosetup app. + +There are two simple ways to ensure that running of the configure +process uses JimTCL instead of the canonical `tclsh`, and either +approach provides equally high assurances about configure script +compatibility across TCL implementations: + +1. Build on a system with no `tclsh` installed in the `$PATH`. 
In that + case, the configure process will fall back to building the in-tree + copy of JimTCL. + +2. Manually build `./jimsh0` in the top of the checkout with:\ + `cc -o jimsh0 autosetup/jimsh0.c`\ + With that in place, the configure script will prefer to use that + before looking for a system-level `tclsh`. Be aware, though, that + `make distclean` will remove that file. + +**Note that `./jimsh0` is distinctly different from the `./jimsh`** +which gets built for code-generation purposes. The latter requires +non-default build flags to enable features which are +platform-dependent, most notably to make its `[file normalize]` work. +This means, for example, that the configure script and its utility +APIs must not use `[file normalize]`, but autosetup provides a +TCL-only implementation of `[file-normalize]` (note the dash) for +portable use in the configure script. Contrariwise, code-generation +scripts invoked via `make` may use `[file normalize]`, as they'll use +`./jimsh` or `tclsh` instead of `./jimsh0`. + + +Known TCL Incompatibilities +------------------------------------------------------------------------ + +A summary of known incompatibilities in JimTCL: + +- **CRNL line endings**: prior to 2025-02-05 `fconfigure -translation ...` + was a no-op in JimTCL, and it emits CRNL line endings by default on + Windows. Since then, it supports `-translation binary`, which is + close enough to `-translation lf` for our purposes. When working + with files using the `open` command, it is important to use mode + `"rb"` or `"wb"`, as appropriate, so that the output does not get + CRNL-mangled on Windows. + +- **`file copy`** does not support multiple source files. See + [](/info/61f18c96183867fe) for a workaround. + +- **Regular expressions**: + + - Patterns treat `\nnn` octal values as back-references (which JimTCL + does not support). Those can be reformulated as demonstrated in + [](/info/aeac23359bb681c0). + + - `regsub` does not support the `\y` flag. A workaround is demonstrated + in [](/info/c2e5dd791cce3ec4). + + + +Design Conventions +======================================================================== + +This section describes the motivations for the most glaring of the +build's design decisions, in particular how they deviate from +historical, or even widely-conventional, practices. + +Symbolic Names of Feature Flags +------------------------------------------------------------------------ + +Historically, the project's makefile has exclusively used +`UPPER_UNDERSCORE` form for makefile variables. This build, however, +primarily uses `X.y` format, where `X` is often a category label, +e.g. `CFLAGS`, and `y` is the specific instance of that category, +e.g. `CFLAGS.readline`. + +When the configure script exports flags for consumption by filtered +files, e.g. [Makefile.in][] and the generated +`sqlite_cfg.h`, it does so in the more conventional `X_Y` form because +those flags get exported as C `#define`s to `sqlite_cfg.h`, where +dots are not permitted. + +The `X.y` convention is used in the makefiles primarily because the +person who did the initial port finds that considerably easier on the +eyes and fingers. In practice, the `X_Y` form of such exports is used +exactly once in [Makefile.in][], where it's translated from `@X_Y@` +into `X.y` form for consumption by [Makefile.in][] and +[main.mk][]. 
For example: + +> +``` +LDFLAGS.shobj = @SHOBJ_LDFLAGS@ +LDFLAGS.zlib = @LDFLAGS_ZLIB@ +LDFLAGS.math = @LDFLAGS_MATH@ +``` + +(That first one is defined by autosetup, and thus applies "LDFLAGS" as +the suffix rather than the prefix. Which is more legible is a matter +of taste, for which there is no accounting.) + + +Do Not Update Global Shared State +------------------------------------------------------------------------ + +In both the legacy Autotools-driven build and common Autosetup usage, +feature tests performed by the configure script may amend global flags +such as `LIBS`, `LDFLAGS`, and `CFLAGS`[^as-cflags]. That's +appropriate for a makefile which builds a single deliverable, but less +so for makefiles which produce multiple deliverables. Drawbacks of +that approach include: + +- It's unlikely that every single deliverable will require the same + core set of those flags. +- It can be difficult to determine the origin of any given change to + that global state because those changes are hidden behind voodoo + performed outside the immediate visibility of the configure script's + maintainer. +- It can force the maintainers of the configure script to place tests + in a specific order so that the resulting flags get applied at + the correct time and/or in the correct order.\ + (A real-life example: before the approach described below was taken + to collecting build-time flags, the test for `-rpath` had to come + _after_ the test for zlib because the results of the `-rpath` test + implicitly modified global state which broke the zlib feature + test. Because the feature tests no longer (intentionally) modify + shared global state, that is not an issue.) + +In this build, cases where feature tests modify global state in such a +way that it may impact later feature tests are either (A) very +intentionally defined to do so (e.g. the `--with-wasi-sdk` flag has +invasive side-effects) or (B) are oversights (i.e. bugs). + +This tree's [configure script][auto.def], [utility APIs][proj.tcl], +[Makefile.in][], and [main.mk][] therefore strive to separate the +results of any given feature test into its own well-defined +variables. For example: + +- The linker flags for zlib are exported from the configure script as + `LDFLAGS_ZLIB`, which [Makefile.in][] and [main.mk][] then expose as + `LDFLAGS.zlib`. +- `CFLAGS_READLINE` (a.k.a. `CFLAGS.readline`) contains the `CFLAGS` + needed for including `libreadline`, `libedit`, or `linenoise`, and + `LDFLAGS_READLINE` (a.k.a. `LDFLAGS.readline`) is its link-time + counterpart. + +It is then up to the Makefile to apply and order the flags however is +appropriate. + +At the end of the configure script, the global `CFLAGS` _ideally_ +holds only flags which are either relevant to all targets or, failing +that, will have no unintended side-effects on any targets. That said: +clients frequently pass custom `CFLAGS` to `./configure` or `make` to +set library-level feature toggles, e.g. `-DSQLITE_OMIT_FOO`, in which +case there is no practical way to avoid "polluting" the builds of +arbitrary makefile targets with those. _C'est la vie._ + + +[^as-cflags]: But see this article for a detailed discussion of how + autosetup currently deals specifically with CFLAGS: + + + + +Updating Autosetup +======================================================================== + +Updating autosetup is, more often than not, painless. 
It requires having +a checked-out copy of [the autosetup git repository][autosetup-git]: + +> +``` +$ git clone https://github.com/msteveb/autosetup +$ cd autosetup +# Or, if it's already checked out: +$ git pull +``` + +Then, from the top-most directory of an SQLite checkout: + +> +``` +$ /path/to/autosetup-checkout/autosetup --install . +$ fossil status # show the modified files +``` + +Unless the upgrade made any incompatible changes (which is exceedingly +rare), that's all there is to it. After that's done, **apply a patch +for the change described in the following section**, test the +configure process, and check it in. + + +Patching Autosetup for Project-local Changes +------------------------------------------------------------------------ + +Autosetup reserves the flag name **`--debug`** for its own purposes, +and its own special handling of `--enable-...` flags makes `--debug` +an alias for `--enable-debug`. As this project has a long history of +using `--enable-debug`, we patch autosetup to use the name +`--autosetup-debug` in place of `--debug`. That requires (as of this +writing) four small edits in +[/autosetup/autosetup](/file/autosetup/autosetup), as demonstrated in +[check-in 3296c8d3](/info/3296c8d3). + +If autosetup is upgraded and this patch is _not_ applied, then invoking +`./configure` will fail loudly because of the declaration of the +`debug` flag in `auto.def`: duplicated flags are not permitted. + + +Branch-specific Customization +======================================================================== + +Certain vendor-specific branches require slight configure script +customization. Rather than editing `sqlite-config.tcl` for this, +which frequently leads to merge conflicts, the following approach +is recommended: + +In the vendor-specific branch, create a file named +`autosetup/sqlite-custom.tcl`. + +That file should contain the following content... + +If flag customization is required, add: + +> +``` +proc sqlite-custom-flags {} { + # If any existing --flags require different default values + # then call: + options-defaults { + flag-name new-default-value + ... + } + # ^^^ That will replace the default value but will not update + # the --help text, which may lead to some confusion: + # https://github.com/msteveb/autosetup/issues/77 + + return { + {*} { + new-flag-name => {Help text} + ... + } + }; #see below +} +``` + +That function must return either an empty string or a list in the form +used internally by [sqlite-config.tcl][]'s `sqlite-configure`. + +Next, define: + +> +``` +proc sqlite-custom-handle-flags {} { + ... do any custom flag handling here ... +} +``` + +That function, if defined, will be called relatively late in the +configure process, before any filtered files are generated but after +all other significant processing. 
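
Putting the two pieces together, a minimal hypothetical
`autosetup/sqlite-custom.tcl` might look like the following. The
`vendor-feature` flag name and the `SQLITE_ENABLE_VENDOR_FEATURE`
define are invented for this sketch; `proj-opt-truthy` and
`sqlite-add-feature-flag` are the helpers documented earlier in this
file:

>
```
proc sqlite-custom-flags {} {
  return {
    {*} {
      vendor-feature => {Enable this branch's vendor-specific feature}
    }
  }
}

proc sqlite-custom-handle-flags {} {
  # Called late in the configure process: all stock flags have been
  # handled, but no filtered files have been generated yet.
  if {[proj-opt-truthy vendor-feature]} {
    sqlite-add-feature-flag -DSQLITE_ENABLE_VENDOR_FEATURE
  }
}
```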
+ + +[Autosetup]: https://msteveb.github.io/autosetup/ +[auto.def]: /file/auto.def +[autoconf/auto.def]: /file/autoconf/auto.def +[autosetup-git]: https://github.com/msteveb/autosetup +[proj.tcl]: /file/autosetup/proj.tcl +[sqlite-config.tcl]: /file/autosetup/sqlite-config.tcl +[Makefile.in]: /file/Makefile.in +[main.mk]: /file/main.mk +[JimTCL]: https://jim.tcl.tk diff --git a/autosetup/autosetup b/autosetup/autosetup new file mode 100755 index 0000000000..c3a31bec58 --- /dev/null +++ b/autosetup/autosetup @@ -0,0 +1,2544 @@ +#!/bin/sh +# Copyright (c) 2006-2011 WorkWare Systems http://www.workware.net.au/ +# All rights reserved +# vim:se syntax=tcl: +# \ +dir=`dirname "$0"`; exec "`$dir/autosetup-find-tclsh`" "$0" "$@" + +# Note that the version has a trailing + on unreleased versions +set autosetup(version) 0.7.2 + +# Can be set to 1 to debug early-init problems +set autosetup(debug) [expr {"--autosetup-debug" in $argv}] + +################################################################## +# +# Main flow of control, option handling +# +proc main {argv} { + global autosetup define + + # There are 3 potential directories involved: + # 1. The directory containing autosetup (this script) + # 2. The directory containing auto.def + # 3. The current directory + + # From this we need to determine: + # a. The path to this script (and related support files) + # b. The path to auto.def + # c. The build directory, where output files are created + + # This is also complicated by the fact that autosetup may + # have been run via the configure wrapper ([getenv WRAPPER] is set) + + # Here are the rules. + # a. This script is $::argv0 + # => dir, prog, exe, libdir + # b. auto.def is in the directory containing the configure wrapper, + # otherwise it is in the current directory. + # => srcdir, autodef + # c. 
The build directory is the current directory + # => builddir, [pwd] + + # 'misc' is needed before we can do anything, so set a temporary libdir + # in case this is the development version + set autosetup(libdir) [file dirname $::argv0]/lib + use misc + + # (a) + set autosetup(dir) [realdir [file dirname [realpath $::argv0]]] + set autosetup(prog) [file join $autosetup(dir) [file tail $::argv0]] + set autosetup(exe) [getenv WRAPPER $autosetup(prog)] + if {$autosetup(installed)} { + set autosetup(libdir) $autosetup(dir) + } else { + set autosetup(libdir) [file join $autosetup(dir) lib] + } + autosetup_add_dep $autosetup(prog) + + # (b) + if {[getenv WRAPPER ""] eq ""} { + # Invoked directly + set autosetup(srcdir) [pwd] + } else { + # Invoked via the configure wrapper + set autosetup(srcdir) [file-normalize [file dirname $autosetup(exe)]] + } + set autosetup(autodef) [relative-path $autosetup(srcdir)/auto.def] + + # (c) + set autosetup(builddir) [pwd] + + set autosetup(argv) $argv + set autosetup(cmdline) {} + # options is a list of known options + set autosetup(options) {} + # optset is a dictionary of option values set by the user based on getopt + set autosetup(optset) {} + # optdefault is a dictionary of default values + set autosetup(optdefault) {} + # options-defaults is a dictionary of overrides for default values for options + set autosetup(options-defaults) {} + set autosetup(optionhelp) {} + set autosetup(showhelp) 0 + + use util + + # Parse options + use getopt + + # At the is point we don't know what is a valid option + # We simply parse anything that looks like an option + set autosetup(getopt) [getopt argv] + + #"=Core Options:" + options-add { + help:=all => "display help and options. Optional: module name, such as --help=system" + licence license => "display the autosetup license" + version => "display the version of autosetup" + ref:=text manual:=text + reference:=text => "display the autosetup command reference. 'text', 'wiki', 'asciidoc' or 'markdown'" + autosetup-debug => "display debugging output as autosetup runs" + install:=. => "install autosetup to the current or given directory" + } + if {$autosetup(installed)} { + # hidden options so we can produce a nice error + options-add { + sysinstall:path + } + } else { + options-add { + sysinstall:path => "install standalone autosetup to the given directory (e.g.: /usr/local)" + } + } + options-add { + force init:=help => "create initial auto.def, etc. 
Use --init=help for known types" + # Undocumented options + option-checking=1 + nopager + quiet + timing + conf: + } + + if {[opt-bool version]} { + puts $autosetup(version) + exit 0 + } + + # autosetup --conf=alternate-auto.def + if {[opt-str conf o]} { + set autosetup(autodef) $o + } + + # Debugging output (set this early) + incr autosetup(debug) [opt-bool autosetup-debug] + incr autosetup(force) [opt-bool force] + incr autosetup(msg-quiet) [opt-bool quiet] + incr autosetup(msg-timing) [opt-bool timing] + + # If the local module exists, source it now to allow for + # project-local customisations + if {[file exists $autosetup(libdir)/local.tcl]} { + use local + } + + # Now any auto-load modules + autosetup_load_auto_modules + + if {[opt-str help o]} { + incr autosetup(showhelp) + use help + autosetup_help $o + } + + if {[opt-bool licence license]} { + use help + autosetup_show_license + exit 0 + } + + if {[opt-str {manual ref reference} o]} { + use help + autosetup_reference $o + } + + # Allow combining --install and --init + set earlyexit 0 + if {[opt-str install o]} { + use install + autosetup_install $o + incr earlyexit + } + + if {[opt-str init o]} { + use init + autosetup_init $o + incr earlyexit + } + + if {$earlyexit} { + exit 0 + } + if {[opt-str sysinstall o]} { + use install + autosetup_install $o 1 + exit 0 + } + + if {![file exists $autosetup(autodef)]} { + # Check for invalid option first + options {} + user-error "No auto.def found in \"$autosetup(srcdir)\" (use [file tail $::autosetup(exe)] --init to create one)" + } + + # Parse extra arguments into autosetup(cmdline) + foreach arg $argv { + if {[regexp {([^=]*)=(.*)} $arg -> n v]} { + dict set autosetup(cmdline) $n $v + define $n $v + } else { + user-error "Unexpected parameter: $arg" + } + } + + autosetup_add_dep $autosetup(autodef) + + # Add $argv to CONFIGURE_OPTS + define-append-argv CONFIGURE_OPTS {*}$autosetup(argv) + # Set up AUTOREMAKE to reconfigure with the same args + define-append-argv AUTOREMAKE {*}$autosetup(exe) {*}$autosetup(argv) + + # Log how we were invoked + configlog "Invoked as: [getenv WRAPPER $::argv0] [quote-argv $autosetup(argv)]" + configlog "Tclsh: [info nameofexecutable]" + + # Load auto.def as module "auto.def" + autosetup_load_module auto.def source $autosetup(autodef) + + # Could warn here if options {} was not specified + + show-notices + + if {$autosetup(debug)} { + msg-result "Writing all defines to config.log" + configlog "================ defines ======================" + foreach n [lsort [array names define]] { + configlog "define $n $define($n)" + } + } + + exit 0 +} + +# @section Option Handling + +# @opt-bool ?-nodefault? option ... +# +# Check each of the named, boolean options and if any have been explicitly enabled +# or disabled by the user, return 1 or 0 accordingly. +# +# If the option was specified more than once, the last value wins. +# e.g. With '--enable-foo --disable-foo', '[opt-bool foo]' will return 0 +# +# If no value was specified by the user, returns the default value for the +# first option. If '-nodefault' is given, this behaviour changes and +# -1 is returned instead. 
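#
# Example (the "--debug" boolean flag here is purely illustrative):
#
## if {[opt-bool debug]} {
##     msg-result "debug support is enabled"
## }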
+# +proc opt-bool {args} { + set nodefault 0 + if {[lindex $args 0] eq "-nodefault"} { + set nodefault 1 + set args [lrange $args 1 end] + } + option-check-names {*}$args + + foreach opt $args { + if {[dict exists $::autosetup(optset) $opt]} { + return [dict get $::autosetup(optset) $opt] + } + } + + if {$nodefault} { + return -1 + } + # Default value is the default for the first option + return [dict get $::autosetup(optdefault) [lindex $args 0]] +} + +# @opt-val optionlist ?default=""? +# +# Returns a list containing all the values given for the non-boolean options in '$optionlist'. +# There will be one entry in the list for each option given by the user, including if the +# same option was used multiple times. +# +# If no options were set, '$default' is returned (exactly, not as a list). +# +# Note: For most use cases, 'opt-str' should be preferred. +# +proc opt-val {names {default ""}} { + option-check-names {*}$names + + foreach opt $names { + if {[dict exists $::autosetup(optset) $opt]} { + lappend result {*}[dict get $::autosetup(optset) $opt] + } + } + if {[info exists result]} { + return $result + } + return $default +} + +# @opt-str optionlist varname ?default? +# +# Sets '$varname' in the callers scope to the value for one of the given options. +# +# For the list of options given in '$optionlist', if any value is set for any option, +# the option value is taken to be the *last* value of the last option (in the order given). +# +# If no option was given, and a default was specified with 'options-defaults', +# that value is used. +# +# If no 'options-defaults' value was given and '$default' was given, it is used. +# +# If none of the above provided a value, no value is set. +# +# The return value depends on whether '$default' was specified. +# If it was, the option value is returned. +# If it was not, 1 is returns if a value was set, or 0 if not. +# +# Typical usage is as follows: +# +## if {[opt-str {myopt altname} o]} { +## do something with $o +## } +# +# Or: +## define myname [opt-str {myopt altname} o "/usr/local"] +# +proc opt-str {names varname args} { + global autosetup + + option-check-names {*}$names + upvar $varname value + + if {[llength $args]} { + # A default was given, so always return the string value of the option + set default [lindex $args 0] + set retopt 1 + } else { + # No default, so return 0 or 1 to indicate if a value was found + set retopt 0 + } + + foreach opt $names { + if {[dict exists $::autosetup(optset) $opt]} { + set result [lindex [dict get $::autosetup(optset) $opt] end] + } + } + + if {![info exists result]} { + # No user-specified value. Has options-defaults been set? 
+ foreach opt $names { + if {[dict exists $::autosetup(optdefault) $opt]} { + set result [dict get $autosetup(optdefault) $opt] + } + } + } + + if {[info exists result]} { + set value $result + if {$retopt} { + return $value + } + return 1 + } + + if {$retopt} { + set value $default + return $value + } + + return 0 +} + +proc option-check-names {args} { + foreach o $args { + if {$o ni $::autosetup(options)} { + autosetup-error "Request for undeclared option --$o" + } + } +} + +# Parse the option definition in $opts and update +# ::autosetup(setoptions) and ::autosetup(optionhelp) appropriately +# +proc options-add {opts} { + global autosetup + + # First weed out comment lines + set realopts {} + foreach line [split $opts \n] { + if {![string match "#*" [string trimleft $line]]} { + append realopts $line \n + } + } + set opts $realopts + + for {set i 0} {$i < [llength $opts]} {incr i} { + set opt [lindex $opts $i] + if {[string match =* $opt]} { + # This is a special heading + lappend autosetup(optionhelp) [list $opt $autosetup(module)] + continue + } + unset -nocomplain defaultvalue equal value + + #puts "i=$i, opt=$opt" + regexp {^([^:=]*)(:)?(=)?(.*)$} $opt -> name colon equal value + if {$name in $autosetup(options)} { + autosetup-error "Option $name already specified" + } + + #puts "$opt => $name $colon $equal $value" + + # Find the corresponding value in the user options + # and set the default if necessary + if {[string match "-*" $opt]} { + # We no longer support documentation-only options, like "-C " + autosetup-error "Option $opt is not supported" + } elseif {$colon eq ""} { + # Boolean option + lappend autosetup(options) $name + + # Check for override + if {[dict exists $autosetup(options-defaults) $name]} { + # A default was specified with options-defaults, so use it + set value [dict get $autosetup(options-defaults) $name] + } + + if {$value eq "1"} { + set opthelp "--disable-$name" + } else { + set opthelp "--$name" + } + + # Set the default + if {$value eq ""} { + set value 0 + } + set defaultvalue $value + dict set autosetup(optdefault) $name $defaultvalue + + if {[dict exists $autosetup(getopt) $name]} { + # The option was specified by the user. Look at the last value. + lassign [lindex [dict get $autosetup(getopt) $name] end] type setvalue + if {$type eq "str"} { + # Can we convert the value to a boolean? + if {$setvalue in {1 enabled yes}} { + set setvalue 1 + } elseif {$setvalue in {0 disabled no}} { + set setvalue 0 + } else { + user-error "Boolean option $name given as --$name=$setvalue" + } + } + dict set autosetup(optset) $name $setvalue + #puts "Found boolean option --$name=$setvalue" + } + } else { + # String option. + lappend autosetup(options) $name + + if {$equal ne "="} { + # Was the option given as "name:value=default"? + # If so, set $value to the display name and $defaultvalue to the default + # (This is the preferred way to set a default value for a string option) + if {[regexp {^([^=]+)=(.*)$} $value -> value defaultvalue]} { + dict set autosetup(optdefault) $name $defaultvalue + } + } + + # Maybe override the default value + if {[dict exists $autosetup(options-defaults) $name]} { + # A default was specified with options-defaults, so use it + set defaultvalue [dict get $autosetup(options-defaults) $name] + dict set autosetup(optdefault) $name $defaultvalue + } elseif {![info exists defaultvalue]} { + # No default value was given by value=default or options-defaults + # so use the value as the default when the plain option with no + # value is given (.e.g. 
just --opt instead of --opt=value) + set defaultvalue $value + } + + if {$equal eq "="} { + # String option with optional value + set opthelp "--$name?=$value?" + } else { + # String option with required value + set opthelp "--$name=$value" + } + + # Get the values specified by the user + if {[dict exists $autosetup(getopt) $name]} { + set listvalue {} + + foreach pair [dict get $autosetup(getopt) $name] { + lassign $pair type setvalue + if {$type eq "bool" && $setvalue} { + if {$equal ne "="} { + user-error "Option --$name requires a value" + } + # If given as a boolean, use the default value + set setvalue $defaultvalue + } + lappend listvalue $setvalue + } + + #puts "Found string option --$name=$listvalue" + dict set autosetup(optset) $name $listvalue + } + } + + # Now create the help for this option if appropriate + if {[lindex $opts $i+1] eq "=>"} { + set desc [lindex $opts $i+2] + if {[info exists defaultvalue]} { + set desc [string map [list @default@ $defaultvalue] $desc] + } + # A multi-line description + lappend autosetup(optionhelp) [list $opthelp $autosetup(module) $desc] + incr i 2 + } + } +} + +# @module-options optionlist +# +# Deprecated. Simply use 'options' from within a module. +proc module-options {opts} { + options $opts +} + +proc max {a b} { + expr {$a > $b ? $a : $b} +} + +proc options-wrap-desc {text length firstprefix nextprefix initial} { + set len $initial + set space $firstprefix + foreach word [split $text] { + set word [string trim $word] + if {$word == ""} { + continue + } + if {$len && [string length $space$word] + $len >= $length} { + puts "" + set len 0 + set space $nextprefix + } + incr len [string length $space$word] + puts -nonewline $space$word + set space " " + } + if {$len} { + puts "" + } +} + +# Display options (from $autosetup(optionhelp)) for modules that match +# glob pattern $what +proc options-show {what} { + set local 0 + # Determine the max option width + set max 0 + foreach help $::autosetup(optionhelp) { + lassign $help opt module desc + if {![string match $what $module]} { + continue + } + if {[string match =* $opt] || [string match \n* $desc]} { + continue + } + set max [max $max [string length $opt]] + } + set indent [string repeat " " [expr {$max+4}]] + set cols [getenv COLUMNS 80] + catch { + lassign [exec stty size] _ sttycols + if {[string is integer -strict $sttycols]} { + set cols $sttycols + } + } + incr cols -1 + # Now output + foreach help $::autosetup(optionhelp) { + lassign $help opt module desc + if {![string match $what $module]} { + continue + } + if {$local == 0 && $module eq "auto.def"} { + puts "Local Options:" + incr local + } + if {[string match =* $opt]} { + # Output a special heading line" + puts [string range $opt 1 end] + continue + } + puts -nonewline " [format %-${max}s $opt]" + if {[string match \n* $desc]} { + # Output a pre-formatted help description as-is + puts $desc + } else { + options-wrap-desc [string trim $desc] $cols " " $indent [expr {$max+2}] + } + } +} + +# @options optionspec +# +# Specifies configuration-time options which may be selected by the user +# and checked with 'opt-str' and 'opt-bool'. '$optionspec' contains a series +# of options specifications separated by newlines, as follows: +# +# A boolean option is of the form: +# +## name[=0|1] => "Description of this boolean option" +# +# The default is 'name=0', meaning that the option is disabled by default. 
+# If 'name=1' is used to make the option enabled by default, the description should reflect +# that with text like "Disable support for ...". +# +# An argument option (one which takes a parameter) is of one of the following forms: +# +## name:value => "Description of this option" +## name:value=default => "Description of this option with a default value" +## name:=value => "Description of this option with an optional value" +# +# If the 'name:value' form is used, the value must be provided with the option (as '--name=myvalue'). +# If the 'name:value=default' form is used, the option has the given default value even if not +# specified by the user. +# If the 'name:=value' form is used, the value is optional and the given value is used +# if it is not provided. +# +# The description may contain '@default@', in which case it will be replaced with the default +# value for the option (taking into account defaults specified with 'options-defaults'. +# +# Undocumented options are also supported by omitting the '=> description'. +# These options are not displayed with '--help' and can be useful for internal options or as aliases. +# +# For example, '--disable-lfs' is an alias for '--disable=largefile': +# +## lfs=1 largefile=1 => "Disable large file support" +# +proc options {optlist} { + global autosetup + + options-add $optlist + + if {$autosetup(showhelp)} { + # If --help, stop now to show help + return -code break + } + + if {$autosetup(module) eq "auto.def"} { + # Check for invalid options + if {[opt-bool option-checking]} { + foreach o [dict keys $::autosetup(getopt)] { + if {$o ni $::autosetup(options)} { + user-error "Unknown option --$o" + } + } + } + } +} + +# @options-defaults dictionary +# +# Specifies a dictionary of options and a new default value for each of those options. +# Use before any 'use' statements in 'auto.def' to change the defaults for +# subsequently included modules. +proc options-defaults {dict} { + foreach {n v} $dict { + dict set ::autosetup(options-defaults) $n $v + } +} + +proc config_guess {} { + if {[file-isexec $::autosetup(dir)/autosetup-config.guess]} { + if {[catch {exec-with-stderr sh $::autosetup(dir)/autosetup-config.guess} alias]} { + user-error $alias + } + return $alias + } else { + configlog "No autosetup-config.guess, so using uname" + string tolower [exec uname -p]-unknown-[exec uname -s][exec uname -r] + } +} + +proc config_sub {alias} { + if {[file-isexec $::autosetup(dir)/autosetup-config.sub]} { + if {[catch {exec-with-stderr sh $::autosetup(dir)/autosetup-config.sub $alias} alias]} { + user-error $alias + } + } + return $alias +} + +# @section Variable Definitions (defines) + +# @define name ?value=1? +# +# Defines the named variable to the given value. +# These (name, value) pairs represent the results of the configuration check +# and are available to be subsequently checked, modified and substituted. +# +proc define {name {value 1}} { + set ::define($name) $value + #dputs "$name <= $value" +} + +# @define-push {name ...} script +# +# Save the values of the given defines, evaluation the script, then restore. 
+# For example, to avoid updating AS_FLAGS and AS_CXXFLAGS: +## define-push {AS_CFLAGS AS_CXXFLAGS} { +## cc-check-flags -Wno-error +## } +proc define-push {names script} { + array set unset {} + foreach name $names { + if {[is-defined $name]} { + set save($name) [get-define $name] + } else { + set unset($name) 1 + } + } + uplevel 1 $script + array set ::define [array get save] + foreach name [array names unset] { + unset -nocomplain ::define($name) + } +} + +# @undefine name +# +# Undefine the named variable. +# +proc undefine {name} { + unset -nocomplain ::define($name) + #dputs "$name <= " +} + +# @define-append name value ... +# +# Appends the given value(s) to the given "defined" variable. +# If the variable is not defined or empty, it is set to '$value'. +# Otherwise the value is appended, separated by a space. +# Any extra values are similarly appended. +# +# Note that define-append is not designed to add values containing spaces. +# If values may contain spaces, consider define-append-argv instead. +# +proc define-append {name args} { + if {[get-define $name ""] ne ""} { + foreach arg $args { + if {$arg eq ""} { + continue + } + append ::define($name) " " $arg + } + } else { + set ::define($name) [join $args] + } + #dputs "$name += [join $args] => $::define($name)" +} + +# @define-append-argv name value ... +# +# Similar to define-append except designed to construct shell command +# lines, including correct handling of parameters with spaces. +# +# Each non-empty value is quoted if necessary and then appended to the given variable +# if it does not already exist. +# +proc define-append-argv {name args} { + set seen {} + set new {} + foreach val [list {*}[get-define $name ""] {*}$args] { + if {$val ne {} && ![dict exists $seen $val]} { + lappend new [quote-if-needed $val] + dict set seen $val 1 + } + } + set ::define($name) [join $new " "] + #dputs "$name += [join $args] => $::define($name)" +} + +# @get-define name ?default=0? +# +# Returns the current value of the "defined" variable, or '$default' +# if not set. +# +proc get-define {name {default 0}} { + if {[info exists ::define($name)]} { + #dputs "$name => $::define($name)" + return $::define($name) + } + #dputs "$name => $default" + return $default +} + +# @is-defined name +# +# Returns 1 if the given variable is defined. +# +proc is-defined {name} { + info exists ::define($name) +} + +# @is-define-set name +# +# Returns 1 if the given variable is defined and is set +# to a value other than "" or 0 +# +proc is-define-set {name} { + if {[get-define $name] in {0 ""}} { + return 0 + } + return 1 +} + +# @all-defines +# +# Returns a dictionary (name, value list) of all defined variables. +# +# This is suitable for use with 'dict', 'array set' or 'foreach' +# and allows for arbitrary processing of the defined variables. +# +proc all-defines {} { + array get ::define +} + +# @section Environment/Helpers + +# @get-env name default +# +# If '$name' was specified on the command line, return it. +# Otherwise if '$name' was set in the environment, return it. +# Otherwise return '$default'. +# +proc get-env {name default} { + if {[dict exists $::autosetup(cmdline) $name]} { + return [dict get $::autosetup(cmdline) $name] + } + getenv $name $default +} + +# @env-is-set name +# +# Returns 1 if '$name' was specified on the command line or in the environment. +# Note that an empty environment variable is not considered to be set. 
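#
# Example (illustrative; assumes the conventional CC variable):
#
## if {[env-is-set CC]} {
##     msg-result "using CC from the environment: [get-env CC cc]"
## }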
+# +proc env-is-set {name} { + if {[dict exists $::autosetup(cmdline) $name]} { + return 1 + } + if {[getenv $name ""] ne ""} { + return 1 + } + return 0 +} + +# @readfile filename ?default=""? +# +# Return the contents of the file, without the trailing newline. +# If the file doesn't exist or can't be read, returns '$default'. +# +proc readfile {filename {default_value ""}} { + set result $default_value + catch { + set f [open $filename] + set result [read -nonewline $f] + close $f + } + return $result +} + +# @writefile filename value +# +# Creates the given file containing '$value'. +# Does not add an extra newline. +# +proc writefile {filename value} { + set f [open $filename w] + puts -nonewline $f $value + close $f +} + +proc quote-if-needed {str} { + if {[string match {*[\" ]*} $str]} { + return \"[string map [list \" \\" \\ \\\\] $str]\" + } + return $str +} + +proc quote-argv {argv} { + set args {} + foreach arg $argv { + lappend args [quote-if-needed $arg] + } + join $args +} + +# @list-non-empty list +# +# Returns a copy of the given list with empty elements removed +proc list-non-empty {list} { + set result {} + foreach p $list { + if {$p ne ""} { + lappend result $p + } + } + return $result +} + +# @section Paths, Searching + +# @find-executable-path name +# +# Searches the path for an executable with the given name. +# Note that the name may include some parameters, e.g. 'cc -mbig-endian', +# in which case the parameters are ignored. +# Returns the full path to the executable if found, or "" if not found. +# +proc find-executable-path {name} { + # Ignore any parameters + set name [lindex $name 0] + # The empty string is never a valid executable + if {$name ne ""} { + foreach p [split-path] { + dputs "Looking for $name in $p" + set exec [file join $p $name] + if {[file-isexec $exec]} { + dputs "Found $name -> $exec" + return $exec + } + } + } + return {} +} + +# @find-executable name +# +# Searches the path for an executable with the given name. +# Note that the name may include some parameters, e.g. 'cc -mbig-endian', +# in which case the parameters are ignored. +# Returns 1 if found, or 0 if not. +# +proc find-executable {name} { + if {[find-executable-path $name] eq {}} { + return 0 + } + return 1 +} + +# @find-an-executable ?-required? name ... +# +# Given a list of possible executable names, +# searches for one of these on the path. +# +# Returns the name found, or "" if none found. +# If the first parameter is '-required', an error is generated +# if no executable is found. +# +proc find-an-executable {args} { + set required 0 + if {[lindex $args 0] eq "-required"} { + set args [lrange $args 1 end] + incr required + } + foreach name $args { + if {[find-executable $name]} { + return $name + } + } + if {$required} { + if {[llength $args] == 1} { + user-error "failed to find: [join $args]" + } else { + user-error "failed to find one of: [join $args]" + } + } + return "" +} + +# @section Logging, Messages and Errors + +# @configlog msg +# +# Writes the given message to the configuration log, 'config.log'. +# +proc configlog {msg} { + if {![info exists ::autosetup(logfh)]} { + set ::autosetup(logfh) [open config.log w] + } + puts $::autosetup(logfh) $msg +} + +# @msg-checking msg +# +# Writes the message with no newline to stdout. +# +proc msg-checking {msg} { + if {$::autosetup(msg-quiet) == 0} { + maybe-show-timestamp + puts -nonewline $msg + set ::autosetup(msg-checking) 1 + } +} + +# @msg-result msg +# +# Writes the message to stdout. 
+# +proc msg-result {msg} { + if {$::autosetup(msg-quiet) == 0} { + maybe-show-timestamp + puts $msg + set ::autosetup(msg-checking) 0 + show-notices + } +} + +# @msg-quiet command ... +# +# 'msg-quiet' evaluates it's arguments as a command with output +# from 'msg-checking' and 'msg-result' suppressed. +# +# This is useful if a check needs to run a subcheck which isn't +# of interest to the user. +proc msg-quiet {args} { + incr ::autosetup(msg-quiet) + set rc [uplevel 1 $args] + incr ::autosetup(msg-quiet) -1 + return $rc +} + +# Will be overridden by 'use misc' +proc error-stacktrace {msg} { + return $msg +} + +proc error-location {msg} { + return $msg +} + +################################################################## +# +# Debugging output +# +proc dputs {msg} { + if {$::autosetup(debug)} { + puts $msg + } +} + +################################################################## +# +# User and system warnings and errors +# +# Usage errors such as wrong command line options + +# @user-error msg +# +# Indicate incorrect usage to the user, including if required components +# or features are not found. +# 'autosetup' exits with a non-zero return code. +# +proc user-error {msg} { + show-notices + puts stderr "Error: $msg" + puts stderr "Try: '[file tail $::autosetup(exe)] --help' for options" + exit 1 +} + +# @user-notice msg +# +# Output the given message to stderr. +# +proc user-notice {msg} { + lappend ::autosetup(notices) $msg +} + +# Incorrect usage in the auto.def file. Identify the location. +proc autosetup-error {msg} { + autosetup-full-error [error-location $msg] +} + +# Like autosetup-error, except $msg is the full error message. +proc autosetup-full-error {msg} { + show-notices + puts stderr $msg + exit 1 +} + +proc show-notices {} { + if {$::autosetup(msg-checking)} { + puts "" + set ::autosetup(msg-checking) 0 + } + flush stdout + if {[info exists ::autosetup(notices)]} { + puts stderr [join $::autosetup(notices) \n] + unset ::autosetup(notices) + } +} + +proc maybe-show-timestamp {} { + if {$::autosetup(msg-timing) && $::autosetup(msg-checking) == 0} { + puts -nonewline [format {[%6.2f] } [expr {([clock millis] - $::autosetup(start)) % 10000 / 1000.0}]] + } +} + +# @autosetup-require-version required +# +# Checks the current version of 'autosetup' against '$required'. +# A fatal error is generated if the current version is less than that required. +# +proc autosetup-require-version {required} { + if {[compare-versions $::autosetup(version) $required] < 0} { + user-error "autosetup version $required is required, but this is $::autosetup(version)" + } +} + +proc autosetup_version {} { + return "autosetup v$::autosetup(version)" +} + +################################################################## +# +# Directory/path handling +# + +proc realdir {dir} { + set oldpwd [pwd] + cd $dir + set pwd [pwd] + cd $oldpwd + return $pwd +} + +# Follow symlinks until we get to something which is not a symlink +proc realpath {path} { + while {1} { + if {[catch { + set path [file readlink $path] + }]} { + # Not a link + break + } + } + return $path +} + +# Convert absolute path, $path into a path relative +# to the given directory (or the current dir, if not given). +# +proc relative-path {path {pwd {}}} { + set diff 0 + set same 0 + set newf {} + set prefix {} + set path [file-normalize $path] + if {$pwd eq ""} { + set pwd [pwd] + } else { + set pwd [file-normalize $pwd] + } + + if {$path eq $pwd} { + return . 
+ } + + # Try to make the filename relative to the current dir + foreach p [split $pwd /] f [split $path /] { + if {$p ne $f} { + incr diff + } elseif {!$diff} { + incr same + } + if {$diff} { + if {$p ne ""} { + # Add .. for sibling or parent dir + lappend prefix .. + } + if {$f ne ""} { + lappend newf $f + } + } + } + if {$same == 1 || [llength $prefix] > 3} { + return $path + } + + file join [join $prefix /] [join $newf /] +} + +# Add filename as a dependency to rerun autosetup +# The name will be normalised (converted to a full path) +# +proc autosetup_add_dep {filename} { + lappend ::autosetup(deps) [file-normalize $filename] +} + +# @section Modules Support + +################################################################## +# +# Library module support +# + +# @use module ... +# +# Load the given library modules. +# e.g. 'use cc cc-shared' +# +# Note that module 'X' is implemented in either 'autosetup/X.tcl' +# or 'autosetup/X/init.tcl' +# +# The latter form is useful for a complex module which requires additional +# support file. In this form, '$::usedir' is set to the module directory +# when it is loaded. +# +proc use {args} { + global autosetup libmodule modsource + + set dirs [list $autosetup(libdir)] + if {[info exists autosetup(srcdir)]} { + lappend dirs $autosetup(srcdir)/autosetup + } + foreach m $args { + if {[info exists libmodule($m)]} { + continue + } + set libmodule($m) 1 + + if {[info exists modsource(${m}.tcl)]} { + autosetup_load_module $m eval $modsource(${m}.tcl) + } else { + set locs [list ${m}.tcl ${m}/init.tcl] + set found 0 + foreach dir $dirs { + foreach loc $locs { + set source $dir/$loc + if {[file exists $source]} { + incr found + break + } + } + if {$found} { + break + } + } + if {$found} { + # For the convenience of the "use" source, point to the directory + # it is being loaded from + set ::usedir [file dirname $source] + autosetup_load_module $m source $source + autosetup_add_dep $source + } else { + autosetup-error "use: No such module: $m" + } + } + } +} + +proc autosetup_load_auto_modules {} { + global autosetup modsource + # First load any embedded auto modules + foreach mod [array names modsource *.auto] { + autosetup_load_module $mod eval $modsource($mod) + } + # Now any external auto modules + foreach file [glob -nocomplain $autosetup(libdir)/*.auto $autosetup(libdir)/*/*.auto] { + autosetup_load_module [file tail $file] source $file + } +} + +# Load module source in the global scope by executing the given command +proc autosetup_load_module {module args} { + global autosetup + set prev $autosetup(module) + set autosetup(module) $module + + if {[catch [list uplevel #0 $args] msg opts] ni {0 2 3}} { + autosetup-full-error [error-dump $msg $opts $::autosetup(debug)] + } + set autosetup(module) $prev +} + +# Initial settings +set autosetup(exe) $::argv0 +set autosetup(istcl) 1 +set autosetup(start) [clock millis] +set autosetup(installed) 0 +set autosetup(sysinstall) 0 +set autosetup(msg-checking) 0 +set autosetup(msg-quiet) 0 +set autosetup(inittypes) {} +set autosetup(module) autosetup + +# Embedded modules are inserted below here +set autosetup(installed) 1 +set autosetup(sysinstall) 0 +# ----- @module asciidoc-formatting.tcl ----- + +set modsource(asciidoc-formatting.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which provides text formatting +# asciidoc format + +use formatting + +proc para {text} { + regsub -all "\[ \t\n\]+" [string trim 
$text] " " +} +proc title {text} { + underline [para $text] = + nl +} +proc p {text} { + puts [para $text] + nl +} +proc code {text} { + foreach line [parse_code_block $text] { + puts " $line" + } + nl +} +proc codelines {lines} { + foreach line $lines { + puts " $line" + } + nl +} +proc nl {} { + puts "" +} +proc underline {text char} { + regexp "^(\[ \t\]*)(.*)" $text -> indent words + puts $text + puts $indent[string repeat $char [string length $words]] +} +proc section {text} { + underline "[para $text]" - + nl +} +proc subsection {text} { + underline "$text" ~ + nl +} +proc bullet {text} { + puts "* [para $text]" +} +proc indent {text} { + puts " :: " + puts [para $text] +} +proc defn {first args} { + set sep "" + if {$first ne ""} { + puts "${first}::" + } else { + puts " :: " + } + set defn [string trim [join $args \n]] + regsub -all "\n\n" $defn "\n ::\n" defn + puts $defn +} +} + +# ----- @module formatting.tcl ----- + +set modsource(formatting.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which provides common text formatting + +# This is designed for documentation which looks like: +# code {...} +# or +# code { +# ... +# ... +# } +# In the second case, we need to work out the indenting +# and strip it from all lines but preserve the remaining indenting. +# Note that all lines need to be indented with the same initial +# spaces/tabs. +# +# Returns a list of lines with the indenting removed. +# +proc parse_code_block {text} { + # If the text begins with newline, take the following text, + # otherwise just return the original + if {![regexp "^\n(.*)" $text -> text]} { + return [list [string trim $text]] + } + + # And trip spaces off the end + set text [string trimright $text] + + set min 100 + # Examine each line to determine the minimum indent + foreach line [split $text \n] { + if {$line eq ""} { + # Ignore empty lines for the indent calculation + continue + } + regexp "^(\[ \t\]*)" $line -> indent + set len [string length $indent] + if {$len < $min} { + set min $len + } + } + + # Now make a list of lines with this indent removed + set lines {} + foreach line [split $text \n] { + lappend lines [string range $line $min end] + } + + # Return the result + return $lines +} +} + +# ----- @module getopt.tcl ----- + +set modsource(getopt.tcl) { +# Copyright (c) 2006 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Simple getopt module + +# Parse everything out of the argv list which looks like an option +# Everything which doesn't look like an option, or is after --, is left unchanged +# Understands --enable-xxx as a synonym for --xxx to enable the boolean option xxx. +# Understands --disable-xxx to disable the boolean option xxx. +# +# The returned value is a dictionary keyed by option name +# Each value is a list of {type value} ... where type is "bool" or "str". +# The value for a boolean option is 0 or 1. The value of a string option is the value given. 
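# Illustrative example: given an argv of
#   --enable-silent --prefix=/usr --prefix=/opt
# the returned dictionary resembles:
#   silent {{bool 1}} prefix {{str /usr} {str /opt}}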
+proc getopt {argvname} { + upvar $argvname argv + set nargv {} + + set opts {} + + for {set i 0} {$i < [llength $argv]} {incr i} { + set arg [lindex $argv $i] + + #dputs arg=$arg + + if {$arg eq "--"} { + # End of options + incr i + lappend nargv {*}[lrange $argv $i end] + break + } + + if {[regexp {^--([^=][^=]+)=(.*)$} $arg -> name value]} { + # --name=value + dict lappend opts $name [list str $value] + } elseif {[regexp {^--(enable-|disable-)?([^=]*)$} $arg -> prefix name]} { + if {$prefix in {enable- ""}} { + set value 1 + } else { + set value 0 + } + dict lappend opts $name [list bool $value] + } else { + lappend nargv $arg + } + } + + #puts "getopt: argv=[join $argv] => [join $nargv]" + #array set getopt $opts + #parray getopt + + set argv $nargv + + return $opts +} +} + +# ----- @module help.tcl ----- + +set modsource(help.tcl) { +# Copyright (c) 2010 WorkWare Systems http://workware.net.au/ +# All rights reserved + +# Module which provides usage, help and the command reference + +proc autosetup_help {what} { + use_pager + + puts "Usage: [file tail $::autosetup(exe)] \[options\] \[settings\]\n" + puts "This is [autosetup_version], a build environment \"autoconfigurator\"" + puts "See the documentation online at https://msteveb.github.io/autosetup/\n" + + if {$what in {all local}} { + # Need to load auto.def now + if {[file exists $::autosetup(autodef)]} { + # Load auto.def as module "auto.def" + autosetup_load_module auto.def source $::autosetup(autodef) + } + if {$what eq "all"} { + set what * + } else { + set what auto.def + } + } else { + use $what + puts "Options for module $what:" + } + options-show $what + exit 0 +} + +proc autosetup_show_license {} { + global modsource autosetup + use_pager + + if {[info exists modsource(LICENSE)]} { + puts $modsource(LICENSE) + return + } + foreach dir [list $autosetup(libdir) $autosetup(srcdir)] { + set path [file join $dir LICENSE] + if {[file exists $path]} { + puts [readfile $path] + return + } + } + puts "LICENSE not found" +} + +# If not already paged and stdout is a tty, pipe the output through the pager +# This is done by reinvoking autosetup with --nopager added +proc use_pager {} { + if {![opt-bool nopager] && [getenv PAGER ""] ne "" && [isatty? stdin] && [isatty? stdout]} { + if {[catch { + exec [info nameofexecutable] $::argv0 --nopager {*}$::argv |& {*}[getenv PAGER] >@stdout <@stdin 2>@stderr + } msg opts] == 1} { + if {[dict get $opts -errorcode] eq "NONE"} { + # an internal/exec error + puts stderr $msg + exit 1 + } + } + exit 0 + } +} + +# Outputs the autosetup references in one of several formats +proc autosetup_reference {{type text}} { + + use_pager + + switch -glob -- $type { + wiki {use wiki-formatting} + ascii* {use asciidoc-formatting} + md - markdown {use markdown-formatting} + default {use text-formatting} + } + + title "[autosetup_version] -- Command Reference" + + section {Introduction} + + p { + See https://msteveb.github.io/autosetup/ for the online documentation for 'autosetup'. + This documentation can also be accessed locally with `autosetup --ref`. + } + + p { + 'autosetup' provides a number of built-in commands which + are documented below. These may be used from 'auto.def' to test + for features, define variables, create files from templates and + other similar actions. 
+ } + + automf_command_reference + + exit 0 +} + +proc autosetup_output_block {type lines} { + if {[llength $lines]} { + switch $type { + section { + section $lines + } + subsection { + subsection $lines + } + code { + codelines $lines + } + p { + p [join $lines] + } + list { + foreach line $lines { + bullet $line + } + nl + } + } + } +} + +# Generate a command reference from inline documentation +proc automf_command_reference {} { + lappend files $::autosetup(prog) + lappend files {*}[lsort [glob -nocomplain $::autosetup(libdir)/{*/*.tcl,*.tcl}]] + + # We want to process all non-module files before module files + # and then modules in alphabetical order. + # So examine all files and extract docs into doc($modulename) and doc(_core_) + # + # Each entry is a list of {type data} where $type is one of: section, subsection, code, list, p + # and $data is a string for section, subsection or a list of text lines for other types. + + # XXX: Should commands be in alphabetical order too? Currently they are in file order. + + set doc(_core_) {} + lappend doc(_core_) [list section "Core Commands"] + + foreach file $files { + set modulename [file rootname [file tail $file]] + set current _core_ + set f [open $file] + while {![eof $f]} { + set line [gets $f] + + if {[regexp {^#.*@section (.*)$} $line -> section]} { + lappend doc($current) [list section $section] + continue + } + + # Find embedded module names + if {[regexp {^#.*@module ([^ ]*)} $line -> modulename]} { + continue + } + + # Find lines starting with "# @*" and continuing through the remaining comment lines + if {![regexp {^# @(.*)} $line -> cmd]} { + continue + } + + # Synopsis or command? + if {$cmd eq "synopsis:"} { + set current $modulename + lappend doc($current) [list section "Module: $modulename"] + } else { + lappend doc($current) [list subsection $cmd] + } + + set lines {} + set type p + + # Now the description + while {![eof $f]} { + set line [gets $f] + + if {![regexp {^#(#)? ?(.*)} $line -> hash cmd]} { + break + } + if {$hash eq "#"} { + set t code + } elseif {[regexp {^- (.*)} $cmd -> cmd]} { + set t list + } else { + set t p + } + + #puts "hash=$hash, oldhash=$oldhash, lines=[llength $lines], cmd=$cmd" + + if {$t ne $type || $cmd eq ""} { + # Finish the current block + lappend doc($current) [list $type $lines] + set lines {} + set type $t + } + if {$cmd ne ""} { + lappend lines $cmd + } + } + + lappend doc($current) [list $type $lines] + } + close $f + } + + # Now format and output the results + + # _core_ will sort first + foreach module [lsort [array names doc]] { + foreach item $doc($module) { + autosetup_output_block {*}$item + } + } +} +} + +# ----- @module init.tcl ----- + +set modsource(init.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module to help create auto.def and configure + +proc autosetup_init {type} { + set help 0 + if {$type in {? help}} { + incr help + } elseif {![dict exists $::autosetup(inittypes) $type]} { + puts "Unknown type, --init=$type" + incr help + } + if {$help} { + puts "Use one of the following types (e.g. 
--init=make)\n" + foreach type [lsort [dict keys $::autosetup(inittypes)]] { + lassign [dict get $::autosetup(inittypes) $type] desc + # XXX: Use the options-show code to wrap the description + puts [format "%-10s %s" $type $desc] + } + return + } + lassign [dict get $::autosetup(inittypes) $type] desc script + + puts "Initialising $type: $desc\n" + + # All initialisations happens in the top level srcdir + cd $::autosetup(srcdir) + + uplevel #0 $script +} + +proc autosetup_add_init_type {type desc script} { + dict set ::autosetup(inittypes) $type [list $desc $script] +} + +# This is for in creating build-system init scripts +# +# If the file doesn't exist, create it containing $contents +# If the file does exist, only overwrite if --force is specified. +# +proc autosetup_check_create {filename contents} { + if {[file exists $filename]} { + if {!$::autosetup(force)} { + puts "I see $filename already exists." + return + } else { + puts "I will overwrite the existing $filename because you used --force." + } + } else { + puts "I don't see $filename, so I will create it." + } + writefile $filename $contents +} +} + +# ----- @module install.tcl ----- + +set modsource(install.tcl) { +# Copyright (c) 2006-2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which can install autosetup + +# autosetup(installed)=1 means that autosetup is not running from source +# autosetup(sysinstall)=1 means that autosetup is running from a sysinstall version +# shared=1 means that we are trying to do a sysinstall. This is only possible from the development source. + +proc autosetup_install {dir {shared 0}} { + global autosetup + if {$shared} { + if {$autosetup(installed) || $autosetup(sysinstall)} { + user-error "Can only --sysinstall from development sources" + } + } elseif {$autosetup(installed) && !$autosetup(sysinstall)} { + user-error "Can't --install from project install" + } + + if {$autosetup(sysinstall)} { + # This is the sysinstall version, so install just uses references + cd $dir + + puts "[autosetup_version] creating configure to use system-installed autosetup" + autosetup_create_configure 1 + puts "Creating autosetup/README.autosetup" + file mkdir autosetup + autosetup_install_readme autosetup/README.autosetup 1 + return + } + + if {[catch { + if {$shared} { + set target $dir/bin/autosetup + set installedas $target + } else { + if {$dir eq "."} { + set installedas autosetup + } else { + set installedas $dir/autosetup + } + cd $dir + file mkdir autosetup + set target autosetup/autosetup + } + set targetdir [file dirname $target] + file mkdir $targetdir + + set f [open $target w] + + set publicmodules {} + + # First the main script, but only up until "CUT HERE" + set in [open $autosetup(dir)/autosetup] + while {[gets $in buf] >= 0} { + if {$buf ne "##-- CUT HERE --##"} { + puts $f $buf + continue + } + + # Insert the static modules here + # i.e. 
those which don't contain @synopsis: + # All modules are inserted if $shared is set + puts $f "set autosetup(installed) 1" + puts $f "set autosetup(sysinstall) $shared" + foreach file [lsort [glob $autosetup(libdir)/*.{tcl,auto}]] { + set modname [file tail $file] + set ext [file ext $modname] + set buf [readfile $file] + if {!$shared} { + if {$ext eq ".auto" || [string match "*\n# @synopsis:*" $buf]} { + lappend publicmodules $file + continue + } + } + dputs "install: importing lib/[file tail $file]" + puts $f "# ----- @module $modname -----" + puts $f "\nset modsource($modname) \{" + puts $f $buf + puts $f "\}\n" + } + if {$shared} { + foreach {srcname destname} [list $autosetup(libdir)/README.autosetup-lib README.autosetup \ + $autosetup(srcdir)/LICENSE LICENSE] { + dputs "install: importing $srcname as $destname" + puts $f "\nset modsource($destname) \\\n[list [readfile $srcname]\n]\n" + } + } + } + close $in + close $f + catch {exec chmod 755 $target} + + set installfiles {autosetup-config.guess autosetup-config.sub autosetup-test-tclsh} + set removefiles {} + + if {!$shared} { + autosetup_install_readme $targetdir/README.autosetup 0 + + # Install public modules + foreach file $publicmodules { + set tail [file tail $file] + autosetup_install_file $file $targetdir/$tail + } + lappend installfiles jimsh0.c autosetup-find-tclsh LICENSE + lappend removefiles config.guess config.sub test-tclsh find-tclsh + } else { + lappend installfiles {sys-find-tclsh autosetup-find-tclsh} + } + + # Install support files + foreach fileinfo $installfiles { + if {[llength $fileinfo] == 2} { + lassign $fileinfo source dest + } else { + lassign $fileinfo source + set dest $source + } + autosetup_install_file $autosetup(dir)/$source $targetdir/$dest + } + + # Remove obsolete files + foreach file $removefiles { + if {[file exists $targetdir/$file]} { + file delete $targetdir/$file + } + } + } error]} { + user-error "Failed to install autosetup: $error" + } + if {$shared} { + set type "system" + } else { + set type "local" + } + puts "Installed $type [autosetup_version] to $installedas" + + if {!$shared} { + # Now create 'configure' if necessary + autosetup_create_configure 0 + } +} + +proc autosetup_create_configure {shared} { + if {[file exists configure]} { + if {!$::autosetup(force)} { + # Could this be an autosetup configure? + if {![string match "*\nWRAPPER=*" [readfile configure]]} { + puts "I see configure, but not created by autosetup, so I won't overwrite it." + puts "Remove it or use --force to overwrite." + return + } + } else { + puts "I will overwrite the existing configure because you used --force." + } + } else { + puts "I don't see configure, so I will create it." 
+ } + if {$shared} { + writefile configure \ +{#!/bin/sh +WRAPPER="$0"; export WRAPPER; "autosetup" "$@" +} + } else { + writefile configure \ +{#!/bin/sh +dir="`dirname "$0"`/autosetup" +#@@INITCHECK@@# +WRAPPER="$0"; export WRAPPER; exec "`"$dir/autosetup-find-tclsh"`" "$dir/autosetup" "$@" +} + } + catch {exec chmod 755 configure} +} + +# Append the contents of $file to filehandle $f +proc autosetup_install_append {f file} { + dputs "install: include $file" + set in [open $file] + puts $f [read $in] + close $in +} + +proc autosetup_install_file {source target} { + dputs "install: $source => $target" + if {![file exists $source]} { + error "Missing installation file '$source'" + } + writefile $target [readfile $source]\n + # If possible, copy the file mode + file stat $source stat + set mode [format %o [expr {$stat(mode) & 0x1ff}]] + catch {exec chmod $mode $target} +} + +proc autosetup_install_readme {target sysinstall} { + set readme "README.autosetup created by [autosetup_version]\n\n" + if {$sysinstall} { + append readme \ +{This is the autosetup directory for a system install of autosetup. +Loadable modules can be added here. +} + } else { + append readme \ +{This is the autosetup directory for a local install of autosetup. +It contains autosetup, support files and loadable modules. +} +} + + append readme { +*.tcl files in this directory are optional modules which +can be loaded with the 'use' directive. + +*.auto files in this directory are auto-loaded. + +For more information, see https://msteveb.github.io/autosetup/ +} + dputs "install: autosetup/README.autosetup" + writefile $target $readme +} +} + +# ----- @module markdown-formatting.tcl ----- + +set modsource(markdown-formatting.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which provides text formatting +# markdown format (kramdown syntax) + +use formatting + +proc para {text} { + regsub -all "\[ \t\n\]+" [string trim $text] " " text + regsub -all {([^a-zA-Z])'([^']*)'} $text {\1**`\2`**} text + regsub -all {^'([^']*)'} $text {**`\1`**} text + regsub -all {(http[^ \t\n]*)} $text {[\1](\1)} text + return $text +} +proc title {text} { + underline [para $text] = + nl +} +proc p {text} { + puts [para $text] + nl +} +proc codelines {lines} { + puts "~~~~~~~~~~~~" + foreach line $lines { + puts $line + } + puts "~~~~~~~~~~~~" + nl +} +proc code {text} { + puts "~~~~~~~~~~~~" + foreach line [parse_code_block $text] { + puts $line + } + puts "~~~~~~~~~~~~" + nl +} +proc nl {} { + puts "" +} +proc underline {text char} { + regexp "^(\[ \t\]*)(.*)" $text -> indent words + puts $text + puts $indent[string repeat $char [string length $words]] +} +proc section {text} { + underline "[para $text]" - + nl +} +proc subsection {text} { + puts "### `$text`" + nl +} +proc bullet {text} { + puts "* [para $text]" +} +proc defn {first args} { + puts "^" + set defn [string trim [join $args \n]] + if {$first ne ""} { + puts "**${first}**" + puts -nonewline ": " + regsub -all "\n\n" $defn "\n: " defn + } + puts "$defn" +} +} + +# ----- @module misc.tcl ----- + +set modsource(misc.tcl) { +# Copyright (c) 2007-2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module containing misc procs useful to modules +# Largely for platform compatibility + +set autosetup(istcl) [info exists ::tcl_library] +set autosetup(iswin) [string equal windows 
$tcl_platform(platform)] + +if {$autosetup(iswin)} { + # mingw/windows separates $PATH with semicolons + # and doesn't have an executable bit + proc split-path {} { + split [getenv PATH .] {;} + } + proc file-isexec {exec} { + # Basic test for windows. We ignore .bat + if {[file isfile $exec] || [file isfile $exec.exe]} { + return 1 + } + return 0 + } +} else { + # unix separates $PATH with colons and has and executable bit + proc split-path {} { + split [getenv PATH .] : + } + # Check for an executable file + proc file-isexec {exec} { + if {[file executable $exec] && [file isfile $exec]} { + return 1 + } + return 0 + } +} + +# Assume that exec can return stdout and stderr +proc exec-with-stderr {args} { + exec {*}$args 2>@1 +} + +if {$autosetup(istcl)} { + # Tcl doesn't have the env command + proc getenv {name args} { + if {[info exists ::env($name)]} { + return $::env($name) + } + if {[llength $args]} { + return [lindex $args 0] + } + return -code error "environment variable \"$name\" does not exist" + } + proc isatty? {channel} { + dict exists [fconfigure $channel] -xchar + } + # Jim-compatible stacktrace using info frame + proc stacktrace {} { + set stacktrace {} + # 2 to skip the current frame + for {set i 2} {$i < [info frame]} {incr i} { + set frame [info frame -$i] + if {[dict exists $frame file]} { + # We don't need proc, so use "" + lappend stacktrace "" [dict get $frame file] [dict get $frame line] "" + } + } + return $stacktrace + } +} else { + if {$autosetup(iswin)} { + # On Windows, backslash convert all environment variables + # (Assume that Tcl does this for us) + proc getenv {name args} { + string map {\\ /} [env $name {*}$args] + } + } else { + # Jim on unix is simple + alias getenv env + } + proc isatty? {channel} { + set tty 0 + catch { + # isatty is a recent addition to Jim Tcl + set tty [$channel isatty] + } + return $tty + } +} + +# In case 'file normalize' doesn't exist +# +proc file-normalize {path} { + if {[catch {file normalize $path} result]} { + if {$path eq ""} { + return "" + } + set oldpwd [pwd] + if {[file isdir $path]} { + cd $path + set result [pwd] + } else { + cd [file dirname $path] + set result [file join [pwd] [file tail $path]] + } + cd $oldpwd + } + return $result +} + +# If everything is working properly, the only errors which occur +# should be generated in user code (e.g. auto.def). +# By default, we only want to show the error location in user code. +# We use [info frame] to achieve this, but it works differently on Tcl and Jim. +# +# This is designed to be called for incorrect usage in auto.def, via autosetup-error +# +proc error-location {msg} { + if {$::autosetup(debug)} { + return -code error $msg + } + set vars {p f l cmd} + if {!$::autosetup(istcl) && ![dict exists $::tcl_platform stackFormat]} { + # Older versions of Jim had a 3 element stacktrace + set vars {p f l} + } + foreach $vars [stacktrace] { + if {[string match *.def $f]} { + return "[relative-path $f]:$l: Error: $msg" + } + #puts "Skipping $f:$l" + } + return $msg +} + +# If everything is working properly, the only errors which occur +# should be generated in user code (e.g. auto.def). +# By default, we only want to show the error location in user code. +# We use [info frame] to achieve this, but it works differently on Tcl and Jim. 
+# +# This is designed to be called for incorrect usage in auto.def, via autosetup-error +# +proc error-stacktrace {msg} { + if {$::autosetup(debug)} { + return -code error $msg + } + # Search back through the stack trace for the first error in a .def file + for {set i 1} {$i < [info level]} {incr i} { + if {$::autosetup(istcl)} { + array set info [info frame -$i] + } else { + lassign [info frame -$i] info(caller) info(file) info(line) + } + if {[string match *.def $info(file)]} { + return "[relative-path $info(file)]:$info(line): Error: $msg" + } + #puts "Skipping $info(file):$info(line)" + } + return $msg +} + +# Given the return from [catch {...} msg opts], returns an appropriate +# error message. A nice one for Jim and a less-nice one for Tcl. +# If 'fulltrace' is set, a full stack trace is provided. +# Otherwise a simple message is provided. +# +# This is designed for developer errors, e.g. in module code or auto.def code +# +# +proc error-dump {msg opts fulltrace} { + if {$::autosetup(istcl)} { + if {$fulltrace} { + return "Error: [dict get $opts -errorinfo]" + } else { + return "Error: $msg" + } + } else { + lassign $opts(-errorinfo) p f l + if {$f ne ""} { + set result "$f:$l: Error: " + } + append result "$msg\n" + if {$fulltrace} { + append result [stackdump $opts(-errorinfo)] + } + + # Remove the trailing newline + string trim $result + } +} +} + +# ----- @module text-formatting.tcl ----- + +set modsource(text-formatting.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which provides text formatting + +use formatting + +proc wordwrap {text length {firstprefix ""} {nextprefix ""}} { + set len 0 + set space $firstprefix + + foreach word [split $text] { + set word [string trim $word] + if {$word eq ""} { + continue + } + if {[info exists partial]} { + append partial " " $word + if {[string first $quote $word] < 0} { + # Haven't found end of quoted word + continue + } + # Finished quoted word + set word $partial + unset partial + unset quote + } else { + set quote [string index $word 0] + if {$quote in {' *}} { + if {[string first $quote $word 1] < 0} { + # Haven't found end of quoted word + # Not a whole word. + set first [string index $word 0] + # Start of quoted word + set partial $word + continue + } + } + } + + if {$len && [string length $space$word] + $len >= $length} { + puts "" + set len 0 + set space $nextprefix + } + incr len [string length $space$word] + + # Use man-page conventions for highlighting 'quoted' and *quoted* + # single words. + # Use x^Hx for *bold* and _^Hx for 'underline'. + # + # less and more will both understand this. + # Pipe through 'col -b' to remove them. + if {[regexp {^'(.*)'(.*)} $word -> quoted after]} { + set quoted [string map {~ " "} $quoted] + regsub -all . $quoted "&\b&" quoted + set word $quoted$after + } elseif {[regexp {^[*](.*)[*](.*)} $word -> quoted after]} { + set quoted [string map {~ " "} $quoted] + regsub -all . 
$quoted "_\b&" quoted + set word $quoted$after + } + puts -nonewline $space$word + set space " " + } + if {[info exists partial]} { + # Missing end of quote + puts -nonewline $space$partial + } + if {$len} { + puts "" + } +} +proc title {text} { + underline [string trim $text] = + nl +} +proc p {text} { + wordwrap $text 80 + nl +} +proc codelines {lines} { + foreach line $lines { + puts " $line" + } + nl +} +proc nl {} { + puts "" +} +proc underline {text char} { + regexp "^(\[ \t\]*)(.*)" $text -> indent words + puts $text + puts $indent[string repeat $char [string length $words]] +} +proc section {text} { + underline "[string trim $text]" - + nl +} +proc subsection {text} { + underline "$text" ~ + nl +} +proc bullet {text} { + wordwrap $text 76 " * " " " +} +proc indent {text} { + wordwrap $text 76 " " " " +} +proc defn {first args} { + if {$first ne ""} { + underline " $first" ~ + } + foreach p $args { + if {$p ne ""} { + indent $p + } + } +} +} + +# ----- @module util.tcl ----- + +set modsource(util.tcl) { +# Copyright (c) 2012 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which contains miscellaneous utility functions + +# @section Utilities + +# @compare-versions version1 version2 +# +# Versions are of the form 'a.b.c' (may be any number of numeric components) +# +# Compares the two versions and returns: +## -1 if v1 < v2 +## 0 if v1 == v2 +## 1 if v1 > v2 +# +# If one version has fewer components than the other, 0 is substituted to the right. e.g. +## 0.2 < 0.3 +## 0.2.5 > 0.2 +## 1.1 == 1.1.0 +# +proc compare-versions {v1 v2} { + foreach c1 [split $v1 .] c2 [split $v2 .] { + if {$c1 eq ""} { + set c1 0 + } + if {$c2 eq ""} { + set c2 0 + } + if {$c1 < $c2} { + return -1 + } + if {$c1 > $c2} { + return 1 + } + } + return 0 +} + +# @suffix suf list +# +# Takes a list and returns a new list with '$suf' appended +# to each element +# +## suffix .c {a b c} => {a.c b.c c.c} +# +proc suffix {suf list} { + set result {} + foreach p $list { + lappend result $p$suf + } + return $result +} + +# @prefix pre list +# +# Takes a list and returns a new list with '$pre' prepended +# to each element +# +## prefix jim- {a.c b.c} => {jim-a.c jim-b.c} +# +proc prefix {pre list} { + set result {} + foreach p $list { + lappend result $pre$p + } + return $result +} + +# @lpop list +# +# Removes the last entry from the given list and returns it. 
+proc lpop {listname} { + upvar $listname list + set val [lindex $list end] + set list [lrange $list 0 end-1] + return $val +} +} + +# ----- @module wiki-formatting.tcl ----- + +set modsource(wiki-formatting.tcl) { +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# Module which provides text formatting +# wiki.tcl.tk format output + +use formatting + +proc joinlines {text} { + set lines {} + foreach l [split [string trim $text] \n] { + lappend lines [string trim $l] + } + join $lines +} +proc p {text} { + puts [joinlines $text] + puts "" +} +proc title {text} { + puts "*** [joinlines $text] ***" + puts "" +} +proc codelines {lines} { + puts "======" + foreach line $lines { + puts " $line" + } + puts "======" +} +proc code {text} { + puts "======" + foreach line [parse_code_block $text] { + puts " $line" + } + puts "======" +} +proc nl {} { +} +proc section {text} { + puts "'''$text'''" + puts "" +} +proc subsection {text} { + puts "''$text''" + puts "" +} +proc bullet {text} { + puts " * [joinlines $text]" +} +proc indent {text} { + puts " : [joinlines $text]" +} +proc defn {first args} { + if {$first ne ""} { + indent '''$first''' + } + + foreach p $args { + p $p + } +} +} + + +################################################################## +# +# Entry/Exit +# +if {$autosetup(debug)} { + main $argv +} +if {[catch {main $argv} msg opts] == 1} { + show-notices + autosetup-full-error [error-dump $msg $opts $autosetup(debug)] + if {!$autosetup(debug)} { + puts stderr "Try: '[file tail $autosetup(exe)] --debug' for a full stack trace" + } + exit 1 +} diff --git a/config.guess b/autosetup/autosetup-config.guess old mode 100644 new mode 100755 similarity index 64% rename from config.guess rename to autosetup/autosetup-config.guess index ae713942d8..48a684601b --- a/config.guess +++ b/autosetup/autosetup-config.guess @@ -1,12 +1,14 @@ #! /bin/sh # Attempt to guess a canonical system name. -# Copyright 1992-2019 Free Software Foundation, Inc. +# Copyright 1992-2024 Free Software Foundation, Inc. -timestamp='2019-05-28' +# shellcheck disable=SC2006,SC2268 # see below for rationale + +timestamp='2024-07-27' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 3 of the License, or +# the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but @@ -27,17 +29,25 @@ timestamp='2019-05-28' # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: -# https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess +# https://git.savannah.gnu.org/cgit/config.git/plain/config.guess # # Please send patches to . +# The "shellcheck disable" line above the timestamp inhibits complaints +# about features and limitations of the classic Bourne shell that were +# superseded or lifted in POSIX. However, this script identifies a wide +# variety of pre-POSIX systems that do not have POSIX shells at all, and +# even some reasonably current systems (Solaris 10 as case-in-point) still +# have a pre-POSIX /bin/sh. 
+ + me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] -Output the configuration name of the system \`$me' is run on. +Output the configuration name of the system '$me' is run on. Options: -h, --help print this help, then exit @@ -50,13 +60,13 @@ version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. -Copyright 1992-2019 Free Software Foundation, Inc. +Copyright 1992-2024 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" -Try \`$me --help' for more information." +Try '$me --help' for more information." # Parse command line while test $# -gt 0 ; do @@ -84,13 +94,16 @@ if test $# != 0; then exit 1 fi +# Just in case it came from the environment. +GUESS= + # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. -# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still -# use `HOST_CC' if defined, but it is deprecated. +# Historically, 'CC_FOR_BUILD' used to be named 'HOST_CC'. We still +# use 'HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. @@ -99,8 +112,10 @@ tmp= trap 'test -z "$tmp" || rm -fr "$tmp"' 0 1 2 13 15 set_cc_for_build() { + # prevent multiple calls if $tmp is already set + test "$tmp" && return 0 : "${TMPDIR=/tmp}" - # shellcheck disable=SC2039 + # shellcheck disable=SC2039,SC3028 { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir "$tmp" 2>/dev/null) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir "$tmp" 2>/dev/null) && echo "Warning: creating insecure temp directory" >&2 ; } || @@ -108,9 +123,9 @@ set_cc_for_build() { dummy=$tmp/dummy case ${CC_FOR_BUILD-},${HOST_CC-},${CC-} in ,,) echo "int x;" > "$dummy.c" - for driver in cc gcc c89 c99 ; do + for driver in cc gcc c17 c99 c89 ; do if ($driver -c -o "$dummy.o" "$dummy.c") >/dev/null 2>&1 ; then - CC_FOR_BUILD="$driver" + CC_FOR_BUILD=$driver break fi done @@ -131,40 +146,57 @@ fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown -UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown -case "$UNAME_SYSTEM" in +case $UNAME_SYSTEM in Linux|GNU|GNU/*) - # If the system lacks a compiler, then just pick glibc. - # We could probably try harder. - LIBC=gnu + LIBC=unknown set_cc_for_build cat <<-EOF > "$dummy.c" + #if defined(__ANDROID__) + LIBC=android + #else #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc - #else + #elif defined(__GLIBC__) LIBC=gnu + #elif defined(__LLVM_LIBC__) + LIBC=llvm + #else + #include + /* First heuristic to detect musl libc. */ + #ifdef __DEFINED_va_list + LIBC=musl + #endif + #endif #endif EOF - eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'`" + cc_set_libc=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` + eval "$cc_set_libc" - # If ldd exists, use it to detect musl libc. 
- if command -v ldd >/dev/null && \ - ldd --version 2>&1 | grep -q ^musl - then - LIBC=musl + # Second heuristic to detect musl libc. + if [ "$LIBC" = unknown ] && + command -v ldd >/dev/null && + ldd --version 2>&1 | grep -q ^musl; then + LIBC=musl + fi + + # If the system lacks a compiler, then just pick glibc. + # We could probably try harder. + if [ "$LIBC" = unknown ]; then + LIBC=gnu fi ;; esac # Note: order is significant - the case branches are not exclusive. -case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in +case $UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, @@ -176,12 +208,12 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". - sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ - "/sbin/$sysctl" 2>/dev/null || \ - "/usr/sbin/$sysctl" 2>/dev/null || \ + /sbin/sysctl -n hw.machine_arch 2>/dev/null || \ + /usr/sbin/sysctl -n hw.machine_arch 2>/dev/null || \ echo unknown)` - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in + aarch64eb) machine=aarch64_be-unknown ;; armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; @@ -190,13 +222,13 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in earmv*) arch=`echo "$UNAME_MACHINE_ARCH" | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo "$UNAME_MACHINE_ARCH" | sed -ne 's,^.*\(eb\)$,\1,p'` - machine="${arch}${endian}"-unknown + machine=${arch}${endian}-unknown ;; - *) machine="$UNAME_MACHINE_ARCH"-unknown ;; + *) machine=$UNAME_MACHINE_ARCH-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in earm*) os=netbsdelf ;; @@ -217,7 +249,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in ;; esac # Determine ABI tags. - case "$UNAME_MACHINE_ARCH" in + case $UNAME_MACHINE_ARCH in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo "$UNAME_MACHINE_ARCH" | sed -e "$expr"` @@ -228,7 +260,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. - case "$UNAME_VERSION" in + case $UNAME_VERSION in Debian*) release='-gnu' ;; @@ -239,45 +271,57 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. 
- echo "$machine-${os}${release}${abi-}" - exit ;; + GUESS=$machine-${os}${release}${abi-} + ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-bitrig"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-bitrig$UNAME_RELEASE + ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-openbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-openbsd$UNAME_RELEASE + ;; + *:SecBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/SecBSD.//'` + GUESS=$UNAME_MACHINE_ARCH-unknown-secbsd$UNAME_RELEASE + ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` - echo "$UNAME_MACHINE_ARCH"-unknown-libertybsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE_ARCH-unknown-libertybsd$UNAME_RELEASE + ;; *:MidnightBSD:*:*) - echo "$UNAME_MACHINE"-unknown-midnightbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-midnightbsd$UNAME_RELEASE + ;; *:ekkoBSD:*:*) - echo "$UNAME_MACHINE"-unknown-ekkobsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-ekkobsd$UNAME_RELEASE + ;; *:SolidBSD:*:*) - echo "$UNAME_MACHINE"-unknown-solidbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-solidbsd$UNAME_RELEASE + ;; + *:OS108:*:*) + GUESS=$UNAME_MACHINE-unknown-os108_$UNAME_RELEASE + ;; macppc:MirBSD:*:*) - echo powerpc-unknown-mirbsd"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-unknown-mirbsd$UNAME_RELEASE + ;; *:MirBSD:*:*) - echo "$UNAME_MACHINE"-unknown-mirbsd"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-mirbsd$UNAME_RELEASE + ;; *:Sortix:*:*) - echo "$UNAME_MACHINE"-unknown-sortix - exit ;; + GUESS=$UNAME_MACHINE-unknown-sortix + ;; + *:Twizzler:*:*) + GUESS=$UNAME_MACHINE-unknown-twizzler + ;; *:Redox:*:*) - echo "$UNAME_MACHINE"-unknown-redox - exit ;; + GUESS=$UNAME_MACHINE-unknown-redox + ;; mips:OSF1:*.*) - echo mips-dec-osf1 - exit ;; + GUESS=mips-dec-osf1 + ;; alpha:OSF1:*:*) + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + trap '' 0 case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` @@ -291,7 +335,7 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` - case "$ALPHA_CPU_TYPE" in + case $ALPHA_CPU_TYPE in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") @@ -328,117 +372,121 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. - echo "$UNAME_MACHINE"-dec-osf"`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz`" - # Reset EXIT trap before exiting to avoid spurious non-zero exit code. - exitcode=$? 
- trap '' 0 - exit $exitcode ;; + OSF_REL=`echo "$UNAME_RELEASE" | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` + GUESS=$UNAME_MACHINE-dec-osf$OSF_REL + ;; Amiga*:UNIX_System_V:4.0:*) - echo m68k-unknown-sysv4 - exit ;; + GUESS=m68k-unknown-sysv4 + ;; *:[Aa]miga[Oo][Ss]:*:*) - echo "$UNAME_MACHINE"-unknown-amigaos - exit ;; + GUESS=$UNAME_MACHINE-unknown-amigaos + ;; *:[Mm]orph[Oo][Ss]:*:*) - echo "$UNAME_MACHINE"-unknown-morphos - exit ;; + GUESS=$UNAME_MACHINE-unknown-morphos + ;; *:OS/390:*:*) - echo i370-ibm-openedition - exit ;; + GUESS=i370-ibm-openedition + ;; *:z/VM:*:*) - echo s390-ibm-zvmoe - exit ;; + GUESS=s390-ibm-zvmoe + ;; *:OS400:*:*) - echo powerpc-ibm-os400 - exit ;; + GUESS=powerpc-ibm-os400 + ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) - echo arm-acorn-riscix"$UNAME_RELEASE" - exit ;; + GUESS=arm-acorn-riscix$UNAME_RELEASE + ;; arm*:riscos:*:*|arm*:RISCOS:*:*) - echo arm-unknown-riscos - exit ;; + GUESS=arm-unknown-riscos + ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) - echo hppa1.1-hitachi-hiuxmpp - exit ;; + GUESS=hppa1.1-hitachi-hiuxmpp + ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. - if test "`(/bin/universe) 2>/dev/null`" = att ; then - echo pyramid-pyramid-sysv3 - else - echo pyramid-pyramid-bsd - fi - exit ;; + case `(/bin/universe) 2>/dev/null` in + att) GUESS=pyramid-pyramid-sysv3 ;; + *) GUESS=pyramid-pyramid-bsd ;; + esac + ;; NILE*:*:*:dcosx) - echo pyramid-pyramid-svr4 - exit ;; + GUESS=pyramid-pyramid-svr4 + ;; DRS?6000:unix:4.0:6*) - echo sparc-icl-nx6 - exit ;; + GUESS=sparc-icl-nx6 + ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in - sparc) echo sparc-icl-nx7; exit ;; - esac ;; + sparc) GUESS=sparc-icl-nx7 ;; + esac + ;; s390x:SunOS:*:*) - echo "$UNAME_MACHINE"-ibm-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$UNAME_MACHINE-ibm-solaris2$SUN_REL + ;; sun4H:SunOS:5.*:*) - echo sparc-hal-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-hal-solaris2$SUN_REL + ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) - echo sparc-sun-solaris2"`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris2$SUN_REL + ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) - echo i386-pc-auroraux"$UNAME_RELEASE" - exit ;; + GUESS=i386-pc-auroraux$UNAME_RELEASE + ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. - if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ - (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ + (CCOPTS="" $CC_FOR_BUILD -m64 -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi - echo "$SUN_ARCH"-pc-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=$SUN_ARCH-pc-solaris2$SUN_REL + ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. 
Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. - echo sparc-sun-solaris3"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=sparc-sun-solaris3$SUN_REL + ;; sun4*:SunOS:*:*) - case "`/usr/bin/arch -k`" in + case `/usr/bin/arch -k` in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac - # Japanese Language versions have a version number like `4.1.3-JL'. - echo sparc-sun-sunos"`echo "$UNAME_RELEASE"|sed -e 's/-/_/'`" - exit ;; + # Japanese Language versions have a version number like '4.1.3-JL'. + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/'` + GUESS=sparc-sun-sunos$SUN_REL + ;; sun3*:SunOS:*:*) - echo m68k-sun-sunos"$UNAME_RELEASE" - exit ;; + GUESS=m68k-sun-sunos$UNAME_RELEASE + ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x$UNAME_RELEASE" = x && UNAME_RELEASE=3 - case "`/bin/arch`" in + case `/bin/arch` in sun3) - echo m68k-sun-sunos"$UNAME_RELEASE" + GUESS=m68k-sun-sunos$UNAME_RELEASE ;; sun4) - echo sparc-sun-sunos"$UNAME_RELEASE" + GUESS=sparc-sun-sunos$UNAME_RELEASE ;; esac - exit ;; + ;; aushp:SunOS:*:*) - echo sparc-auspex-sunos"$UNAME_RELEASE" - exit ;; + GUESS=sparc-auspex-sunos$UNAME_RELEASE + ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor @@ -448,41 +496,41 @@ case "$UNAME_MACHINE:$UNAME_SYSTEM:$UNAME_RELEASE:$UNAME_VERSION" in # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) - echo m68k-atari-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-atari-mint$UNAME_RELEASE + ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) - echo m68k-milan-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-milan-mint$UNAME_RELEASE + ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) - echo m68k-hades-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-hades-mint$UNAME_RELEASE + ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) - echo m68k-unknown-mint"$UNAME_RELEASE" - exit ;; + GUESS=m68k-unknown-mint$UNAME_RELEASE + ;; m68k:machten:*:*) - echo m68k-apple-machten"$UNAME_RELEASE" - exit ;; + GUESS=m68k-apple-machten$UNAME_RELEASE + ;; powerpc:machten:*:*) - echo powerpc-apple-machten"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-apple-machten$UNAME_RELEASE + ;; RISC*:Mach:*:*) - echo mips-dec-mach_bsd4.3 - exit ;; + GUESS=mips-dec-mach_bsd4.3 + ;; RISC*:ULTRIX:*:*) - echo mips-dec-ultrix"$UNAME_RELEASE" - exit ;; + GUESS=mips-dec-ultrix$UNAME_RELEASE + ;; VAX*:ULTRIX*:*:*) - echo vax-dec-ultrix"$UNAME_RELEASE" - exit ;; + GUESS=vax-dec-ultrix$UNAME_RELEASE + ;; 2020:CLIX:*:* | 2430:CLIX:*:*) - echo clipper-intergraph-clix"$UNAME_RELEASE" - exit ;; + GUESS=clipper-intergraph-clix$UNAME_RELEASE + ;; mips:*:*:UMIPS | mips:*:*:RISCos) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -510,82 +558,84 @@ EOF dummyarg=`echo "$UNAME_RELEASE" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`"$dummy" "$dummyarg"` && { echo "$SYSTEM_NAME"; exit; } - echo mips-mips-riscos"$UNAME_RELEASE" - exit ;; + 
GUESS=mips-mips-riscos$UNAME_RELEASE + ;; Motorola:PowerMAX_OS:*:*) - echo powerpc-motorola-powermax - exit ;; + GUESS=powerpc-motorola-powermax + ;; Motorola:*:4.3:PL8-*) - echo powerpc-harris-powermax - exit ;; + GUESS=powerpc-harris-powermax + ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) - echo powerpc-harris-powermax - exit ;; + GUESS=powerpc-harris-powermax + ;; Night_Hawk:Power_UNIX:*:*) - echo powerpc-harris-powerunix - exit ;; + GUESS=powerpc-harris-powerunix + ;; m88k:CX/UX:7*:*) - echo m88k-harris-cxux7 - exit ;; + GUESS=m88k-harris-cxux7 + ;; m88k:*:4*:R4*) - echo m88k-motorola-sysv4 - exit ;; + GUESS=m88k-motorola-sysv4 + ;; m88k:*:3*:R3*) - echo m88k-motorola-sysv3 - exit ;; + GUESS=m88k-motorola-sysv3 + ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` - if [ "$UNAME_PROCESSOR" = mc88100 ] || [ "$UNAME_PROCESSOR" = mc88110 ] + if test "$UNAME_PROCESSOR" = mc88100 || test "$UNAME_PROCESSOR" = mc88110 then - if [ "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx ] || \ - [ "$TARGET_BINARY_INTERFACE"x = x ] + if test "$TARGET_BINARY_INTERFACE"x = m88kdguxelfx || \ + test "$TARGET_BINARY_INTERFACE"x = x then - echo m88k-dg-dgux"$UNAME_RELEASE" + GUESS=m88k-dg-dgux$UNAME_RELEASE else - echo m88k-dg-dguxbcs"$UNAME_RELEASE" + GUESS=m88k-dg-dguxbcs$UNAME_RELEASE fi else - echo i586-dg-dgux"$UNAME_RELEASE" + GUESS=i586-dg-dgux$UNAME_RELEASE fi - exit ;; + ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) - echo m88k-dolphin-sysv3 - exit ;; + GUESS=m88k-dolphin-sysv3 + ;; M88*:*:R3*:*) # Delta 88k system running SVR3 - echo m88k-motorola-sysv3 - exit ;; + GUESS=m88k-motorola-sysv3 + ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) - echo m88k-tektronix-sysv3 - exit ;; + GUESS=m88k-tektronix-sysv3 + ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) - echo m68k-tektronix-bsd - exit ;; + GUESS=m68k-tektronix-bsd + ;; *:IRIX*:*:*) - echo mips-sgi-irix"`echo "$UNAME_RELEASE"|sed -e 's/-/_/g'`" - exit ;; + IRIX_REL=`echo "$UNAME_RELEASE" | sed -e 's/-/_/g'` + GUESS=mips-sgi-irix$IRIX_REL + ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. 
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id - exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + GUESS=romp-ibm-aix # uname -m gives an 8 hex-code CPU id + ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) - echo i386-ibm-aix - exit ;; + GUESS=i386-ibm-aix + ;; ia64:AIX:*:*) - if [ -x /usr/bin/oslevel ] ; then + if test -x /usr/bin/oslevel ; then IBM_REV=`/usr/bin/oslevel` else - IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE fi - echo "$UNAME_MACHINE"-ibm-aix"$IBM_REV" - exit ;; + GUESS=$UNAME_MACHINE-ibm-aix$IBM_REV + ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" #include - main() + int + main () { if (!__power_pc()) exit(1); @@ -595,16 +645,16 @@ EOF EOF if $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` then - echo "$SYSTEM_NAME" + GUESS=$SYSTEM_NAME else - echo rs6000-ibm-aix3.2.5 + GUESS=rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then - echo rs6000-ibm-aix3.2.4 + GUESS=rs6000-ibm-aix3.2.4 else - echo rs6000-ibm-aix3.2 + GUESS=rs6000-ibm-aix3.2 fi - exit ;; + ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El "$IBM_CPU_ID" | grep ' POWER' >/dev/null 2>&1; then @@ -612,56 +662,56 @@ EOF else IBM_ARCH=powerpc fi - if [ -x /usr/bin/lslpp ] ; then - IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | + if test -x /usr/bin/lslpp ; then + IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | \ awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else - IBM_REV="$UNAME_VERSION.$UNAME_RELEASE" + IBM_REV=$UNAME_VERSION.$UNAME_RELEASE fi - echo "$IBM_ARCH"-ibm-aix"$IBM_REV" - exit ;; + GUESS=$IBM_ARCH-ibm-aix$IBM_REV + ;; *:AIX:*:*) - echo rs6000-ibm-aix - exit ;; + GUESS=rs6000-ibm-aix + ;; ibmrt:4.4BSD:*|romp-ibm:4.4BSD:*) - echo romp-ibm-bsd4.4 - exit ;; + GUESS=romp-ibm-bsd4.4 + ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and - echo romp-ibm-bsd"$UNAME_RELEASE" # 4.3 with uname added to - exit ;; # report: romp-ibm BSD 4.3 + GUESS=romp-ibm-bsd$UNAME_RELEASE # 4.3 with uname added to + ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) - echo rs6000-bull-bosx - exit ;; + GUESS=rs6000-bull-bosx + ;; DPX/2?00:B.O.S.:*:*) - echo m68k-bull-sysv3 - exit ;; + GUESS=m68k-bull-sysv3 + ;; 9000/[34]??:4.3bsd:1.*:*) - echo m68k-hp-bsd - exit ;; + GUESS=m68k-hp-bsd + ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) - echo m68k-hp-bsd4.4 - exit ;; + GUESS=m68k-hp-bsd4.4 + ;; 9000/[34678]??:HP-UX:*:*) - HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` - case "$UNAME_MACHINE" in + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + case $UNAME_MACHINE in 9000/31?) HP_ARCH=m68000 ;; 9000/[34]??) 
HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) - if [ -x /usr/bin/getconf ]; then + if test -x /usr/bin/getconf; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` - case "$sc_cpu_version" in + case $sc_cpu_version in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 - case "$sc_kernel_bits" in + case $sc_kernel_bits in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi - if [ "$HP_ARCH" = "" ]; then + if test "$HP_ARCH" = ""; then set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -669,7 +719,8 @@ EOF #include #include - int main () + int + main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); @@ -700,7 +751,7 @@ EOF test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac - if [ "$HP_ARCH" = hppa2.0w ] + if test "$HP_ARCH" = hppa2.0w then set_cc_for_build @@ -721,12 +772,12 @@ EOF HP_ARCH=hppa64 fi fi - echo "$HP_ARCH"-hp-hpux"$HPUX_REV" - exit ;; + GUESS=$HP_ARCH-hp-hpux$HPUX_REV + ;; ia64:HP-UX:*:*) - HPUX_REV=`echo "$UNAME_RELEASE"|sed -e 's/[^.]*.[0B]*//'` - echo ia64-hp-hpux"$HPUX_REV" - exit ;; + HPUX_REV=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*.[0B]*//'` + GUESS=ia64-hp-hpux$HPUX_REV + ;; 3050*:HI-UX:*:*) set_cc_for_build sed 's/^ //' << EOF > "$dummy.c" @@ -756,36 +807,36 @@ EOF EOF $CC_FOR_BUILD -o "$dummy" "$dummy.c" && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } - echo unknown-hitachi-hiuxwe2 - exit ;; + GUESS=unknown-hitachi-hiuxwe2 + ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:*) - echo hppa1.1-hp-bsd - exit ;; + GUESS=hppa1.1-hp-bsd + ;; 9000/8??:4.3bsd:*:*) - echo hppa1.0-hp-bsd - exit ;; + GUESS=hppa1.0-hp-bsd + ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) - echo hppa1.0-hp-mpeix - exit ;; + GUESS=hppa1.0-hp-mpeix + ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:*) - echo hppa1.1-hp-osf - exit ;; + GUESS=hppa1.1-hp-osf + ;; hp8??:OSF1:*:*) - echo hppa1.0-hp-osf - exit ;; + GUESS=hppa1.0-hp-osf + ;; i*86:OSF1:*:*) - if [ -x /usr/sbin/sysversion ] ; then - echo "$UNAME_MACHINE"-unknown-osf1mk + if test -x /usr/sbin/sysversion ; then + GUESS=$UNAME_MACHINE-unknown-osf1mk else - echo "$UNAME_MACHINE"-unknown-osf1 + GUESS=$UNAME_MACHINE-unknown-osf1 fi - exit ;; + ;; parisc*:Lites*:*:*) - echo hppa1.1-hp-lites - exit ;; + GUESS=hppa1.1-hp-lites + ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) - echo c1-convex-bsd - exit ;; + GUESS=c1-convex-bsd + ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd @@ -793,17 +844,18 @@ EOF fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) - echo c34-convex-bsd - exit ;; + GUESS=c34-convex-bsd + ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) - echo c38-convex-bsd - exit ;; + GUESS=c38-convex-bsd + ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) - echo c4-convex-bsd - exit ;; + GUESS=c4-convex-bsd + ;; CRAY*Y-MP:*:*:*) - echo ymp-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=ymp-cray-unicos$CRAY_REL + ;; CRAY*[A-Z]90:*:*:*) echo "$UNAME_MACHINE"-cray-unicos"$UNAME_RELEASE" \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ @@ -811,114 +863,155 @@ EOF -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) - echo t90-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=t90-cray-unicos$CRAY_REL + ;; CRAY*T3E:*:*:*) - echo alphaev5-cray-unicosmk"$UNAME_RELEASE" | sed -e 
's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=alphaev5-cray-unicosmk$CRAY_REL + ;; CRAY*SV1:*:*:*) - echo sv1-cray-unicos"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=sv1-cray-unicos$CRAY_REL + ;; *:UNICOS/mp:*:*) - echo craynv-cray-unicosmp"$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/' - exit ;; + CRAY_REL=`echo "$UNAME_RELEASE" | sed -e 's/\.[^.]*$/.X/'` + GUESS=craynv-cray-unicosmp$CRAY_REL + ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | sed -e 's/ /_/'` - echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; + GUESS=${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo "$UNAME_RELEASE" | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` - echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" - exit ;; + GUESS=sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL} + ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) - echo "$UNAME_MACHINE"-pc-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-pc-bsdi$UNAME_RELEASE + ;; sparc*:BSD/OS:*:*) - echo sparc-unknown-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=sparc-unknown-bsdi$UNAME_RELEASE + ;; *:BSD/OS:*:*) - echo "$UNAME_MACHINE"-unknown-bsdi"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-bsdi$UNAME_RELEASE + ;; arm:FreeBSD:*:*) UNAME_PROCESSOR=`uname -p` set_cc_for_build if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabi + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabi else - echo "${UNAME_PROCESSOR}"-unknown-freebsd"`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`"-gnueabihf + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL-gnueabihf fi - exit ;; + ;; *:FreeBSD:*:*) - UNAME_PROCESSOR=`/usr/bin/uname -p` - case "$UNAME_PROCESSOR" in + UNAME_PROCESSOR=`uname -p` + case $UNAME_PROCESSOR in amd64) UNAME_PROCESSOR=x86_64 ;; i386) UNAME_PROCESSOR=i586 ;; esac - echo "$UNAME_PROCESSOR"-unknown-freebsd"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" - exit ;; + FREEBSD_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_PROCESSOR-unknown-freebsd$FREEBSD_REL + ;; i*:CYGWIN*:*) - echo "$UNAME_MACHINE"-pc-cygwin - exit ;; + GUESS=$UNAME_MACHINE-pc-cygwin + ;; *:MINGW64*:*) - echo "$UNAME_MACHINE"-pc-mingw64 - exit ;; + GUESS=$UNAME_MACHINE-pc-mingw64 + ;; *:MINGW*:*) - echo "$UNAME_MACHINE"-pc-mingw32 - exit ;; + GUESS=$UNAME_MACHINE-pc-mingw32 + ;; *:MSYS*:*) - echo "$UNAME_MACHINE"-pc-msys - exit ;; + GUESS=$UNAME_MACHINE-pc-msys + ;; i*:PW*:*) - echo "$UNAME_MACHINE"-pc-pw32 - exit ;; + GUESS=$UNAME_MACHINE-pc-pw32 + ;; + *:SerenityOS:*:*) + GUESS=$UNAME_MACHINE-pc-serenity + ;; *:Interix*:*) - case "$UNAME_MACHINE" in + case $UNAME_MACHINE in x86) - echo i586-pc-interix"$UNAME_RELEASE" - exit ;; + GUESS=i586-pc-interix$UNAME_RELEASE + ;; authenticamd | genuineintel | EM64T) - echo x86_64-unknown-interix"$UNAME_RELEASE" - exit ;; + 
GUESS=x86_64-unknown-interix$UNAME_RELEASE + ;; IA64) - echo ia64-unknown-interix"$UNAME_RELEASE" - exit ;; + GUESS=ia64-unknown-interix$UNAME_RELEASE + ;; esac ;; i*:UWIN*:*) - echo "$UNAME_MACHINE"-pc-uwin - exit ;; + GUESS=$UNAME_MACHINE-pc-uwin + ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) - echo x86_64-pc-cygwin - exit ;; + GUESS=x86_64-pc-cygwin + ;; prep*:SunOS:5.*:*) - echo powerpcle-unknown-solaris2"`echo "$UNAME_RELEASE"|sed -e 's/[^.]*//'`" - exit ;; + SUN_REL=`echo "$UNAME_RELEASE" | sed -e 's/[^.]*//'` + GUESS=powerpcle-unknown-solaris2$SUN_REL + ;; *:GNU:*:*) # the GNU system - echo "`echo "$UNAME_MACHINE"|sed -e 's,[-/].*$,,'`-unknown-$LIBC`echo "$UNAME_RELEASE"|sed -e 's,/.*$,,'`" - exit ;; + GNU_ARCH=`echo "$UNAME_MACHINE" | sed -e 's,[-/].*$,,'` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's,/.*$,,'` + GUESS=$GNU_ARCH-unknown-$LIBC$GNU_REL + ;; *:GNU/*:*:*) # other systems with GNU libc and userland - echo "$UNAME_MACHINE-unknown-`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`-$LIBC" - exit ;; + GNU_SYS=`echo "$UNAME_SYSTEM" | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"` + GNU_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-$GNU_SYS$GNU_REL-$LIBC + ;; + x86_64:[Mm]anagarm:*:*|i?86:[Mm]anagarm:*:*) + GUESS="$UNAME_MACHINE-pc-managarm-mlibc" + ;; + *:[Mm]anagarm:*:*) + GUESS="$UNAME_MACHINE-unknown-managarm-mlibc" + ;; *:Minix:*:*) - echo "$UNAME_MACHINE"-unknown-minix - exit ;; + GUESS=$UNAME_MACHINE-unknown-minix + ;; aarch64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + set_cc_for_build + CPU=$UNAME_MACHINE + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + ABI=64 + sed 's/^ //' << EOF > "$dummy.c" + #ifdef __ARM_EABI__ + #ifdef __ARM_PCS_VFP + ABI=eabihf + #else + ABI=eabi + #endif + #endif +EOF + cc_set_abi=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^ABI' | sed 's, ,,g'` + eval "$cc_set_abi" + case $ABI in + eabi | eabihf) CPU=armv8l; LIBCABI=$LIBC$ABI ;; + esac + fi + GUESS=$CPU-unknown-linux-$LIBCABI + ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; alpha:Linux:*:*) - case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' /proc/cpuinfo 2>/dev/null` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; @@ -929,60 +1022,72 @@ EOF esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; - arc:Linux:*:* | arceb:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + arc:Linux:*:* | arceb:Linux:*:* | arc32:Linux:*:* | arc64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; arm*:Linux:*:*) set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabi + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabi else - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC"eabihf + GUESS=$UNAME_MACHINE-unknown-linux-${LIBC}eabihf fi fi - exit ;; + ;; avr32*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; cris:Linux:*:*) - echo "$UNAME_MACHINE"-axis-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; crisv32:Linux:*:*) - echo "$UNAME_MACHINE"-axis-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-axis-linux-$LIBC + ;; e2k:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; frv:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; hexagon:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; i*86:Linux:*:*) - echo "$UNAME_MACHINE"-pc-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-pc-linux-$LIBC + ;; ia64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; k1om:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + kvx:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; + kvx:cos:*:*) + GUESS=$UNAME_MACHINE-unknown-cos + ;; + kvx:mbr:*:*) + GUESS=$UNAME_MACHINE-unknown-mbr + ;; + loongarch32:Linux:*:* | loongarch64:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; m32r*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; m68*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; mips:Linux:*:* | mips64:Linux:*:*) set_cc_for_build IS_GLIBC=0 @@ -1027,113 +1132,135 @@ EOF #endif #endif EOF - eval "`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'`" + cc_set_vars=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^CPU\|^MIPS_ENDIAN\|^LIBCABI'` + eval "$cc_set_vars" test "x$CPU" != x && { echo "$CPU${MIPS_ENDIAN}-unknown-linux-$LIBCABI"; exit; } ;; mips64el:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; openrisc*:Linux:*:*) - echo or1k-unknown-linux-"$LIBC" - exit ;; + GUESS=or1k-unknown-linux-$LIBC + ;; or32:Linux:*:* | or1k*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; padre:Linux:*:*) - echo sparc-unknown-linux-"$LIBC" - exit ;; + GUESS=sparc-unknown-linux-$LIBC + ;; parisc64:Linux:*:* | hppa64:Linux:*:*) - echo hppa64-unknown-linux-"$LIBC" - exit ;; + GUESS=hppa64-unknown-linux-$LIBC + ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' 
' -f2` in - PA7*) echo hppa1.1-unknown-linux-"$LIBC" ;; - PA8*) echo hppa2.0-unknown-linux-"$LIBC" ;; - *) echo hppa-unknown-linux-"$LIBC" ;; + PA7*) GUESS=hppa1.1-unknown-linux-$LIBC ;; + PA8*) GUESS=hppa2.0-unknown-linux-$LIBC ;; + *) GUESS=hppa-unknown-linux-$LIBC ;; esac - exit ;; + ;; ppc64:Linux:*:*) - echo powerpc64-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc64-unknown-linux-$LIBC + ;; ppc:Linux:*:*) - echo powerpc-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc-unknown-linux-$LIBC + ;; ppc64le:Linux:*:*) - echo powerpc64le-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpc64le-unknown-linux-$LIBC + ;; ppcle:Linux:*:*) - echo powerpcle-unknown-linux-"$LIBC" - exit ;; - riscv32:Linux:*:* | riscv64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=powerpcle-unknown-linux-$LIBC + ;; + riscv32:Linux:*:* | riscv32be:Linux:*:* | riscv64:Linux:*:* | riscv64be:Linux:*:*) + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; s390:Linux:*:* | s390x:Linux:*:*) - echo "$UNAME_MACHINE"-ibm-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-ibm-linux-$LIBC + ;; sh64*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; sh*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; sparc:Linux:*:* | sparc64:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; tile*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; vax:Linux:*:*) - echo "$UNAME_MACHINE"-dec-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-dec-linux-$LIBC + ;; x86_64:Linux:*:*) - echo "$UNAME_MACHINE"-pc-linux-"$LIBC" - exit ;; + set_cc_for_build + CPU=$UNAME_MACHINE + LIBCABI=$LIBC + if test "$CC_FOR_BUILD" != no_compiler_found; then + ABI=64 + sed 's/^ //' << EOF > "$dummy.c" + #ifdef __i386__ + ABI=x86 + #else + #ifdef __ILP32__ + ABI=x32 + #endif + #endif +EOF + cc_set_abi=`$CC_FOR_BUILD -E "$dummy.c" 2>/dev/null | grep '^ABI' | sed 's, ,,g'` + eval "$cc_set_abi" + case $ABI in + x86) CPU=i686 ;; + x32) LIBCABI=${LIBC}x32 ;; + esac + fi + GUESS=$CPU-pc-linux-$LIBCABI + ;; xtensa*:Linux:*:*) - echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" - exit ;; + GUESS=$UNAME_MACHINE-unknown-linux-$LIBC + ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. - echo i386-sequent-sysv4 - exit ;; + GUESS=i386-sequent-sysv4 + ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. - echo "$UNAME_MACHINE"-pc-sysv4.2uw"$UNAME_VERSION" - exit ;; + GUESS=$UNAME_MACHINE-pc-sysv4.2uw$UNAME_VERSION + ;; i*86:OS/2:*:*) - # If we were able to find `uname', then EMX Unix compatibility + # If we were able to find 'uname', then EMX Unix compatibility # is probably installed. 
- echo "$UNAME_MACHINE"-pc-os2-emx - exit ;; + GUESS=$UNAME_MACHINE-pc-os2-emx + ;; i*86:XTS-300:*:STOP) - echo "$UNAME_MACHINE"-unknown-stop - exit ;; + GUESS=$UNAME_MACHINE-unknown-stop + ;; i*86:atheos:*:*) - echo "$UNAME_MACHINE"-unknown-atheos - exit ;; + GUESS=$UNAME_MACHINE-unknown-atheos + ;; i*86:syllable:*:*) - echo "$UNAME_MACHINE"-pc-syllable - exit ;; + GUESS=$UNAME_MACHINE-pc-syllable + ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) - echo i386-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=i386-unknown-lynxos$UNAME_RELEASE + ;; i*86:*DOS:*:*) - echo "$UNAME_MACHINE"-pc-msdosdjgpp - exit ;; + GUESS=$UNAME_MACHINE-pc-msdosdjgpp + ;; i*86:*:4.*:*) UNAME_REL=`echo "$UNAME_RELEASE" | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then - echo "$UNAME_MACHINE"-univel-sysv"$UNAME_REL" + GUESS=$UNAME_MACHINE-univel-sysv$UNAME_REL else - echo "$UNAME_MACHINE"-pc-sysv"$UNAME_REL" + GUESS=$UNAME_MACHINE-pc-sysv$UNAME_REL fi - exit ;; + ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in @@ -1141,12 +1268,12 @@ EOF *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac - echo "$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}" - exit ;; + GUESS=$UNAME_MACHINE-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 @@ -1156,11 +1283,11 @@ EOF && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 - echo "$UNAME_MACHINE"-pc-sco"$UNAME_REL" + GUESS=$UNAME_MACHINE-pc-sco$UNAME_REL else - echo "$UNAME_MACHINE"-pc-sysv32 + GUESS=$UNAME_MACHINE-pc-sysv32 fi - exit ;; + ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about @@ -1168,31 +1295,31 @@ EOF # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. - echo i586-pc-msdosdjgpp - exit ;; + GUESS=i586-pc-msdosdjgpp + ;; Intel:Mach:3*:*) - echo i386-pc-mach3 - exit ;; + GUESS=i386-pc-mach3 + ;; paragon:*:*:*) - echo i860-intel-osf1 - exit ;; + GUESS=i860-intel-osf1 + ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then - echo i860-stardent-sysv"$UNAME_RELEASE" # Stardent Vistra i860-SVR4 + GUESS=i860-stardent-sysv$UNAME_RELEASE # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
- echo i860-unknown-sysv"$UNAME_RELEASE" # Unknown i860-SVR4 + GUESS=i860-unknown-sysv$UNAME_RELEASE # Unknown i860-SVR4 fi - exit ;; + ;; mini*:CTIX:SYS*5:*) # "miniframe" - echo m68010-convergent-sysv - exit ;; + GUESS=m68010-convergent-sysv + ;; mc68k:UNIX:SYSTEM5:3.51m) - echo m68k-convergent-sysv - exit ;; + GUESS=m68k-convergent-sysv + ;; M680?0:D-NIX:5.3:*) - echo m68k-diab-dnix - exit ;; + GUESS=m68k-diab-dnix + ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) @@ -1217,113 +1344,119 @@ EOF /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3"$OS_REL"; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) - echo m68k-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=m68k-unknown-lynxos$UNAME_RELEASE + ;; mc68030:UNIX_System_V:4.*:*) - echo m68k-atari-sysv4 - exit ;; + GUESS=m68k-atari-sysv4 + ;; TSUNAMI:LynxOS:2.*:*) - echo sparc-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=sparc-unknown-lynxos$UNAME_RELEASE + ;; rs6000:LynxOS:2.*:*) - echo rs6000-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=rs6000-unknown-lynxos$UNAME_RELEASE + ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) - echo powerpc-unknown-lynxos"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-unknown-lynxos$UNAME_RELEASE + ;; SM[BE]S:UNIX_SV:*:*) - echo mips-dde-sysv"$UNAME_RELEASE" - exit ;; + GUESS=mips-dde-sysv$UNAME_RELEASE + ;; RM*:ReliantUNIX-*:*:*) - echo mips-sni-sysv4 - exit ;; + GUESS=mips-sni-sysv4 + ;; RM*:SINIX-*:*:*) - echo mips-sni-sysv4 - exit ;; + GUESS=mips-sni-sysv4 + ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` - echo "$UNAME_MACHINE"-sni-sysv4 + GUESS=$UNAME_MACHINE-sni-sysv4 else - echo ns32k-sni-sysv + GUESS=ns32k-sni-sysv fi - exit ;; - PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + ;; + PENTIUM:*:4.0*:*) # Unisys 'ClearPath HMP IX 4000' SVR4/MP effort # says - echo i586-unisys-sysv4 - exit ;; + GUESS=i586-unisys-sysv4 + ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm - echo hppa1.1-stratus-sysv4 - exit ;; + GUESS=hppa1.1-stratus-sysv4 + ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. - echo i860-stratus-sysv4 - exit ;; + GUESS=i860-stratus-sysv4 + ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. - echo "$UNAME_MACHINE"-stratus-vos - exit ;; + GUESS=$UNAME_MACHINE-stratus-vos + ;; *:VOS:*:*) # From Paul.Green@stratus.com. - echo hppa1.1-stratus-vos - exit ;; + GUESS=hppa1.1-stratus-vos + ;; mc68*:A/UX:*:*) - echo m68k-apple-aux"$UNAME_RELEASE" - exit ;; + GUESS=m68k-apple-aux$UNAME_RELEASE + ;; news*:NEWS-OS:6*:*) - echo mips-sony-newsos6 - exit ;; + GUESS=mips-sony-newsos6 + ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) - if [ -d /usr/nec ]; then - echo mips-nec-sysv"$UNAME_RELEASE" + if test -d /usr/nec; then + GUESS=mips-nec-sysv$UNAME_RELEASE else - echo mips-unknown-sysv"$UNAME_RELEASE" + GUESS=mips-unknown-sysv$UNAME_RELEASE fi - exit ;; + ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. - echo powerpc-be-beos - exit ;; + GUESS=powerpc-be-beos + ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. - echo powerpc-apple-beos - exit ;; + GUESS=powerpc-apple-beos + ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. 
- echo i586-pc-beos - exit ;; + GUESS=i586-pc-beos + ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. - echo i586-pc-haiku - exit ;; - x86_64:Haiku:*:*) - echo x86_64-unknown-haiku - exit ;; + GUESS=i586-pc-haiku + ;; + ppc:Haiku:*:*) # Haiku running on Apple PowerPC + GUESS=powerpc-apple-haiku + ;; + *:Haiku:*:*) # Haiku modern gcc (not bound by BeOS compat) + GUESS=$UNAME_MACHINE-unknown-haiku + ;; SX-4:SUPER-UX:*:*) - echo sx4-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx4-nec-superux$UNAME_RELEASE + ;; SX-5:SUPER-UX:*:*) - echo sx5-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx5-nec-superux$UNAME_RELEASE + ;; SX-6:SUPER-UX:*:*) - echo sx6-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx6-nec-superux$UNAME_RELEASE + ;; SX-7:SUPER-UX:*:*) - echo sx7-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx7-nec-superux$UNAME_RELEASE + ;; SX-8:SUPER-UX:*:*) - echo sx8-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx8-nec-superux$UNAME_RELEASE + ;; SX-8R:SUPER-UX:*:*) - echo sx8r-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sx8r-nec-superux$UNAME_RELEASE + ;; SX-ACE:SUPER-UX:*:*) - echo sxace-nec-superux"$UNAME_RELEASE" - exit ;; + GUESS=sxace-nec-superux$UNAME_RELEASE + ;; Power*:Rhapsody:*:*) - echo powerpc-apple-rhapsody"$UNAME_RELEASE" - exit ;; + GUESS=powerpc-apple-rhapsody$UNAME_RELEASE + ;; *:Rhapsody:*:*) - echo "$UNAME_MACHINE"-apple-rhapsody"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-apple-rhapsody$UNAME_RELEASE + ;; + arm64:Darwin:*:*) + GUESS=aarch64-apple-darwin$UNAME_RELEASE + ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` case $UNAME_PROCESSOR in @@ -1338,7 +1471,7 @@ EOF else set_cc_for_build fi - if [ "$CC_FOR_BUILD" != no_compiler_found ]; then + if test "$CC_FOR_BUILD" != no_compiler_found; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null @@ -1359,109 +1492,122 @@ EOF # uname -m returns i386 or x86_64 UNAME_PROCESSOR=$UNAME_MACHINE fi - echo "$UNAME_PROCESSOR"-apple-darwin"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_PROCESSOR-apple-darwin$UNAME_RELEASE + ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi - echo "$UNAME_PROCESSOR"-"$UNAME_MACHINE"-nto-qnx"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_PROCESSOR-$UNAME_MACHINE-nto-qnx$UNAME_RELEASE + ;; *:QNX:*:4*) - echo i386-pc-qnx - exit ;; + GUESS=i386-pc-qnx + ;; NEO-*:NONSTOP_KERNEL:*:*) - echo neo-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=neo-tandem-nsk$UNAME_RELEASE + ;; NSE-*:NONSTOP_KERNEL:*:*) - echo nse-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nse-tandem-nsk$UNAME_RELEASE + ;; NSR-*:NONSTOP_KERNEL:*:*) - echo nsr-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsr-tandem-nsk$UNAME_RELEASE + ;; NSV-*:NONSTOP_KERNEL:*:*) - echo nsv-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsv-tandem-nsk$UNAME_RELEASE + ;; NSX-*:NONSTOP_KERNEL:*:*) - echo nsx-tandem-nsk"$UNAME_RELEASE" - exit ;; + GUESS=nsx-tandem-nsk$UNAME_RELEASE + ;; *:NonStop-UX:*:*) - echo mips-compaq-nonstopux - exit ;; + GUESS=mips-compaq-nonstopux + ;; BS2000:POSIX*:*:*) - echo bs2000-siemens-sysv - exit ;; + GUESS=bs2000-siemens-sysv + ;; DS/*:UNIX_System_V:*:*) - echo "$UNAME_MACHINE"-"$UNAME_SYSTEM"-"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-$UNAME_SYSTEM-$UNAME_RELEASE + ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
- # shellcheck disable=SC2154 - if test "$cputype" = 386; then + if test "${cputype-}" = 386; then UNAME_MACHINE=i386 - else - UNAME_MACHINE="$cputype" + elif test "x${cputype-}" != x; then + UNAME_MACHINE=$cputype fi - echo "$UNAME_MACHINE"-unknown-plan9 - exit ;; + GUESS=$UNAME_MACHINE-unknown-plan9 + ;; *:TOPS-10:*:*) - echo pdp10-unknown-tops10 - exit ;; + GUESS=pdp10-unknown-tops10 + ;; *:TENEX:*:*) - echo pdp10-unknown-tenex - exit ;; + GUESS=pdp10-unknown-tenex + ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) - echo pdp10-dec-tops20 - exit ;; + GUESS=pdp10-dec-tops20 + ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) - echo pdp10-xkl-tops20 - exit ;; + GUESS=pdp10-xkl-tops20 + ;; *:TOPS-20:*:*) - echo pdp10-unknown-tops20 - exit ;; + GUESS=pdp10-unknown-tops20 + ;; *:ITS:*:*) - echo pdp10-unknown-its - exit ;; + GUESS=pdp10-unknown-its + ;; SEI:*:*:SEIUX) - echo mips-sei-seiux"$UNAME_RELEASE" - exit ;; + GUESS=mips-sei-seiux$UNAME_RELEASE + ;; *:DragonFly:*:*) - echo "$UNAME_MACHINE"-unknown-dragonfly"`echo "$UNAME_RELEASE"|sed -e 's/[-(].*//'`" - exit ;; + DRAGONFLY_REL=`echo "$UNAME_RELEASE" | sed -e 's/[-(].*//'` + GUESS=$UNAME_MACHINE-unknown-dragonfly$DRAGONFLY_REL + ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` - case "$UNAME_MACHINE" in - A*) echo alpha-dec-vms ; exit ;; - I*) echo ia64-dec-vms ; exit ;; - V*) echo vax-dec-vms ; exit ;; + case $UNAME_MACHINE in + A*) GUESS=alpha-dec-vms ;; + I*) GUESS=ia64-dec-vms ;; + V*) GUESS=vax-dec-vms ;; esac ;; *:XENIX:*:SysV) - echo i386-pc-xenix - exit ;; + GUESS=i386-pc-xenix + ;; i*86:skyos:*:*) - echo "$UNAME_MACHINE"-pc-skyos"`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'`" - exit ;; + SKYOS_REL=`echo "$UNAME_RELEASE" | sed -e 's/ .*$//'` + GUESS=$UNAME_MACHINE-pc-skyos$SKYOS_REL + ;; i*86:rdos:*:*) - echo "$UNAME_MACHINE"-pc-rdos - exit ;; - i*86:AROS:*:*) - echo "$UNAME_MACHINE"-pc-aros - exit ;; + GUESS=$UNAME_MACHINE-pc-rdos + ;; + i*86:Fiwix:*:*) + GUESS=$UNAME_MACHINE-pc-fiwix + ;; + *:AROS:*:*) + GUESS=$UNAME_MACHINE-unknown-aros + ;; x86_64:VMkernel:*:*) - echo "$UNAME_MACHINE"-unknown-esx - exit ;; + GUESS=$UNAME_MACHINE-unknown-esx + ;; amd64:Isilon\ OneFS:*:*) - echo x86_64-unknown-onefs - exit ;; + GUESS=x86_64-unknown-onefs + ;; *:Unleashed:*:*) - echo "$UNAME_MACHINE"-unknown-unleashed"$UNAME_RELEASE" - exit ;; + GUESS=$UNAME_MACHINE-unknown-unleashed$UNAME_RELEASE + ;; + *:Ironclad:*:*) + GUESS=$UNAME_MACHINE-unknown-ironclad + ;; esac +# Do we have a guess based on uname results? +if test "x$GUESS" != x; then + echo "$GUESS" + exit +fi + # No uname command or uname output not recognized. set_cc_for_build cat > "$dummy.c" < "$dummy.c" </dev/null && SYSTEM_NAME=`$dummy` && +$CC_FOR_BUILD -o "$dummy" "$dummy.c" 2>/dev/null && SYSTEM_NAME=`"$dummy"` && { echo "$SYSTEM_NAME"; exit; } # Apollos put the system type in the environment. @@ -1601,7 +1748,7 @@ test -d /usr/apollo && { echo "$ISP-apollo-$SYSTYPE"; exit; } echo "$0: unable to guess system type" >&2 -case "$UNAME_MACHINE:$UNAME_SYSTEM" in +case $UNAME_MACHINE:$UNAME_SYSTEM in mips:Linux | mips64:Linux) # If we got here on MIPS GNU/Linux, output extra information. 
cat >&2 <&2 <&2 + echo "Invalid configuration '$1': more than four components" >&2 exit 1 ;; *-*-*-*) basic_machine=$field1-$field2 - os=$field3-$field4 + basic_os=$field3-$field4 ;; *-*-*) # Ambiguous whether COMPANY is present, or skipped and KERNEL-OS is two # parts maybe_os=$field2-$field3 case $maybe_os in - nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc \ - | linux-newlib* | linux-musl* | linux-uclibc* | uclinux-uclibc* \ - | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* \ - | netbsd*-eabi* | kopensolaris*-gnu* | cloudabi*-eabi* \ - | storm-chaos* | os2-emx* | rtmk-nova*) + cloudabi*-eabi* \ + | kfreebsd*-gnu* \ + | knetbsd*-gnu* \ + | kopensolaris*-gnu* \ + | linux-* \ + | managarm-* \ + | netbsd*-eabi* \ + | netbsd*-gnu* \ + | nto-qnx* \ + | os2-emx* \ + | rtmk-nova* \ + | storm-chaos* \ + | uclinux-gnu* \ + | uclinux-uclibc* \ + | windows-* ) basic_machine=$field1 - os=$maybe_os + basic_os=$maybe_os ;; android-linux) basic_machine=$field1-unknown - os=linux-android + basic_os=linux-android ;; *) basic_machine=$field1-$field2 - os=$field3 + basic_os=$field3 ;; esac ;; *-*) - # A lone config we happen to match not fitting any pattern case $field1-$field2 in + # Shorthands that happen to contain a single dash + convex-c[12] | convex-c3[248]) + basic_machine=$field2-convex + basic_os= + ;; decstation-3100) basic_machine=mips-dec - os= + basic_os= ;; *-*) # Second component is usually, but not always the OS case $field2 in - # Prevent following clause from handling this valid os + # Do not treat sunos as a manufacturer sun*os*) basic_machine=$field1 - os=$field2 + basic_os=$field2 ;; # Manufacturers - dec* | mips* | sequent* | encore* | pc533* | sgi* | sony* \ - | att* | 7300* | 3300* | delta* | motorola* | sun[234]* \ - | unicom* | ibm* | next | hp | isi* | apollo | altos* \ - | convergent* | ncr* | news | 32* | 3600* | 3100* \ - | hitachi* | c[123]* | convex* | sun | crds | omron* | dg \ - | ultra | tti* | harris | dolphin | highlevel | gould \ - | cbm | ns | masscomp | apple | axis | knuth | cray \ - | microblaze* | sim | cisco \ - | oki | wec | wrs | winbond) + 3100* \ + | 32* \ + | 3300* \ + | 3600* \ + | 7300* \ + | acorn \ + | altos* \ + | apollo \ + | apple \ + | atari \ + | att* \ + | axis \ + | be \ + | bull \ + | cbm \ + | ccur \ + | cisco \ + | commodore \ + | convergent* \ + | convex* \ + | cray \ + | crds \ + | dec* \ + | delta* \ + | dg \ + | digital \ + | dolphin \ + | encore* \ + | gould \ + | harris \ + | highlevel \ + | hitachi* \ + | hp \ + | ibm* \ + | intergraph \ + | isi* \ + | knuth \ + | masscomp \ + | microblaze* \ + | mips* \ + | motorola* \ + | ncr* \ + | news \ + | next \ + | ns \ + | oki \ + | omron* \ + | pc533* \ + | rebel \ + | rom68k \ + | rombug \ + | semi \ + | sequent* \ + | siemens \ + | sgi* \ + | siemens \ + | sim \ + | sni \ + | sony* \ + | stratus \ + | sun \ + | sun[234]* \ + | tektronix \ + | tti* \ + | ultra \ + | unicom* \ + | wec \ + | winbond \ + | wrs) basic_machine=$field1-$field2 - os= + basic_os= + ;; + zephyr*) + basic_machine=$field1-unknown + basic_os=$field2 ;; *) basic_machine=$field1 - os=$field2 + basic_os=$field2 ;; esac ;; @@ -191,450 +279,431 @@ case $1 in case $field1 in 386bsd) basic_machine=i386-pc - os=bsd + basic_os=bsd ;; a29khif) basic_machine=a29k-amd - os=udi + basic_os=udi ;; adobe68k) basic_machine=m68010-adobe - os=scout + basic_os=scout ;; alliant) basic_machine=fx80-alliant - os= + basic_os= ;; altos | altos3068) basic_machine=m68k-altos - os= + basic_os= ;; am29k) 
basic_machine=a29k-none - os=bsd + basic_os=bsd ;; amdahl) basic_machine=580-amdahl - os=sysv + basic_os=sysv ;; amiga) basic_machine=m68k-unknown - os= + basic_os= ;; amigaos | amigados) basic_machine=m68k-unknown - os=amigaos + basic_os=amigaos ;; amigaunix | amix) basic_machine=m68k-unknown - os=sysv4 + basic_os=sysv4 ;; apollo68) basic_machine=m68k-apollo - os=sysv + basic_os=sysv ;; apollo68bsd) basic_machine=m68k-apollo - os=bsd + basic_os=bsd ;; aros) basic_machine=i386-pc - os=aros + basic_os=aros ;; aux) basic_machine=m68k-apple - os=aux + basic_os=aux ;; balance) basic_machine=ns32k-sequent - os=dynix + basic_os=dynix ;; blackfin) basic_machine=bfin-unknown - os=linux + basic_os=linux ;; cegcc) basic_machine=arm-unknown - os=cegcc - ;; - convex-c1) - basic_machine=c1-convex - os=bsd - ;; - convex-c2) - basic_machine=c2-convex - os=bsd - ;; - convex-c32) - basic_machine=c32-convex - os=bsd - ;; - convex-c34) - basic_machine=c34-convex - os=bsd - ;; - convex-c38) - basic_machine=c38-convex - os=bsd + basic_os=cegcc ;; cray) basic_machine=j90-cray - os=unicos + basic_os=unicos ;; crds | unos) basic_machine=m68k-crds - os= + basic_os= ;; da30) basic_machine=m68k-da30 - os= + basic_os= ;; decstation | pmax | pmin | dec3100 | decstatn) basic_machine=mips-dec - os= + basic_os= ;; delta88) basic_machine=m88k-motorola - os=sysv3 + basic_os=sysv3 ;; dicos) basic_machine=i686-pc - os=dicos + basic_os=dicos ;; djgpp) basic_machine=i586-pc - os=msdosdjgpp + basic_os=msdosdjgpp ;; ebmon29k) basic_machine=a29k-amd - os=ebmon + basic_os=ebmon ;; es1800 | OSE68k | ose68k | ose | OSE) basic_machine=m68k-ericsson - os=ose + basic_os=ose ;; gmicro) basic_machine=tron-gmicro - os=sysv + basic_os=sysv ;; go32) basic_machine=i386-pc - os=go32 + basic_os=go32 ;; h8300hms) basic_machine=h8300-hitachi - os=hms + basic_os=hms ;; h8300xray) basic_machine=h8300-hitachi - os=xray + basic_os=xray ;; h8500hms) basic_machine=h8500-hitachi - os=hms + basic_os=hms ;; harris) basic_machine=m88k-harris - os=sysv3 + basic_os=sysv3 ;; - hp300) + hp300 | hp300hpux) basic_machine=m68k-hp + basic_os=hpux ;; hp300bsd) basic_machine=m68k-hp - os=bsd - ;; - hp300hpux) - basic_machine=m68k-hp - os=hpux + basic_os=bsd ;; hppaosf) basic_machine=hppa1.1-hp - os=osf + basic_os=osf ;; hppro) basic_machine=hppa1.1-hp - os=proelf + basic_os=proelf ;; i386mach) basic_machine=i386-mach - os=mach - ;; - vsta) - basic_machine=i386-pc - os=vsta + basic_os=mach ;; isi68 | isi) basic_machine=m68k-isi - os=sysv + basic_os=sysv ;; m68knommu) basic_machine=m68k-unknown - os=linux + basic_os=linux ;; magnum | m3230) basic_machine=mips-mips - os=sysv + basic_os=sysv ;; merlin) basic_machine=ns32k-utek - os=sysv + basic_os=sysv ;; mingw64) basic_machine=x86_64-pc - os=mingw64 + basic_os=mingw64 ;; mingw32) basic_machine=i686-pc - os=mingw32 + basic_os=mingw32 ;; mingw32ce) basic_machine=arm-unknown - os=mingw32ce + basic_os=mingw32ce ;; monitor) basic_machine=m68k-rom68k - os=coff + basic_os=coff ;; morphos) basic_machine=powerpc-unknown - os=morphos + basic_os=morphos ;; moxiebox) basic_machine=moxie-unknown - os=moxiebox + basic_os=moxiebox ;; msdos) basic_machine=i386-pc - os=msdos + basic_os=msdos ;; msys) basic_machine=i686-pc - os=msys + basic_os=msys ;; mvs) basic_machine=i370-ibm - os=mvs + basic_os=mvs ;; nacl) basic_machine=le32-unknown - os=nacl + basic_os=nacl ;; ncr3000) basic_machine=i486-ncr - os=sysv4 + basic_os=sysv4 ;; netbsd386) basic_machine=i386-pc - os=netbsd + basic_os=netbsd ;; netwinder) basic_machine=armv4l-rebel - 
os=linux + basic_os=linux ;; news | news700 | news800 | news900) basic_machine=m68k-sony - os=newsos + basic_os=newsos ;; news1000) basic_machine=m68030-sony - os=newsos + basic_os=newsos ;; necv70) basic_machine=v70-nec - os=sysv + basic_os=sysv ;; nh3000) basic_machine=m68k-harris - os=cxux + basic_os=cxux ;; nh[45]000) basic_machine=m88k-harris - os=cxux + basic_os=cxux ;; nindy960) basic_machine=i960-intel - os=nindy + basic_os=nindy ;; mon960) basic_machine=i960-intel - os=mon960 + basic_os=mon960 ;; nonstopux) basic_machine=mips-compaq - os=nonstopux + basic_os=nonstopux ;; os400) basic_machine=powerpc-ibm - os=os400 + basic_os=os400 ;; OSE68000 | ose68000) basic_machine=m68000-ericsson - os=ose + basic_os=ose ;; os68k) basic_machine=m68k-none - os=os68k + basic_os=os68k ;; paragon) basic_machine=i860-intel - os=osf + basic_os=osf ;; parisc) basic_machine=hppa-unknown - os=linux + basic_os=linux + ;; + psp) + basic_machine=mipsallegrexel-sony + basic_os=psp ;; pw32) basic_machine=i586-unknown - os=pw32 + basic_os=pw32 ;; rdos | rdos64) basic_machine=x86_64-pc - os=rdos + basic_os=rdos ;; rdos32) basic_machine=i386-pc - os=rdos + basic_os=rdos ;; rom68k) basic_machine=m68k-rom68k - os=coff + basic_os=coff ;; sa29200) basic_machine=a29k-amd - os=udi + basic_os=udi ;; sei) basic_machine=mips-sei - os=seiux + basic_os=seiux ;; sequent) basic_machine=i386-sequent - os= + basic_os= ;; sps7) basic_machine=m68k-bull - os=sysv2 + basic_os=sysv2 ;; st2000) basic_machine=m68k-tandem - os= + basic_os= ;; stratus) basic_machine=i860-stratus - os=sysv4 + basic_os=sysv4 ;; sun2) basic_machine=m68000-sun - os= + basic_os= ;; sun2os3) basic_machine=m68000-sun - os=sunos3 + basic_os=sunos3 ;; sun2os4) basic_machine=m68000-sun - os=sunos4 + basic_os=sunos4 ;; sun3) basic_machine=m68k-sun - os= + basic_os= ;; sun3os3) basic_machine=m68k-sun - os=sunos3 + basic_os=sunos3 ;; sun3os4) basic_machine=m68k-sun - os=sunos4 + basic_os=sunos4 ;; sun4) basic_machine=sparc-sun - os= + basic_os= ;; sun4os3) basic_machine=sparc-sun - os=sunos3 + basic_os=sunos3 ;; sun4os4) basic_machine=sparc-sun - os=sunos4 + basic_os=sunos4 ;; sun4sol2) basic_machine=sparc-sun - os=solaris2 + basic_os=solaris2 ;; sun386 | sun386i | roadrunner) basic_machine=i386-sun - os= + basic_os= ;; sv1) basic_machine=sv1-cray - os=unicos + basic_os=unicos ;; symmetry) basic_machine=i386-sequent - os=dynix + basic_os=dynix ;; t3e) basic_machine=alphaev5-cray - os=unicos + basic_os=unicos ;; t90) basic_machine=t90-cray - os=unicos + basic_os=unicos ;; toad1) basic_machine=pdp10-xkl - os=tops20 + basic_os=tops20 ;; tpf) basic_machine=s390x-ibm - os=tpf + basic_os=tpf ;; udi29k) basic_machine=a29k-amd - os=udi + basic_os=udi ;; ultra3) basic_machine=a29k-nyu - os=sym1 + basic_os=sym1 ;; v810 | necv810) basic_machine=v810-nec - os=none + basic_os=none ;; vaxv) basic_machine=vax-dec - os=sysv + basic_os=sysv ;; vms) basic_machine=vax-dec - os=vms + basic_os=vms + ;; + vsta) + basic_machine=i386-pc + basic_os=vsta ;; vxworks960) basic_machine=i960-wrs - os=vxworks + basic_os=vxworks ;; vxworks68) basic_machine=m68k-wrs - os=vxworks + basic_os=vxworks ;; vxworks29k) basic_machine=a29k-wrs - os=vxworks + basic_os=vxworks ;; xbox) basic_machine=i686-pc - os=mingw32 + basic_os=mingw32 ;; ymp) basic_machine=ymp-cray - os=unicos + basic_os=unicos ;; *) basic_machine=$1 - os= + basic_os= ;; esac ;; @@ -686,27 +755,38 @@ case $basic_machine in bluegene*) cpu=powerpc vendor=ibm - os=cnk + basic_os=cnk ;; decsystem10* | dec10*) cpu=pdp10 vendor=dec - 
os=tops10 + basic_os=tops10 ;; decsystem20* | dec20*) cpu=pdp10 vendor=dec - os=tops20 + basic_os=tops20 ;; - delta | 3300 | motorola-3300 | motorola-delta \ - | 3300-motorola | delta-motorola) + delta | 3300 | delta-motorola | 3300-motorola | motorola-delta | motorola-3300) cpu=m68k vendor=motorola ;; - dpx2*) + # This used to be dpx2*, but that gets the RS6000-based + # DPX/20 and the x86-based DPX/2-100 wrong. See + # https://oldskool.silicium.org/stations/bull_dpx20.htm + # https://www.feb-patrimoine.com/english/bull_dpx2.htm + # https://www.feb-patrimoine.com/english/unix_and_bull.htm + dpx2 | dpx2[23]00 | dpx2[23]xx) cpu=m68k vendor=bull - os=sysv3 + ;; + dpx2100 | dpx21xx) + cpu=i386 + vendor=bull + ;; + dpx20) + cpu=rs6000 + vendor=bull ;; encore | umax | mmax) cpu=ns32k @@ -715,7 +795,7 @@ case $basic_machine in elxsi) cpu=elxsi vendor=elxsi - os=${os:-bsd} + basic_os=${basic_os:-bsd} ;; fx2800) cpu=i860 @@ -728,7 +808,7 @@ case $basic_machine in h3050r* | hiux*) cpu=hppa1.1 vendor=hitachi - os=hiuxwe2 + basic_os=hiuxwe2 ;; hp3k9[0-9][0-9] | hp9[0-9][0-9]) cpu=hppa1.0 @@ -771,36 +851,36 @@ case $basic_machine in i*86v32) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv32 + basic_os=sysv32 ;; i*86v4*) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv4 + basic_os=sysv4 ;; i*86v) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=sysv + basic_os=sysv ;; i*86sol2) cpu=`echo "$1" | sed -e 's/86.*/86/'` vendor=pc - os=solaris2 + basic_os=solaris2 ;; j90 | j90-cray) cpu=j90 vendor=cray - os=${os:-unicos} + basic_os=${basic_os:-unicos} ;; iris | iris4d) cpu=mips vendor=sgi - case $os in + case $basic_os in irix*) ;; *) - os=irix4 + basic_os=irix4 ;; esac ;; @@ -811,28 +891,16 @@ case $basic_machine in *mint | mint[0-9]* | *MiNT | *MiNT[0-9]*) cpu=m68k vendor=atari - os=mint + basic_os=mint ;; news-3600 | risc-news) cpu=mips vendor=sony - os=newsos + basic_os=newsos ;; next | m*-next) cpu=m68k vendor=next - case $os in - openstep*) - ;; - nextstep*) - ;; - ns2*) - os=nextstep2 - ;; - *) - os=nextstep3 - ;; - esac ;; np1) cpu=np1 @@ -841,12 +909,12 @@ case $basic_machine in op50n-* | op60c-*) cpu=hppa1.1 vendor=oki - os=proelf + basic_os=proelf ;; pa-hitachi) cpu=hppa1.1 vendor=hitachi - os=hiuxwe2 + basic_os=hiuxwe2 ;; pbd) cpu=sparc @@ -883,12 +951,12 @@ case $basic_machine in sde) cpu=mipsisa32 vendor=sde - os=${os:-elf} + basic_os=${basic_os:-elf} ;; simso-wrs) cpu=sparclite vendor=wrs - os=vxworks + basic_os=vxworks ;; tower | tower-32) cpu=m68k @@ -905,7 +973,7 @@ case $basic_machine in w89k-*) cpu=hppa1.1 vendor=winbond - os=proelf + basic_os=proelf ;; none) cpu=none @@ -921,12 +989,13 @@ case $basic_machine in ;; *-*) - # shellcheck disable=SC2162 + saved_IFS=$IFS IFS="-" read cpu vendor <&2 + echo "Invalid configuration '$1': machine '$cpu-$vendor' not recognized" 1>&2 exit 1 ;; esac @@ -1278,8 +1491,53 @@ esac # Decode manufacturer-specific aliases for certain operating systems. -if [ x$os != x ] +if test x"$basic_os" != x then + +# First recognize some ad-hoc cases, or perhaps split kernel-os, or else just +# set os. 
+obj= +case $basic_os in + gnu/linux*) + kernel=linux + os=`echo "$basic_os" | sed -e 's|gnu/linux|gnu|'` + ;; + os2-emx) + kernel=os2 + os=`echo "$basic_os" | sed -e 's|os2-emx|emx|'` + ;; + nto-qnx*) + kernel=nto + os=`echo "$basic_os" | sed -e 's|nto-qnx|qnx|'` + ;; + *-*) + saved_IFS=$IFS + IFS="-" read kernel os <&2 - exit 1 + # No normalization, but not necessarily accepted, that comes below. ;; esac + else # Here we handle the default operating systems that come with various machines. @@ -1533,42 +1744,54 @@ else # will signal an error saying that MANUFACTURER isn't an operating # system, and we'll never get to this point. +kernel= +obj= case $cpu-$vendor in score-*) - os=elf + os= + obj=elf ;; spu-*) - os=elf + os= + obj=elf ;; *-acorn) os=riscix1.2 ;; arm*-rebel) - os=linux + kernel=linux + os=gnu ;; arm*-semi) - os=aout + os= + obj=aout ;; c4x-* | tic4x-*) - os=coff + os= + obj=coff ;; c8051-*) - os=elf + os= + obj=elf ;; clipper-intergraph) os=clix ;; hexagon-*) - os=elf + os= + obj=elf ;; tic54x-*) - os=coff + os= + obj=coff ;; tic55x-*) - os=coff + os= + obj=coff ;; tic6x-*) - os=coff + os= + obj=coff ;; # This must come before the *-dec entry. pdp10-*) @@ -1590,28 +1813,43 @@ case $cpu-$vendor in os=sunos3 ;; m68*-cisco) - os=aout + os= + obj=aout ;; mep-*) - os=elf + os= + obj=elf + ;; + # The -sgi and -siemens entries must be before the mips- entry + # or we get the wrong os. + *-sgi) + os=irix + ;; + *-siemens) + os=sysv4 ;; mips*-cisco) - os=elf + os= + obj=elf ;; - mips*-*) - os=elf + mips*-*|nanomips*-*) + os= + obj=elf ;; or32-*) - os=coff + os= + obj=coff ;; - *-tti) # must be before sparc entry or we get the wrong os. + # This must be before the sparc-* entry or we get the wrong os. + *-tti) os=sysv3 ;; sparc-* | *-sun) os=sunos4.1.1 ;; pru-*) - os=elf + os= + obj=elf ;; *-be) os=beos @@ -1635,7 +1873,7 @@ case $cpu-$vendor in os=hpux ;; *-hitachi) - os=hiux + os=hiuxwe2 ;; i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) os=sysv @@ -1679,12 +1917,6 @@ case $cpu-$vendor in *-encore) os=bsd ;; - *-sgi) - os=irix - ;; - *-siemens) - os=sysv4 - ;; *-masscomp) os=rtu ;; @@ -1692,10 +1924,12 @@ case $cpu-$vendor in os=uxpv ;; *-rom68k) - os=coff + os= + obj=coff ;; *-*bug) - os=coff + os= + obj=coff ;; *-apple) os=macos @@ -1710,84 +1944,406 @@ case $cpu-$vendor in os=none ;; esac + fi +# Now, validate our (potentially fixed-up) individual pieces (OS, OBJ). + +case $os in + # Sometimes we do "kernel-libc", so those need to count as OSes. + llvm* | musl* | newlib* | relibc* | uclibc*) + ;; + # Likewise for "kernel-abi" + eabi* | gnueabi*) + ;; + # VxWorks passes extra cpu info in the 4th filed. + simlinux | simwindows | spe) + ;; + # See `case $cpu-$os` validation below + ghcjs) + ;; + # Now accept the basic system types. + # Each alternative MUST end in a * to match a version number. 
+ abug \ + | aix* \ + | amdhsa* \ + | amigados* \ + | amigaos* \ + | android* \ + | aof* \ + | aos* \ + | aros* \ + | atheos* \ + | auroraux* \ + | aux* \ + | beos* \ + | bitrig* \ + | bme* \ + | bosx* \ + | bsd* \ + | cegcc* \ + | chorusos* \ + | chorusrdb* \ + | clix* \ + | cloudabi* \ + | cnk* \ + | conix* \ + | cos* \ + | cxux* \ + | cygwin* \ + | darwin* \ + | dgux* \ + | dicos* \ + | dnix* \ + | domain* \ + | dragonfly* \ + | drops* \ + | ebmon* \ + | ecoff* \ + | ekkobsd* \ + | emscripten* \ + | emx* \ + | es* \ + | fiwix* \ + | freebsd* \ + | fuchsia* \ + | genix* \ + | genode* \ + | glidix* \ + | gnu* \ + | go32* \ + | haiku* \ + | hcos* \ + | hiux* \ + | hms* \ + | hpux* \ + | ieee* \ + | interix* \ + | ios* \ + | iris* \ + | irix* \ + | ironclad* \ + | isc* \ + | its* \ + | l4re* \ + | libertybsd* \ + | lites* \ + | lnews* \ + | luna* \ + | lynxos* \ + | mach* \ + | macos* \ + | magic* \ + | mbr* \ + | midipix* \ + | midnightbsd* \ + | mingw32* \ + | mingw64* \ + | minix* \ + | mint* \ + | mirbsd* \ + | mks* \ + | mlibc* \ + | mmixware* \ + | mon960* \ + | morphos* \ + | moss* \ + | moxiebox* \ + | mpeix* \ + | mpw* \ + | msdos* \ + | msys* \ + | mvs* \ + | nacl* \ + | netbsd* \ + | netware* \ + | newsos* \ + | nextstep* \ + | nindy* \ + | nonstopux* \ + | nova* \ + | nsk* \ + | nucleus* \ + | nx6 \ + | nx7 \ + | oabi* \ + | ohos* \ + | onefs* \ + | openbsd* \ + | openedition* \ + | openstep* \ + | os108* \ + | os2* \ + | os400* \ + | os68k* \ + | os9* \ + | ose* \ + | osf* \ + | oskit* \ + | osx* \ + | palmos* \ + | phoenix* \ + | plan9* \ + | powermax* \ + | powerunix* \ + | proelf* \ + | psos* \ + | psp* \ + | ptx* \ + | pw32* \ + | qnx* \ + | rdos* \ + | redox* \ + | rhapsody* \ + | riscix* \ + | riscos* \ + | rtems* \ + | rtmk* \ + | rtu* \ + | scout* \ + | secbsd* \ + | sei* \ + | serenity* \ + | sim* \ + | skyos* \ + | solaris* \ + | solidbsd* \ + | sortix* \ + | storm-chaos* \ + | sunos \ + | sunos[34]* \ + | superux* \ + | syllable* \ + | sym* \ + | sysv* \ + | tenex* \ + | tirtos* \ + | toppers* \ + | tops10* \ + | tops20* \ + | tpf* \ + | tvos* \ + | twizzler* \ + | uclinux* \ + | udi* \ + | udk* \ + | ultrix* \ + | unicos* \ + | uniplus* \ + | unleashed* \ + | unos* \ + | uwin* \ + | uxpv* \ + | v88r* \ + |*vms* \ + | vos* \ + | vsta* \ + | vxsim* \ + | vxworks* \ + | wasi* \ + | watchos* \ + | wince* \ + | windiss* \ + | windows* \ + | winnt* \ + | xenix* \ + | xray* \ + | zephyr* \ + | zvmoe* ) + ;; + # This one is extra strict with allowed versions + sco3.2v2 | sco3.2v[4-9]* | sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + ;; + # This refers to builds using the UEFI calling convention + # (which depends on the architecture) and PE file format. + # Note that this is both a different calling convention and + # different file format than that of GNU-EFI + # (x86_64-w64-mingw32). + uefi) + ;; + none) + ;; + kernel* | msvc* ) + # Restricted further below + ;; + '') + if test x"$obj" = x + then + echo "Invalid configuration '$1': Blank OS only allowed with explicit machine code file format" 1>&2 + fi + ;; + *) + echo "Invalid configuration '$1': OS '$os' not recognized" 1>&2 + exit 1 + ;; +esac + +case $obj in + aout* | coff* | elf* | pe*) + ;; + '') + # empty is fine + ;; + *) + echo "Invalid configuration '$1': Machine code format '$obj' not recognized" 1>&2 + exit 1 + ;; +esac + +# Here we handle the constraint that a (synthetic) cpu and os are +# valid only in combination with each other and nowhere else. 
+case $cpu-$os in + # The "javascript-unknown-ghcjs" triple is used by GHC; we + # accept it here in order to tolerate that, but reject any + # variations. + javascript-ghcjs) + ;; + javascript-* | *-ghcjs) + echo "Invalid configuration '$1': cpu '$cpu' is not valid with os '$os$obj'" 1>&2 + exit 1 + ;; +esac + +# As a final step for OS-related things, validate the OS-kernel combination +# (given a valid OS), if there is a kernel. +case $kernel-$os-$obj in + linux-gnu*- | linux-android*- | linux-dietlibc*- | linux-llvm*- \ + | linux-mlibc*- | linux-musl*- | linux-newlib*- \ + | linux-relibc*- | linux-uclibc*- | linux-ohos*- ) + ;; + uclinux-uclibc*- | uclinux-gnu*- ) + ;; + managarm-mlibc*- | managarm-kernel*- ) + ;; + windows*-msvc*-) + ;; + -dietlibc*- | -llvm*- | -mlibc*- | -musl*- | -newlib*- | -relibc*- \ + | -uclibc*- ) + # These are just libc implementations, not actual OSes, and thus + # require a kernel. + echo "Invalid configuration '$1': libc '$os' needs explicit kernel." 1>&2 + exit 1 + ;; + -kernel*- ) + echo "Invalid configuration '$1': '$os' needs explicit kernel." 1>&2 + exit 1 + ;; + *-kernel*- ) + echo "Invalid configuration '$1': '$kernel' does not support '$os'." 1>&2 + exit 1 + ;; + *-msvc*- ) + echo "Invalid configuration '$1': '$os' needs 'windows'." 1>&2 + exit 1 + ;; + kfreebsd*-gnu*- | knetbsd*-gnu*- | netbsd*-gnu*- | kopensolaris*-gnu*-) + ;; + vxworks-simlinux- | vxworks-simwindows- | vxworks-spe-) + ;; + nto-qnx*-) + ;; + os2-emx-) + ;; + rtmk-nova-) + ;; + *-eabi*- | *-gnueabi*-) + ;; + none--*) + # None (no kernel, i.e. freestanding / bare metal), + # can be paired with an machine code file format + ;; + -*-) + # Blank kernel with real OS is always fine. + ;; + --*) + # Blank kernel and OS with real machine code file format is always fine. + ;; + *-*-*) + echo "Invalid configuration '$1': Kernel '$kernel' not known to work with OS '$os'." 1>&2 + exit 1 + ;; +esac + # Here we handle the case where we know the os, and the CPU type, but not the # manufacturer. We pick the logical manufacturer. 
case $vendor in unknown) - case $os in - riscix*) + case $cpu-$os in + *-riscix*) vendor=acorn ;; - sunos*) + *-sunos* | *-solaris*) vendor=sun ;; - cnk*|-aix*) + *-cnk* | *-aix*) vendor=ibm ;; - beos*) + *-beos*) vendor=be ;; - hpux*) + *-hpux*) vendor=hp ;; - mpeix*) + *-mpeix*) vendor=hp ;; - hiux*) + *-hiux*) vendor=hitachi ;; - unos*) + *-unos*) vendor=crds ;; - dgux*) + *-dgux*) vendor=dg ;; - luna*) + *-luna*) vendor=omron ;; - genix*) + *-genix*) vendor=ns ;; - clix*) + *-clix*) vendor=intergraph ;; - mvs* | opened*) + *-mvs* | *-opened*) + vendor=ibm + ;; + *-os400*) vendor=ibm ;; - os400*) + s390-* | s390x-*) vendor=ibm ;; - ptx*) + *-ptx*) vendor=sequent ;; - tpf*) + *-tpf*) vendor=ibm ;; - vxsim* | vxworks* | windiss*) + *-vxsim* | *-vxworks* | *-windiss*) vendor=wrs ;; - aux*) + *-aux*) vendor=apple ;; - hms*) + *-hms*) vendor=hitachi ;; - mpw* | macos*) + *-mpw* | *-macos*) vendor=apple ;; - *mint | mint[0-9]* | *MiNT | MiNT[0-9]*) + *-*mint | *-mint[0-9]* | *-*MiNT | *-MiNT[0-9]*) vendor=atari ;; - vos*) + *-vos*) vendor=stratus ;; esac ;; esac -echo "$cpu-$vendor-$os" +echo "$cpu-$vendor${kernel:+-$kernel}${os:+-$os}${obj:+-$obj}" exit # Local variables: diff --git a/autosetup/autosetup-find-tclsh b/autosetup/autosetup-find-tclsh new file mode 100755 index 0000000000..9f6d6e9402 --- /dev/null +++ b/autosetup/autosetup-find-tclsh @@ -0,0 +1,16 @@ +#!/bin/sh +# Looks for a suitable tclsh or jimsh in the PATH +# If not found, builds a bootstrap jimsh in current dir from source +# Prefer $autosetup_tclsh if is set in the environment (unless ./jimsh0 works) +# If an argument is given, use that as the test instead of autosetup-test-tclsh +d="`dirname "$0"`" +for tclsh in ./jimsh0 $autosetup_tclsh jimsh tclsh tclsh8.5 tclsh8.6 tclsh8.7; do + { $tclsh "$d/${1-autosetup-test-tclsh}"; } 2>/dev/null && exit 0 +done +echo 1>&2 "No installed jimsh or tclsh, building local bootstrap jimsh0" +for cc in ${CC_FOR_BUILD:-cc} gcc; do + { $cc -o jimsh0 "$d/jimsh0.c"; } 2>/dev/null >/dev/null || continue + ./jimsh0 "$d/${1-autosetup-test-tclsh}" && exit 0 +done +echo 1>&2 "No working C compiler found. Tried ${CC_FOR_BUILD:-cc} and gcc." +echo false diff --git a/autosetup/autosetup-test-tclsh b/autosetup/autosetup-test-tclsh new file mode 100644 index 0000000000..75126d2444 --- /dev/null +++ b/autosetup/autosetup-test-tclsh @@ -0,0 +1,20 @@ +# A small Tcl script to verify that the chosen +# interpreter works. Sometimes we might e.g. pick up +# an interpreter for a different arch. +# Outputs the full path to the interpreter + +if {[catch {info version} version] == 0} { + # This is Jim Tcl + if {$version >= 0.72} { + # Ensure that regexp works + regexp (a.*?) a + puts [info nameofexecutable] + exit 0 + } +} elseif {[catch {info tclversion} version] == 0} { + if {$version >= 8.5 && ![string match 8.5a* [info patchlevel]]} { + puts [info nameofexecutable] + exit 0 + } +} +exit 1 diff --git a/autosetup/cc-db.tcl b/autosetup/cc-db.tcl new file mode 100644 index 0000000000..12f1aed2c9 --- /dev/null +++ b/autosetup/cc-db.tcl @@ -0,0 +1,15 @@ +# Copyright (c) 2011 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# The 'cc-db' module provides a knowledge-base of system idiosyncrasies. +# In general, this module can always be included. 
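# A minimal usage sketch, assuming a typical project auto.def; the header
# names below are examples only, not requirements of the 'cc-db' module.
# Pulling in cc-db first lets its recorded prerequisites (such as sys/types.h)
# apply to later header checks.
use cc-db
cc-check-includes sys/socket.h netinet/in.h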
+ +use cc + +options {} + +# openbsd needs sys/types.h to detect some system headers +cc-include-needs sys/socket.h sys/types.h +cc-include-needs netinet/in.h sys/types.h diff --git a/autosetup/cc-lib.tcl b/autosetup/cc-lib.tcl new file mode 100644 index 0000000000..01a0fb3877 --- /dev/null +++ b/autosetup/cc-lib.tcl @@ -0,0 +1,187 @@ +# Copyright (c) 2011 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# Provides a library of common tests on top of the 'cc' module. + +use cc + +# @cc-check-lfs +# +# The equivalent of the 'AC_SYS_LARGEFILE' macro. +# +# defines 'HAVE_LFS' if LFS is available, +# and defines '_FILE_OFFSET_BITS=64' if necessary +# +# Returns 1 if 'LFS' is available or 0 otherwise +# +proc cc-check-lfs {} { + cc-check-includes sys/types.h + msg-checking "Checking if -D_FILE_OFFSET_BITS=64 is needed..." + set lfs 1 + if {[msg-quiet cc-with {-includes sys/types.h} {cc-check-sizeof off_t}] == 8} { + msg-result no + } elseif {[msg-quiet cc-with {-includes sys/types.h -cflags -D_FILE_OFFSET_BITS=64} {cc-check-sizeof off_t}] == 8} { + define _FILE_OFFSET_BITS 64 + msg-result yes + } else { + set lfs 0 + msg-result none + } + define-feature lfs $lfs + return $lfs +} + +# @cc-check-endian +# +# The equivalent of the 'AC_C_BIGENDIAN' macro. +# +# defines 'HAVE_BIG_ENDIAN' if endian is known to be big, +# or 'HAVE_LITTLE_ENDIAN' if endian is known to be little. +# +# Returns 1 if determined, or 0 if not. +# +proc cc-check-endian {} { + cc-check-includes sys/types.h sys/param.h + set rc 0 + msg-checking "Checking endian..." + cc-with {-includes {sys/types.h sys/param.h}} { + if {[cctest -code { + #if !defined(BIG_ENDIAN) || !defined(BYTE_ORDER) + #error unknown + #elif BYTE_ORDER != BIG_ENDIAN + #error little + #endif + }]} { + define-feature big-endian + msg-result "big" + set rc 1 + } elseif {[cctest -code { + #if !defined(LITTLE_ENDIAN) || !defined(BYTE_ORDER) + #error unknown + #elif BYTE_ORDER != LITTLE_ENDIAN + #error big + #endif + }]} { + define-feature little-endian + msg-result "little" + set rc 1 + } else { + msg-result "unknown" + } + } + return $rc +} + +# @cc-check-flags flag ?...? +# +# Checks whether the given C/C++ compiler flags can be used. Defines feature +# names prefixed with 'HAVE_CFLAG' and 'HAVE_CXXFLAG' respectively, and +# appends working flags to '-cflags' and 'AS_CFLAGS' or 'AS_CXXFLAGS'. +proc cc-check-flags {args} { + set result 1 + array set opts [cc-get-settings] + switch -exact -- $opts(-lang) { + c++ { + set lang C++ + set prefix CXXFLAG + } + c { + set lang C + set prefix CFLAG + } + default { + autosetup-error "cc-check-flags failed with unknown language: $opts(-lang)" + } + } + foreach flag $args { + msg-checking "Checking whether the $lang compiler accepts $flag..." + if {[cctest -cflags $flag]} { + msg-result yes + define-feature $prefix$flag + cc-with [list -cflags [list $flag]] + define-append AS_${prefix}S $flag + } else { + msg-result no + set result 0 + } + } + return $result +} + +# @cc-check-standards ver ?...? +# +# Checks whether the C/C++ compiler accepts one of the specified '-std=$ver' +# options, and appends the first working one to '-cflags' and 'AS_CFLAGS' or +# 'AS_CXXFLAGS'. 
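# A minimal sketch, assuming a typical auto.def that combines the cc-lib
# checks documented above; the -std= versions and warning flags are examples
# only, and make-config-header is the usual way to emit the resulting defines.
use cc-lib
cc-check-endian                  ;# defines HAVE_BIG_ENDIAN or HAVE_LITTLE_ENDIAN when determined
cc-check-lfs                     ;# may define _FILE_OFFSET_BITS=64 and HAVE_LFS
cc-check-standards c11 c99       ;# first accepted -std= option is appended to -cflags
cc-check-flags -Wall -Wextra     ;# each accepted flag is recorded and appended to AS_CFLAGS
make-config-header config.h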
+proc cc-check-standards {args} { + array set opts [cc-get-settings] + foreach std $args { + if {[cc-check-flags -std=$std]} { + return $std + } + } + return "" +} + +# Checks whether $keyword is usable as alignof +proc cctest_alignof {keyword} { + msg-checking "Checking for $keyword..." + if {[cctest -code "int x = ${keyword}(char), y = ${keyword}('x');"]} then { + msg-result ok + define-feature $keyword + } else { + msg-result "not found" + } +} + +# @cc-check-c11 +# +# Checks for several C11/C++11 extensions and their alternatives. Currently +# checks for '_Static_assert', '_Alignof', '__alignof__', '__alignof'. +proc cc-check-c11 {} { + msg-checking "Checking for _Static_assert..." + if {[cctest -code { + _Static_assert(1, "static assertions are available"); + }]} then { + msg-result ok + define-feature _Static_assert + } else { + msg-result "not found" + } + + cctest_alignof _Alignof + cctest_alignof __alignof__ + cctest_alignof __alignof +} + +# @cc-check-alloca +# +# The equivalent of the 'AC_FUNC_ALLOCA' macro. +# +# Checks for the existence of 'alloca' +# defines 'HAVE_ALLOCA' and returns 1 if it exists. +proc cc-check-alloca {} { + cc-check-some-feature alloca { + cctest -includes alloca.h -code { alloca (2 * sizeof (int)); } + } +} + +# @cc-signal-return-type +# +# The equivalent of the 'AC_TYPE_SIGNAL' macro. +# +# defines 'RETSIGTYPE' to 'int' or 'void'. +proc cc-signal-return-type {} { + msg-checking "Checking return type of signal handlers..." + cc-with {-includes {sys/types.h signal.h}} { + if {[cctest -code {return *(signal (0, 0)) (0) == 1;}]} { + set type int + } else { + set type void + } + define RETSIGTYPE $type + msg-result $type + } +} diff --git a/autosetup/cc-shared.tcl b/autosetup/cc-shared.tcl new file mode 100644 index 0000000000..1fa200eec1 --- /dev/null +++ b/autosetup/cc-shared.tcl @@ -0,0 +1,115 @@ +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# The 'cc-shared' module provides support for shared libraries and shared objects. 
+# It defines the following variables: +# +## SH_CFLAGS Flags to use compiling sources destined for a shared library +## SH_LDFLAGS Flags to use linking (creating) a shared library +## SH_SOPREFIX Prefix to use to set the soname when creating a shared library +## SH_SOFULLPATH Set to 1 if the shared library soname should include the full install path +## SH_SOEXT Extension for shared libs +## SH_SOEXTVER Format for versioned shared libs - %s = version +## SHOBJ_CFLAGS Flags to use compiling sources destined for a shared object +## SHOBJ_LDFLAGS Flags to use linking a shared object, undefined symbols allowed +## SHOBJ_LDFLAGS_R - as above, but all symbols must be resolved +## SH_LINKRPATH Format for setting the rpath when linking an executable, %s = path +## SH_LINKFLAGS Flags to use linking an executable which will load shared objects +## LD_LIBRARY_PATH Environment variable which specifies path to shared libraries +## STRIPLIBFLAGS Arguments to strip a dynamic library + +options {} + +# Defaults: gcc on unix +define SHOBJ_CFLAGS -fPIC +define SHOBJ_LDFLAGS -shared +define SH_CFLAGS -fPIC +define SH_LDFLAGS -shared +define SH_LINKFLAGS -rdynamic +define SH_LINKRPATH "-Wl,-rpath -Wl,%s" +define SH_SOEXT .so +define SH_SOEXTVER .so.%s +define SH_SOPREFIX -Wl,-soname, +define LD_LIBRARY_PATH LD_LIBRARY_PATH +define STRIPLIBFLAGS --strip-unneeded + +# Note: This is a helpful reference for identifying the toolchain +# http://sourceforge.net/apps/mediawiki/predef/index.php?title=Compilers + +switch -glob -- [get-define host] { + *-*-darwin* { + define SHOBJ_CFLAGS "-dynamic -fno-common" + define SHOBJ_LDFLAGS "-bundle -undefined dynamic_lookup" + define SHOBJ_LDFLAGS_R -bundle + define SH_CFLAGS -dynamic + define SH_LDFLAGS -dynamiclib + define SH_LINKFLAGS "" + define SH_SOEXT .dylib + define SH_SOEXTVER .%s.dylib + define SH_SOPREFIX -Wl,-install_name, + define SH_SOFULLPATH + define LD_LIBRARY_PATH DYLD_LIBRARY_PATH + define STRIPLIBFLAGS -x + } + *-*-ming* - *-*-cygwin - *-*-msys { + define SHOBJ_CFLAGS "" + define SHOBJ_LDFLAGS -shared + define SH_CFLAGS "" + define SH_LDFLAGS -shared + define SH_LINKRPATH "" + define SH_LINKFLAGS "" + define SH_SOEXT .dll + define SH_SOEXTVER .dll + define SH_SOPREFIX "" + define LD_LIBRARY_PATH PATH + } + sparc* { + if {[msg-quiet cc-check-decls __SUNPRO_C]} { + msg-result "Found sun stdio compiler" + # sun stdio compiler + # XXX: These haven't been fully tested. + define SHOBJ_CFLAGS -KPIC + define SHOBJ_LDFLAGS "-G" + define SH_CFLAGS -KPIC + define SH_LINKFLAGS -Wl,-export-dynamic + define SH_SOPREFIX -Wl,-h, + } + } + *-*-solaris* { + if {[msg-quiet cc-check-decls __SUNPRO_C]} { + msg-result "Found sun stdio compiler" + # sun stdio compiler + # XXX: These haven't been fully tested. 
+ define SHOBJ_CFLAGS -KPIC + define SHOBJ_LDFLAGS "-G" + define SH_CFLAGS -KPIC + define SH_LINKFLAGS -Wl,-export-dynamic + define SH_SOPREFIX -Wl,-h, + } + } + *-*-hpux* { + define SHOBJ_CFLAGS +z + define SHOBJ_LDFLAGS -b + define SH_CFLAGS +z + define SH_LDFLAGS -b + define SH_LINKFLAGS -Wl,+s + define SH_LINKRPATH "-Wl,+b -Wl,%s" + define SH_SOPREFIX -Wl,+h, + define STRIPLIBFLAGS -Wl,-s + } + *-*-haiku { + define SHOBJ_CFLAGS "" + define SHOBJ_LDFLAGS -shared + define SH_CFLAGS "" + define SH_LDFLAGS -shared + define SH_LINKFLAGS "" + define SH_SOPREFIX "" + define LD_LIBRARY_PATH LIBRARY_PATH + } +} + +if {![is-defined SHOBJ_LDFLAGS_R]} { + define SHOBJ_LDFLAGS_R [get-define SHOBJ_LDFLAGS] +} diff --git a/autosetup/cc.tcl b/autosetup/cc.tcl new file mode 100644 index 0000000000..05c1b1cf40 --- /dev/null +++ b/autosetup/cc.tcl @@ -0,0 +1,758 @@ +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# The 'cc' module supports checking various 'features' of the C or C++ +# compiler/linker environment. Common commands are 'cc-check-includes', +# 'cc-check-types', 'cc-check-functions', 'cc-with' and 'make-config-header' +# +# The following environment variables are used if set: +# +## CC - C compiler +## CXX - C++ compiler +## CPP - C preprocessor +## CCACHE - Set to "none" to disable automatic use of ccache +## CPPFLAGS - Additional C preprocessor compiler flags (C and C++), before CFLAGS, CXXFLAGS +## CFLAGS - Additional C compiler flags +## CXXFLAGS - Additional C++ compiler flags +## LDFLAGS - Additional compiler flags during linking +## LINKFLAGS - ?How is this different from LDFLAGS? +## LIBS - Additional libraries to use (for all tests) +## CROSS - Tool prefix for cross compilation +# +# The following variables are defined from the corresponding +# environment variables if set. +# +## CC_FOR_BUILD +## LD + +use system + +options {} + +# Checks for the existence of the given function by linking +# +proc cctest_function {function} { + cctest -link 1 -declare "extern void $function\(void);" -code "$function\();" +} + +# Checks for the existence of the given type by compiling +proc cctest_type {type} { + cctest -code "$type _x;" +} + +# Checks for the existence of the given type/structure member. +# e.g. "struct stat.st_mtime" +proc cctest_member {struct_member} { + # split at the first dot + regexp {^([^.]+)[.](.*)$} $struct_member -> struct member + cctest -code "static $struct _s; return sizeof(_s.$member);" +} + +# Checks for the existence of the given define by compiling +# +proc cctest_define {name} { + cctest -code "#ifndef $name\n#error not defined\n#endif" +} + +# Checks for the existence of the given name either as +# a macro (#define) or an rvalue (such as an enum) +# +proc cctest_decl {name} { + cctest -code "#ifndef $name\n(void)$name;\n#endif" +} + +# @cc-check-sizeof type ... +# +# Checks the size of the given types (between 1 and 32, inclusive). +# Defines a variable with the size determined, or 'unknown' otherwise. +# e.g. for type 'long long', defines 'SIZEOF_LONG_LONG'. +# Returns the size of the last type. +# +proc cc-check-sizeof {args} { + foreach type $args { + msg-checking "Checking for sizeof $type..." + set size unknown + # Try the most common sizes first + foreach i {4 8 1 2 16 32} { + if {[cctest -code "static int _x\[sizeof($type) == $i ? 
1 : -1\] = { 1 };"]} { + set size $i + break + } + } + msg-result $size + set define [feature-define-name $type SIZEOF_] + define $define $size + } + # Return the last result + get-define $define +} + +# Checks for each feature in $list by using the given script. +# +# When the script is evaluated, $each is set to the feature +# being checked, and $extra is set to any additional cctest args. +# +# Returns 1 if all features were found, or 0 otherwise. +proc cc-check-some-feature {list script} { + set ret 1 + foreach each $list { + if {![check-feature $each $script]} { + set ret 0 + } + } + return $ret +} + +# @cc-check-includes includes ... +# +# Checks that the given include files can be used. +proc cc-check-includes {args} { + cc-check-some-feature $args { + set with {} + if {[dict exists $::autosetup(cc-include-deps) $each]} { + set deps [dict keys [dict get $::autosetup(cc-include-deps) $each]] + msg-quiet cc-check-includes {*}$deps + foreach i $deps { + if {[have-feature $i]} { + lappend with $i + } + } + } + if {[llength $with]} { + cc-with [list -includes $with] { + cctest -includes $each + } + } else { + cctest -includes $each + } + } +} + +# @cc-include-needs include required ... +# +# Ensures that when checking for '$include', a check is first +# made for each '$required' file, and if found, it is included with '#include'. +proc cc-include-needs {file args} { + foreach depfile $args { + dict set ::autosetup(cc-include-deps) $file $depfile 1 + } +} + +# @cc-check-types type ... +# +# Checks that the types exist. +proc cc-check-types {args} { + cc-check-some-feature $args { + cctest_type $each + } +} + +# @cc-check-defines define ... +# +# Checks that the given preprocessor symbols are defined. +proc cc-check-defines {args} { + cc-check-some-feature $args { + cctest_define $each + } +} + +# @cc-check-decls name ... +# +# Checks that each given name is either a preprocessor symbol or rvalue +# such as an enum. Note that the define used is 'HAVE_DECL_xxx' +# rather than 'HAVE_xxx'. +proc cc-check-decls {args} { + set ret 1 + foreach name $args { + msg-checking "Checking for $name..." + set r [cctest_decl $name] + define-feature "decl $name" $r + if {$r} { + msg-result "ok" + } else { + msg-result "not found" + set ret 0 + } + } + return $ret +} + +# @cc-check-functions function ... +# +# Checks that the given functions exist (can be linked). +proc cc-check-functions {args} { + cc-check-some-feature $args { + cctest_function $each + } +} + +# @cc-check-members type.member ... +# +# Checks that the given type/structure members exist. +# A structure member is of the form 'struct stat.st_mtime'. +proc cc-check-members {args} { + cc-check-some-feature $args { + cctest_member $each + } +} + +# @cc-check-function-in-lib function libs ?otherlibs? +# +# Checks that the given function can be found in one of the libs. +# +# First checks for no library required, then checks each of the libraries +# in turn. +# +# If the function is found, the feature is defined and 'lib_$function' is defined +# to '-l$lib' where the function was found, or "" if no library required. +# In addition, '-l$lib' is prepended to the 'LIBS' define. +# +# If additional libraries may be needed for linking, they should be specified +# with '$extralibs' as '-lotherlib1 -lotherlib2'. +# These libraries are not automatically added to 'LIBS'. +# +# Returns 1 if found or 0 if not. +# +proc cc-check-function-in-lib {function libs {otherlibs {}}} { + msg-checking "Checking libs for $function..." 
+ set found 0 + cc-with [list -libs $otherlibs] { + if {[cctest_function $function]} { + msg-result "none needed" + define lib_$function "" + incr found + } else { + foreach lib $libs { + cc-with [list -libs -l$lib] { + if {[cctest_function $function]} { + msg-result -l$lib + define lib_$function -l$lib + # prepend to LIBS + define LIBS "-l$lib [get-define LIBS]" + incr found + break + } + } + } + } + } + define-feature $function $found + if {!$found} { + msg-result "no" + } + return $found +} + +# @cc-check-tools tool ... +# +# Checks for existence of the given compiler tools, taking +# into account any cross compilation prefix. +# +# For example, when checking for 'ar', first 'AR' is checked on the command +# line and then in the environment. If not found, '${host}-ar' or +# simply 'ar' is assumed depending upon whether cross compiling. +# The path is searched for this executable, and if found 'AR' is defined +# to the executable name. +# Note that even when cross compiling, the simple 'ar' is used as a fallback, +# but a warning is generated. This is necessary for some toolchains. +# +# It is an error if the executable is not found. +# +proc cc-check-tools {args} { + foreach tool $args { + set TOOL [string toupper $tool] + set exe [get-env $TOOL [get-define cross]$tool] + if {[find-executable $exe]} { + define $TOOL $exe + continue + } + if {[find-executable $tool]} { + msg-result "Warning: Failed to find $exe, falling back to $tool which may be incorrect" + define $TOOL $tool + continue + } + user-error "Failed to find $exe" + } +} + +# @cc-check-progs prog ... +# +# Checks for existence of the given executables on the path. +# +# For example, when checking for 'grep', the path is searched for +# the executable, 'grep', and if found 'GREP' is defined as 'grep'. +# +# If the executable is not found, the variable is defined as 'false'. +# Returns 1 if all programs were found, or 0 otherwise. +# +proc cc-check-progs {args} { + set failed 0 + foreach prog $args { + set PROG [string toupper $prog] + msg-checking "Checking for $prog..." + if {![find-executable $prog]} { + msg-result no + define $PROG false + incr failed + } else { + msg-result ok + define $PROG $prog + } + } + expr {!$failed} +} + +# @cc-path-progs prog ... +# +# Like cc-check-progs, but sets the define to the full path rather +# than just the program name. +# +proc cc-path-progs {args} { + set failed 0 + foreach prog $args { + set PROG [string toupper $prog] + msg-checking "Checking for $prog..." + set path [find-executable-path $prog] + if {$path eq ""} { + msg-result no + define $PROG false + incr failed + } else { + msg-result $path + define $PROG $path + } + } + expr {!$failed} +} + +# Adds the given settings to $::autosetup(ccsettings) and +# returns the old settings. 
+# +proc cc-add-settings {settings} { + if {[llength $settings] % 2} { + autosetup-error "settings list is missing a value: $settings" + } + + set prev [cc-get-settings] + # workaround a bug in some versions of jimsh by forcing + # conversion of $prev to a list + llength $prev + + array set new $prev + + foreach {name value} $settings { + switch -exact -- $name { + -cflags - -includes { + # These are given as lists + lappend new($name) {*}[list-non-empty $value] + } + -declare { + lappend new($name) $value + } + -libs { + # Note that new libraries are added before previous libraries + set new($name) [list {*}[list-non-empty $value] {*}$new($name)] + } + -link - -lang - -nooutput { + set new($name) $value + } + -source - -sourcefile - -code { + # XXX: These probably are only valid directly from cctest + set new($name) $value + } + default { + autosetup-error "unknown cctest setting: $name" + } + } + } + + cc-store-settings [array get new] + + return $prev +} + +proc cc-store-settings {new} { + set ::autosetup(ccsettings) $new +} + +proc cc-get-settings {} { + return $::autosetup(ccsettings) +} + +# Similar to cc-add-settings, but each given setting +# simply replaces the existing value. +# +# Returns the previous settings +proc cc-update-settings {args} { + set prev [cc-get-settings] + cc-store-settings [dict merge $prev $args] + return $prev +} + +# @cc-with settings ?{ script }? +# +# Sets the given 'cctest' settings and then runs the tests in '$script'. +# Note that settings such as '-lang' replace the current setting, while +# those such as '-includes' are appended to the existing setting. +# +# If no script is given, the settings become the default for the remainder +# of the 'auto.def' file. +# +## cc-with {-lang c++} { +## # This will check with the C++ compiler +## cc-check-types bool +## cc-with {-includes signal.h} { +## # This will check with the C++ compiler, signal.h and any existing includes. +## ... +## } +## # back to just the C++ compiler +## } +# +# The '-libs' setting is special in that newer values are added *before* earlier ones. +# +## cc-with {-libs {-lc -lm}} { +## cc-with {-libs -ldl} { +## cctest -libs -lsocket ... +## # libs will be in this order: -lsocket -ldl -lc -lm +## } +## } +# +# If you wish to invoke something like cc-check-flags but not have -cflags updated, +# use the following idiom: +# +## cc-with {} { +## cc-check-flags ... +## } +proc cc-with {settings args} { + if {[llength $args] == 0} { + cc-add-settings $settings + } elseif {[llength $args] > 1} { + autosetup-error "usage: cc-with settings ?script?" + } else { + set save [cc-add-settings $settings] + set rc [catch {uplevel 1 [lindex $args 0]} result info] + cc-store-settings $save + if {$rc != 0} { + return -code [dict get $info -code] $result + } + return $result + } +} + +# @cctest ?settings? +# +# Low level C/C++ compiler checker. Compiles and or links a small C program +# according to the arguments and returns 1 if OK, or 0 if not. +# +# Supported settings are: +# +## -cflags cflags A list of flags to pass to the compiler +## -includes list A list of includes, e.g. {stdlib.h stdio.h} +## -declare code Code to declare before main() +## -link 1 Don't just compile, link too +## -lang c|c++ Use the C (default) or C++ compiler +## -libs liblist List of libraries to link, e.g. {-ldl -lm} +## -code code Code to compile in the body of main() +## -source code Compile a complete program. 
Ignore -includes, -declare and -code +## -sourcefile file Shorthand for -source [readfile [get-define srcdir]/$file] +## -nooutput 1 Treat any compiler output (e.g. a warning) as an error +# +# Unless '-source' or '-sourcefile' is specified, the C program looks like: +# +## #include /* same for remaining includes in the list */ +## declare-code /* any code in -declare, verbatim */ +## int main(void) { +## code /* any code in -code, verbatim */ +## return 0; +## } +# +# And the command line looks like: +# +## CC -cflags CFLAGS CPPFLAGS conftest.c -o conftest.o +## CXX -cflags CXXFLAGS CPPFLAGS conftest.cpp -o conftest.o +# +# And if linking: +# +## CC LDFLAGS -cflags CFLAGS conftest.c -o conftest -libs LIBS +## CXX LDFLAGS -cflags CXXFLAGS conftest.c -o conftest -libs LIBS +# +# Any failures are recorded in 'config.log' +# +proc cctest {args} { + set tmp conftest__ + + # Easiest way to merge in the settings + cc-with $args { + array set opts [cc-get-settings] + } + + if {[info exists opts(-sourcefile)]} { + set opts(-source) [readfile [get-define srcdir]/$opts(-sourcefile) "#error can't find $opts(-sourcefile)"] + } + if {[info exists opts(-source)]} { + set lines $opts(-source) + } else { + foreach i $opts(-includes) { + if {$opts(-code) ne "" && ![feature-checked $i]} { + # Compiling real code with an unchecked header file + # Quickly (and silently) check for it now + + # Remove all -includes from settings before checking + set saveopts [cc-update-settings -includes {}] + msg-quiet cc-check-includes $i + cc-store-settings $saveopts + } + if {$opts(-code) eq "" || [have-feature $i]} { + lappend source "#include <$i>" + } + } + lappend source {*}$opts(-declare) + lappend source "int main(void) {" + lappend source $opts(-code) + lappend source "return 0;" + lappend source "}" + + set lines [join $source \n] + } + + # Build the command line + set cmdline {} + lappend cmdline {*}[get-define CCACHE] + switch -exact -- $opts(-lang) { + c++ { + set src conftest__.cpp + lappend cmdline {*}[get-define CXX] + set cflags [get-define CXXFLAGS] + } + c { + set src conftest__.c + lappend cmdline {*}[get-define CC] + set cflags [get-define CFLAGS] + } + default { + autosetup-error "cctest called with unknown language: $opts(-lang)" + } + } + + if {$opts(-link)} { + lappend cmdline {*}[get-define LDFLAGS] + } else { + lappend cflags {*}[get-define CPPFLAGS] + set tmp conftest__.o + lappend cmdline -c + } + lappend cmdline {*}$opts(-cflags) {*}[get-define cc-default-debug ""] {*}$cflags + lappend cmdline $src -o $tmp + if {$opts(-link)} { + lappend cmdline {*}$opts(-libs) {*}[get-define LIBS] + } + + # At this point we have the complete command line and the + # complete source to be compiled. 
Get the result from cache if + # we can + if {[info exists ::cc_cache($cmdline,$lines)]} { + msg-checking "(cached) " + set ok $::cc_cache($cmdline,$lines) + if {$::autosetup(debug)} { + configlog "From cache (ok=$ok): [join $cmdline]" + configlog "============" + configlog $lines + configlog "============" + } + return $ok + } + + writefile $src $lines\n + + set ok 1 + set err [catch {exec-with-stderr {*}$cmdline} result errinfo] + if {$err || ($opts(-nooutput) && [string length $result])} { + configlog "Failed: [join $cmdline]" + configlog $result + configlog "============" + configlog "The failed code was:" + configlog $lines + configlog "============" + set ok 0 + } elseif {$::autosetup(debug)} { + configlog "Compiled OK: [join $cmdline]" + configlog "============" + configlog $lines + configlog "============" + } + file delete $src + file delete $tmp + + # cache it + set ::cc_cache($cmdline,$lines) $ok + + return $ok +} + +# @make-autoconf-h outfile ?auto-patterns=HAVE_*? ?bare-patterns=SIZEOF_*? +# +# Deprecated - see 'make-config-header' +proc make-autoconf-h {file {autopatterns {HAVE_*}} {barepatterns {SIZEOF_* HAVE_DECL_*}}} { + user-notice "*** make-autoconf-h is deprecated -- use make-config-header instead" + make-config-header $file -auto $autopatterns -bare $barepatterns +} + +# @make-config-header outfile ?-auto patternlist? ?-bare patternlist? ?-none patternlist? ?-str patternlist? ... +# +# Examines all defined variables which match the given patterns +# and writes an include file, '$file', which defines each of these. +# Variables which match '-auto' are output as follows: +# - defines which have the value '0' are ignored. +# - defines which have integer values are defined as the integer value. +# - any other value is defined as a string, e.g. '"value"' +# Variables which match '-bare' are defined as-is. +# Variables which match '-str' are defined as a string, e.g. '"value"' +# Variables which match '-none' are omitted. +# +# Note that order is important. The first pattern that matches is selected. +# Default behaviour is: +# +## -bare {SIZEOF_* HAVE_DECL_*} -auto HAVE_* -none * +# +# If the file would be unchanged, it is not written. 
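+#
+# For example, an 'auto.def' might finish with a call like the following
+# (the output file name and the extra patterns are illustrative only,
+# not defaults):
+#
+## make-config-header config.h -auto {HAVE_* USE_*} -bare SIZEOF_*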
+proc make-config-header {file args} { + set guard _[string toupper [regsub -all {[^a-zA-Z0-9]} [file tail $file] _]] + file mkdir [file dirname $file] + set lines {} + lappend lines "#ifndef $guard" + lappend lines "#define $guard" + + # Add some defaults + lappend args -bare {SIZEOF_* HAVE_DECL_*} -auto HAVE_* + + foreach n [lsort [dict keys [all-defines]]] { + set value [get-define $n] + set type [calc-define-output-type $n $args] + switch -exact -- $type { + -bare { + # Just output the value unchanged + } + -none { + continue + } + -str { + set value \"[string map [list \\ \\\\ \" \\\"] $value]\" + } + -auto { + # Automatically determine the type + if {$value eq "0"} { + lappend lines "/* #undef $n */" + continue + } + if {![string is integer -strict $value]} { + set value \"[string map [list \\ \\\\ \" \\\"] $value]\" + } + } + "" { + continue + } + default { + autosetup-error "Unknown type in make-config-header: $type" + } + } + lappend lines "#define $n $value" + } + lappend lines "#endif" + set buf [join $lines \n] + write-if-changed $file $buf { + msg-result "Created $file" + } +} + +proc calc-define-output-type {name spec} { + foreach {type patterns} $spec { + foreach pattern $patterns { + if {[string match $pattern $name]} { + return $type + } + } + } + return "" +} + +proc cc-init {} { + global autosetup + + # Initialise some values from the environment or commandline or default settings + foreach i {LDFLAGS LIBS CPPFLAGS LINKFLAGS CFLAGS} { + lassign $i var default + define $var [get-env $var $default] + } + + if {[env-is-set CC]} { + # Set by the user, so don't try anything else + set try [list [get-env CC ""]] + } else { + # Try some reasonable options + set try [list [get-define cross]cc [get-define cross]gcc] + } + define CC [find-an-executable {*}$try] + if {[get-define CC] eq ""} { + user-error "Could not find a C compiler. Tried: [join $try ", "]" + } + + define CPP [get-env CPP "[get-define CC] -E"] + + # XXX: Could avoid looking for a C++ compiler until requested + # If CXX isn't found, it is set to the empty string. 
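+    # (descriptive note) the cross-compilation prefix, if any, is applied when
+    # probing for c++/g++ below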
+ if {[env-is-set CXX]} { + define CXX [find-an-executable -required [get-env CXX ""]] + } else { + define CXX [find-an-executable [get-define cross]c++ [get-define cross]g++] + } + + # CXXFLAGS default to CFLAGS if not specified + define CXXFLAGS [get-env CXXFLAGS [get-define CFLAGS]] + + # May need a CC_FOR_BUILD, so look for one + define CC_FOR_BUILD [find-an-executable [get-env CC_FOR_BUILD ""] cc gcc false] + + # These start empty and never come from the user or environment + define AS_CFLAGS "" + define AS_CPPFLAGS "" + define AS_CXXFLAGS "" + + define CCACHE [find-an-executable [get-env CCACHE ccache]] + + # If any of these are set in the environment, propagate them to the AUTOREMAKE commandline + foreach i {CC CXX CCACHE CPP CFLAGS CXXFLAGS CXXFLAGS LDFLAGS LIBS CROSS CPPFLAGS LINKFLAGS CC_FOR_BUILD LD} { + if {[env-is-set $i]} { + # Note: If the variable is set on the command line, get-env will return that value + # so the command line will continue to override the environment + define-append-argv AUTOREMAKE $i=[get-env $i ""] + } + } + + # Initial cctest settings + cc-store-settings {-cflags {} -includes {} -declare {} -link 0 -lang c -libs {} -code {} -nooutput 0} + set autosetup(cc-include-deps) {} + + msg-result "C compiler...[get-define CCACHE] [get-define CC] [get-define CFLAGS] [get-define CPPFLAGS]" + if {[get-define CXX] ne "false"} { + msg-result "C++ compiler...[get-define CCACHE] [get-define CXX] [get-define CXXFLAGS] [get-define CPPFLAGS]" + } + msg-result "Build C compiler...[get-define CC_FOR_BUILD]" + + # On Darwin, we prefer to use -g0 to avoid creating .dSYM directories + # but some compilers may not support it, so test here. + switch -glob -- [get-define host] { + *-*-darwin* { + if {[cctest -cflags {-g0}]} { + define cc-default-debug -g0 + } + } + } + + if {![cc-check-includes stdlib.h]} { + user-error "Compiler does not work. See config.log" + } +} + +cc-init diff --git a/autosetup/find_tclconfig.tcl b/autosetup/find_tclconfig.tcl new file mode 100644 index 0000000000..c3d3df8ec3 --- /dev/null +++ b/autosetup/find_tclconfig.tcl @@ -0,0 +1,24 @@ +# +# Run this TCL script to find and print the pathname for the tclConfig.sh +# file. Used by ../configure +# +if {[catch { + set libdir [tcl::pkgconfig get libdir,install] +}]} { + puts stderr "tclsh too old: does not support tcl::pkgconfig" + exit 1 +} +if {![file exists $libdir]} { + puts stderr "tclsh reported library directory \"$libdir\" does not exist" + exit 1 +} +if {![file exists $libdir/tclConfig.sh]} { + set n1 $libdir/tcl$::tcl_version + if {[file exists $n1/tclConfig.sh]} { + set libdir $n1 + } else { + puts stderr "cannot find tclConfig.sh in either $libdir or $n1" + exit 1 + } +} +puts $libdir diff --git a/autosetup/jimsh0.c b/autosetup/jimsh0.c new file mode 100644 index 0000000000..1a6453d0c8 --- /dev/null +++ b/autosetup/jimsh0.c @@ -0,0 +1,24506 @@ +/* This is single source file, bootstrap version of Jim Tcl. See http://jim.tcl.tk/ */ +#define JIM_COMPAT +#define JIM_ANSIC +#define JIM_REGEXP +#define HAVE_NO_AUTOCONF +#define JIM_TINY +#define _JIMAUTOCONF_H +#define TCL_LIBRARY "." 
+#define jim_ext_bootstrap +#define jim_ext_aio +#define jim_ext_readdir +#define jim_ext_regexp +#define jim_ext_file +#define jim_ext_glob +#define jim_ext_exec +#define jim_ext_clock +#define jim_ext_array +#define jim_ext_stdlib +#define jim_ext_tclcompat +#if defined(_MSC_VER) +#define TCL_PLATFORM_OS "windows" +#define TCL_PLATFORM_PLATFORM "windows" +#define TCL_PLATFORM_PATH_SEPARATOR ";" +#define HAVE_MKDIR_ONE_ARG +#define HAVE_SYSTEM +#elif defined(__MINGW32__) +#define TCL_PLATFORM_OS "mingw" +#define TCL_PLATFORM_PLATFORM "windows" +#define TCL_PLATFORM_PATH_SEPARATOR ";" +#define HAVE_MKDIR_ONE_ARG +#define HAVE_SYSTEM +#define HAVE_SYS_TIME_H +#define HAVE_DIRENT_H +#define HAVE_UNISTD_H +#define HAVE_UMASK +#include +#ifndef S_IRWXG +#define S_IRWXG 0 +#endif +#ifndef S_IRWXO +#define S_IRWXO 0 +#endif +#else +#define TCL_PLATFORM_OS "unknown" +#define TCL_PLATFORM_PLATFORM "unix" +#define TCL_PLATFORM_PATH_SEPARATOR ":" +#ifdef _MINIX +#define vfork fork +#define _POSIX_SOURCE +#else +#define _GNU_SOURCE +#endif +#define HAVE_FORK +#define HAVE_WAITPID +#define HAVE_ISATTY +#define HAVE_MKSTEMP +#define HAVE_LINK +#define HAVE_SYS_TIME_H +#define HAVE_DIRENT_H +#define HAVE_UNISTD_H +#define HAVE_UMASK +#define HAVE_PIPE +#define _FILE_OFFSET_BITS 64 +#endif +#define JIM_VERSION 84 +#ifndef JIM_WIN32COMPAT_H +#define JIM_WIN32COMPAT_H + + + +#ifdef __cplusplus +extern "C" { +#endif + + +#if defined(_WIN32) || defined(WIN32) + +#define HAVE_DLOPEN +void *dlopen(const char *path, int mode); +int dlclose(void *handle); +void *dlsym(void *handle, const char *symbol); +char *dlerror(void); + + +#if defined(__MINGW32__) + #define JIM_SPRINTF_DOUBLE_NEEDS_FIX +#endif + +#ifdef _MSC_VER + + +#if _MSC_VER >= 1000 + #pragma warning(disable:4146) +#endif + +#include +#define jim_wide _int64 +#ifndef HAVE_LONG_LONG +#define HAVE_LONG_LONG +#endif +#ifndef LLONG_MAX + #define LLONG_MAX 9223372036854775807I64 +#endif +#ifndef LLONG_MIN + #define LLONG_MIN (-LLONG_MAX - 1I64) +#endif +#define JIM_WIDE_MIN LLONG_MIN +#define JIM_WIDE_MAX LLONG_MAX +#define JIM_WIDE_MODIFIER "I64d" +#define strcasecmp _stricmp +#define strtoull _strtoui64 + +#include + +#include +int gettimeofday(struct timeval *tv, void *unused); + +#define HAVE_OPENDIR +struct dirent { + char *d_name; +}; + +typedef struct DIR { + long handle; + struct _finddata_t info; + struct dirent result; + char *name; +} DIR; + +DIR *opendir(const char *name); +int closedir(DIR *dir); +struct dirent *readdir(DIR *dir); + +#endif + +#endif + +#ifdef __cplusplus +} +#endif + +#endif +#ifndef UTF8_UTIL_H +#define UTF8_UTIL_H + +#ifdef __cplusplus +extern "C" { +#endif + + + +#define MAX_UTF8_LEN 4 + +int utf8_fromunicode(char *p, unsigned uc); + +#ifndef JIM_UTF8 +#include + + +#define utf8_strlen(S, B) ((B) < 0 ? 
(int)strlen(S) : (B)) +#define utf8_strwidth(S, B) utf8_strlen((S), (B)) +#define utf8_tounicode(S, CP) (*(CP) = (unsigned char)*(S), 1) +#define utf8_getchars(CP, C) (*(CP) = (C), 1) +#define utf8_upper(C) toupper(C) +#define utf8_title(C) toupper(C) +#define utf8_lower(C) tolower(C) +#define utf8_index(C, I) (I) +#define utf8_charlen(C) 1 +#define utf8_prev_len(S, L) 1 +#define utf8_width(C) 1 + +#else + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + +#ifndef __JIM__H +#define __JIM__H + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include + + +#ifndef HAVE_NO_AUTOCONF +#endif + + + +#ifndef jim_wide +# ifdef HAVE_LONG_LONG +# define jim_wide long long +# ifndef LLONG_MAX +# define LLONG_MAX 9223372036854775807LL +# endif +# ifndef LLONG_MIN +# define LLONG_MIN (-LLONG_MAX - 1LL) +# endif +# define JIM_WIDE_MIN LLONG_MIN +# define JIM_WIDE_MAX LLONG_MAX +# else +# define jim_wide long +# define JIM_WIDE_MIN LONG_MIN +# define JIM_WIDE_MAX LONG_MAX +# endif + + +# ifdef HAVE_LONG_LONG +# define JIM_WIDE_MODIFIER "lld" +# else +# define JIM_WIDE_MODIFIER "ld" +# define strtoull strtoul +# endif +#endif + +#define UCHAR(c) ((unsigned char)(c)) + + + +#define JIM_ABI_VERSION 101 + +#define JIM_OK 0 +#define JIM_ERR 1 +#define JIM_RETURN 2 +#define JIM_BREAK 3 +#define JIM_CONTINUE 4 +#define JIM_SIGNAL 5 +#define JIM_EXIT 6 + +#define JIM_EVAL 7 + +#define JIM_MAX_CALLFRAME_DEPTH 1000 +#define JIM_MAX_EVAL_DEPTH 2000 + + +#define JIM_PRIV_FLAG_SHIFT 20 + +#define JIM_NONE 0 +#define JIM_ERRMSG 1 +#define JIM_ENUM_ABBREV 2 +#define JIM_UNSHARED 4 +#define JIM_MUSTEXIST 8 +#define JIM_NORESULT 16 + + +#define JIM_SUBST_NOVAR 1 +#define JIM_SUBST_NOCMD 2 +#define JIM_SUBST_NOESC 4 +#define JIM_SUBST_FLAG 128 + + +#define JIM_CASESENS 0 +#define JIM_NOCASE 1 +#define JIM_OPT_END 2 + + +#define JIM_PATH_LEN 1024 + + +#define JIM_NOTUSED(V) ((void) V) + +#define JIM_LIBPATH "auto_path" +#define JIM_INTERACTIVE "tcl_interactive" + + +typedef struct Jim_Stack { + int len; + int maxlen; + void **vector; +} Jim_Stack; + + +typedef struct Jim_HashEntry { + void *key; + union { + void *val; + int intval; + } u; + struct Jim_HashEntry *next; +} Jim_HashEntry; + +typedef struct Jim_HashTableType { + unsigned int (*hashFunction)(const void *key); + void *(*keyDup)(void *privdata, const void *key); + void *(*valDup)(void *privdata, const void *obj); + int (*keyCompare)(void *privdata, const void *key1, const void *key2); + void (*keyDestructor)(void *privdata, void *key); + void (*valDestructor)(void *privdata, void *obj); +} Jim_HashTableType; + +typedef struct Jim_HashTable { + Jim_HashEntry **table; + const Jim_HashTableType *type; + void *privdata; + unsigned int size; + unsigned int sizemask; + unsigned int used; + unsigned int collisions; + unsigned int uniq; +} Jim_HashTable; + +typedef struct Jim_HashTableIterator { + Jim_HashTable *ht; + Jim_HashEntry *entry, *nextEntry; + int index; +} Jim_HashTableIterator; + + +#define JIM_HT_INITIAL_SIZE 16 + + +#define Jim_FreeEntryVal(ht, entry) \ + if ((ht)->type->valDestructor) \ + (ht)->type->valDestructor((ht)->privdata, (entry)->u.val) + +#define Jim_SetHashVal(ht, entry, _val_) do { \ + if ((ht)->type->valDup) \ + (entry)->u.val = (ht)->type->valDup((ht)->privdata, (_val_)); \ + else \ + (entry)->u.val = (_val_); \ +} while(0) + +#define Jim_SetHashIntVal(ht, entry, _val_) (entry)->u.intval = (_val_) + +#define Jim_FreeEntryKey(ht, entry) \ + if ((ht)->type->keyDestructor) \ + 
(ht)->type->keyDestructor((ht)->privdata, (entry)->key) + +#define Jim_SetHashKey(ht, entry, _key_) do { \ + if ((ht)->type->keyDup) \ + (entry)->key = (ht)->type->keyDup((ht)->privdata, (_key_)); \ + else \ + (entry)->key = (void *)(_key_); \ +} while(0) + +#define Jim_CompareHashKeys(ht, key1, key2) \ + (((ht)->type->keyCompare) ? \ + (ht)->type->keyCompare((ht)->privdata, (key1), (key2)) : \ + (key1) == (key2)) + +#define Jim_HashKey(ht, key) ((ht)->type->hashFunction(key) + (ht)->uniq) + +#define Jim_GetHashEntryKey(he) ((he)->key) +#define Jim_GetHashEntryVal(he) ((he)->u.val) +#define Jim_GetHashEntryIntVal(he) ((he)->u.intval) +#define Jim_GetHashTableCollisions(ht) ((ht)->collisions) +#define Jim_GetHashTableSize(ht) ((ht)->size) +#define Jim_GetHashTableUsed(ht) ((ht)->used) + + +typedef struct Jim_Obj { + char *bytes; + const struct Jim_ObjType *typePtr; + int refCount; + int length; + + union { + + jim_wide wideValue; + + int intValue; + + double doubleValue; + + void *ptr; + + struct { + void *ptr1; + void *ptr2; + } twoPtrValue; + + struct { + void *ptr; + int int1; + int int2; + } ptrIntValue; + + struct { + struct Jim_VarVal *vv; + unsigned long callFrameId; + int global; + } varValue; + + struct { + struct Jim_Obj *nsObj; + struct Jim_Cmd *cmdPtr; + unsigned long procEpoch; + } cmdValue; + + struct { + struct Jim_Obj **ele; + int len; + int maxLen; + } listValue; + + struct Jim_Dict *dictValue; + + struct { + int maxLength; + int charLength; + } strValue; + + struct { + unsigned long id; + struct Jim_Reference *refPtr; + } refValue; + + struct { + struct Jim_Obj *fileNameObj; + int lineNumber; + } sourceValue; + + struct { + struct Jim_Obj *varNameObjPtr; + struct Jim_Obj *indexObjPtr; + } dictSubstValue; + struct { + int line; + int argc; + } scriptLineValue; + } internalRep; + struct Jim_Obj *prevObjPtr; + struct Jim_Obj *nextObjPtr; +} Jim_Obj; + + +#define Jim_IncrRefCount(objPtr) \ + ++(objPtr)->refCount +#define Jim_DecrRefCount(interp, objPtr) \ + if (--(objPtr)->refCount <= 0) Jim_FreeObj(interp, objPtr) +#define Jim_IsShared(objPtr) \ + ((objPtr)->refCount > 1) + +#define Jim_FreeNewObj Jim_FreeObj + + +#define Jim_FreeIntRep(i,o) \ + if ((o)->typePtr && (o)->typePtr->freeIntRepProc) \ + (o)->typePtr->freeIntRepProc(i, o) + + +#define Jim_GetIntRepPtr(o) (o)->internalRep.ptr + + +#define Jim_SetIntRepPtr(o, p) \ + (o)->internalRep.ptr = (p) + + +struct Jim_Interp; + +typedef void (Jim_FreeInternalRepProc)(struct Jim_Interp *interp, + struct Jim_Obj *objPtr); +typedef void (Jim_DupInternalRepProc)(struct Jim_Interp *interp, + struct Jim_Obj *srcPtr, Jim_Obj *dupPtr); +typedef void (Jim_UpdateStringProc)(struct Jim_Obj *objPtr); + +typedef struct Jim_ObjType { + const char *name; + Jim_FreeInternalRepProc *freeIntRepProc; + Jim_DupInternalRepProc *dupIntRepProc; + Jim_UpdateStringProc *updateStringProc; + int flags; +} Jim_ObjType; + + +#define JIM_TYPE_NONE 0 +#define JIM_TYPE_REFERENCES 1 + + + +typedef struct Jim_CallFrame { + unsigned long id; + int level; + struct Jim_HashTable vars; + struct Jim_HashTable *staticVars; + struct Jim_CallFrame *parent; + Jim_Obj *const *argv; + int argc; + Jim_Obj *procArgsObjPtr; + Jim_Obj *procBodyObjPtr; + struct Jim_CallFrame *next; + Jim_Obj *nsObj; + Jim_Obj *unused_fileNameObj; + int unused_line; + Jim_Stack *localCommands; + struct Jim_Obj *tailcallObj; + struct Jim_Cmd *tailcallCmd; +} Jim_CallFrame; + + +typedef struct Jim_EvalFrame { + Jim_CallFrame *framePtr; + int level; + int procLevel; + struct Jim_Cmd *cmd; + 
struct Jim_EvalFrame *parent; + Jim_Obj *const *argv; + int argc; + Jim_Obj *scriptObj; +} Jim_EvalFrame; + +typedef struct Jim_VarVal { + Jim_Obj *objPtr; + struct Jim_CallFrame *linkFramePtr; + int refCount; +} Jim_VarVal; + + +typedef int Jim_CmdProc(struct Jim_Interp *interp, int argc, + Jim_Obj *const *argv); +typedef void Jim_DelCmdProc(struct Jim_Interp *interp, void *privData); + +typedef struct Jim_Dict { + struct JimDictHashEntry { + int offset; + unsigned hash; + } *ht; + unsigned int size; + unsigned int sizemask; + unsigned int uniq; + Jim_Obj **table; + int len; + int maxLen; + unsigned int dummy; +} Jim_Dict; + +typedef struct Jim_Cmd { + int inUse; + int isproc; + struct Jim_Cmd *prevCmd; + Jim_Obj *cmdNameObj; + union { + struct { + + Jim_CmdProc *cmdProc; + Jim_DelCmdProc *delProc; + void *privData; + } native; + struct { + + Jim_Obj *argListObjPtr; + Jim_Obj *bodyObjPtr; + Jim_HashTable *staticVars; + int argListLen; + int reqArity; + int optArity; + int argsPos; + int upcall; + struct Jim_ProcArg { + Jim_Obj *nameObjPtr; + Jim_Obj *defaultObjPtr; + } *arglist; + Jim_Obj *nsObj; + } proc; + } u; +} Jim_Cmd; + + +typedef struct Jim_PrngState { + unsigned char sbox[256]; + unsigned int i, j; +} Jim_PrngState; + +typedef struct Jim_Interp { + Jim_Obj *result; + int unused_errorLine; + Jim_Obj *currentFilenameObj; + int break_level; + int maxCallFrameDepth; + int maxEvalDepth; + int evalDepth; + int returnCode; + int returnLevel; + int exitCode; + long id; + int signal_level; + jim_wide sigmask; + int (*signal_set_result)(struct Jim_Interp *interp, jim_wide sigmask); + Jim_CallFrame *framePtr; + Jim_CallFrame *topFramePtr; + struct Jim_HashTable commands; + unsigned long procEpoch; /* Incremented every time the result + of procedures names lookup caching + may no longer be valid. */ + unsigned long callFrameEpoch; /* Incremented every time a new + callframe is created. This id is used for the + 'ID' field contained in the Jim_CallFrame + structure. */ + int local; + int quitting; + int safeexpr; + Jim_Obj *liveList; + Jim_Obj *freeList; + Jim_Obj *unused_currentScriptObj; + Jim_EvalFrame topEvalFrame; + Jim_EvalFrame *evalFrame; + int procLevel; + Jim_Obj * const *unused_argv; + Jim_Obj *nullScriptObj; + Jim_Obj *emptyObj; + Jim_Obj *trueObj; + Jim_Obj *falseObj; + unsigned long referenceNextId; + struct Jim_HashTable references; + unsigned long lastCollectId; /* reference max Id of the last GC + execution. It's set to ~0 while the collection + is running as sentinel to avoid to recursive + calls via the [collect] command inside + finalizers. */ + jim_wide lastCollectTime; + Jim_Obj *stackTrace; + Jim_Obj *errorProc; + Jim_Obj *unknown; + Jim_Obj *defer; + Jim_Obj *traceCmdObj; + int unknown_called; + int errorFlag; + void *cmdPrivData; /* Used to pass the private data pointer to + a command. It is set to what the user specified + via Jim_CreateCommand(). 
*/ + + Jim_Cmd *oldCmdCache; + int oldCmdCacheSize; + struct Jim_CallFrame *freeFramesList; + struct Jim_HashTable assocData; + Jim_PrngState *prngState; + struct Jim_HashTable packages; + Jim_Stack *loadHandles; +} Jim_Interp; + +#define Jim_SetResultString(i,s,l) Jim_SetResult(i, Jim_NewStringObj(i,s,l)) +#define Jim_SetResultInt(i,intval) Jim_SetResult(i, Jim_NewIntObj(i,intval)) + +#define Jim_SetResultBool(i,b) Jim_SetResultInt(i, b) +#define Jim_SetEmptyResult(i) Jim_SetResult(i, (i)->emptyObj) +#define Jim_GetResult(i) ((i)->result) +#define Jim_CmdPrivData(i) ((i)->cmdPrivData) + +#define Jim_SetResult(i,o) do { \ + Jim_Obj *_resultObjPtr_ = (o); \ + Jim_IncrRefCount(_resultObjPtr_); \ + Jim_DecrRefCount(i,(i)->result); \ + (i)->result = _resultObjPtr_; \ +} while(0) + + +#define Jim_GetId(i) (++(i)->id) + + +#define JIM_REFERENCE_TAGLEN 7 /* The tag is fixed-length, because the reference + string representation must be fixed length. */ +typedef struct Jim_Reference { + Jim_Obj *objPtr; + Jim_Obj *finalizerCmdNamePtr; + char tag[JIM_REFERENCE_TAGLEN+1]; +} Jim_Reference; + + +#define Jim_NewEmptyStringObj(i) Jim_NewStringObj(i, "", 0) +#define Jim_FreeHashTableIterator(iter) Jim_Free(iter) + +#define JIM_EXPORT extern + + + +JIM_EXPORT void *(*Jim_Allocator)(void *ptr, size_t size); + +#define Jim_Free(P) Jim_Allocator((P), 0) +#define Jim_Realloc(P, S) Jim_Allocator((P), (S)) +#define Jim_Alloc(S) Jim_Allocator(NULL, (S)) +JIM_EXPORT char * Jim_StrDup (const char *s); +JIM_EXPORT char *Jim_StrDupLen(const char *s, int l); + + +JIM_EXPORT char **Jim_GetEnviron(void); +JIM_EXPORT void Jim_SetEnviron(char **env); +JIM_EXPORT int Jim_MakeTempFile(Jim_Interp *interp, const char *filename_template, int unlink_file); +#ifndef CLOCK_REALTIME +# define CLOCK_REALTIME 0 +#endif +#ifndef CLOCK_MONOTONIC +# define CLOCK_MONOTONIC 1 +#endif +#ifndef CLOCK_MONOTONIC_RAW +# define CLOCK_MONOTONIC_RAW CLOCK_MONOTONIC +#endif +JIM_EXPORT jim_wide Jim_GetTimeUsec(unsigned type); + + +JIM_EXPORT int Jim_Eval(Jim_Interp *interp, const char *script); + + +JIM_EXPORT int Jim_EvalSource(Jim_Interp *interp, const char *filename, int lineno, const char *script); + +#define Jim_Eval_Named(I, S, F, L) Jim_EvalSource((I), (F), (L), (S)) + +JIM_EXPORT int Jim_EvalGlobal(Jim_Interp *interp, const char *script); +JIM_EXPORT int Jim_EvalFile(Jim_Interp *interp, const char *filename); +JIM_EXPORT int Jim_EvalFileGlobal(Jim_Interp *interp, const char *filename); +JIM_EXPORT int Jim_EvalObj (Jim_Interp *interp, Jim_Obj *scriptObjPtr); +JIM_EXPORT int Jim_EvalObjVector (Jim_Interp *interp, int objc, + Jim_Obj *const *objv); +JIM_EXPORT int Jim_EvalObjList(Jim_Interp *interp, Jim_Obj *listObj); +JIM_EXPORT int Jim_EvalObjPrefix(Jim_Interp *interp, Jim_Obj *prefix, + int objc, Jim_Obj *const *objv); +#define Jim_EvalPrefix(i, p, oc, ov) Jim_EvalObjPrefix((i), Jim_NewStringObj((i), (p), -1), (oc), (ov)) +JIM_EXPORT int Jim_EvalNamespace(Jim_Interp *interp, Jim_Obj *scriptObj, Jim_Obj *nsObj); +JIM_EXPORT int Jim_SubstObj (Jim_Interp *interp, Jim_Obj *substObjPtr, + Jim_Obj **resObjPtrPtr, int flags); + + +JIM_EXPORT Jim_Obj *Jim_GetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, + int *lineptr); + +JIM_EXPORT void Jim_SetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *fileNameObj, int lineNumber); + + + +JIM_EXPORT void Jim_InitStack(Jim_Stack *stack); +JIM_EXPORT void Jim_FreeStack(Jim_Stack *stack); +JIM_EXPORT int Jim_StackLen(Jim_Stack *stack); +JIM_EXPORT void Jim_StackPush(Jim_Stack *stack, void 
*element); +JIM_EXPORT void * Jim_StackPop(Jim_Stack *stack); +JIM_EXPORT void * Jim_StackPeek(Jim_Stack *stack); +JIM_EXPORT void Jim_FreeStackElements(Jim_Stack *stack, void (*freeFunc)(void *ptr)); + + +JIM_EXPORT int Jim_InitHashTable (Jim_HashTable *ht, + const Jim_HashTableType *type, void *privdata); +JIM_EXPORT void Jim_ExpandHashTable (Jim_HashTable *ht, + unsigned int size); +JIM_EXPORT int Jim_AddHashEntry (Jim_HashTable *ht, const void *key, + void *val); +JIM_EXPORT int Jim_ReplaceHashEntry (Jim_HashTable *ht, + const void *key, void *val); +JIM_EXPORT int Jim_DeleteHashEntry (Jim_HashTable *ht, + const void *key); +JIM_EXPORT int Jim_FreeHashTable (Jim_HashTable *ht); +JIM_EXPORT Jim_HashEntry * Jim_FindHashEntry (Jim_HashTable *ht, + const void *key); +JIM_EXPORT Jim_HashTableIterator *Jim_GetHashTableIterator + (Jim_HashTable *ht); +JIM_EXPORT Jim_HashEntry * Jim_NextHashEntry + (Jim_HashTableIterator *iter); + + +JIM_EXPORT Jim_Obj * Jim_NewObj (Jim_Interp *interp); +JIM_EXPORT void Jim_FreeObj (Jim_Interp *interp, Jim_Obj *objPtr); +JIM_EXPORT void Jim_InvalidateStringRep (Jim_Obj *objPtr); +JIM_EXPORT Jim_Obj * Jim_DuplicateObj (Jim_Interp *interp, + Jim_Obj *objPtr); +JIM_EXPORT const char * Jim_GetString(Jim_Obj *objPtr, + int *lenPtr); +JIM_EXPORT const char *Jim_String(Jim_Obj *objPtr); +JIM_EXPORT int Jim_Length(Jim_Obj *objPtr); + + +JIM_EXPORT Jim_Obj * Jim_NewStringObj (Jim_Interp *interp, + const char *s, int len); +JIM_EXPORT Jim_Obj *Jim_NewStringObjUtf8(Jim_Interp *interp, + const char *s, int charlen); +JIM_EXPORT Jim_Obj * Jim_NewStringObjNoAlloc (Jim_Interp *interp, + char *s, int len); +JIM_EXPORT void Jim_AppendString (Jim_Interp *interp, Jim_Obj *objPtr, + const char *str, int len); +JIM_EXPORT void Jim_AppendObj (Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *appendObjPtr); +JIM_EXPORT void Jim_AppendStrings (Jim_Interp *interp, + Jim_Obj *objPtr, ...); +JIM_EXPORT int Jim_StringEqObj(Jim_Obj *aObjPtr, Jim_Obj *bObjPtr); +JIM_EXPORT int Jim_StringMatchObj (Jim_Interp *interp, Jim_Obj *patternObjPtr, + Jim_Obj *objPtr, int nocase); +JIM_EXPORT Jim_Obj * Jim_StringRangeObj (Jim_Interp *interp, + Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, + Jim_Obj *lastObjPtr); +JIM_EXPORT Jim_Obj * Jim_FormatString (Jim_Interp *interp, + Jim_Obj *fmtObjPtr, int objc, Jim_Obj *const *objv); +JIM_EXPORT Jim_Obj * Jim_ScanString (Jim_Interp *interp, Jim_Obj *strObjPtr, + Jim_Obj *fmtObjPtr, int flags); +JIM_EXPORT int Jim_CompareStringImmediate (Jim_Interp *interp, + Jim_Obj *objPtr, const char *str); +JIM_EXPORT int Jim_StringCompareObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, + Jim_Obj *secondObjPtr, int nocase); +JIM_EXPORT int Jim_Utf8Length(Jim_Interp *interp, Jim_Obj *objPtr); + + +JIM_EXPORT Jim_Obj * Jim_NewReference (Jim_Interp *interp, + Jim_Obj *objPtr, Jim_Obj *tagPtr, Jim_Obj *cmdNamePtr); +JIM_EXPORT Jim_Reference * Jim_GetReference (Jim_Interp *interp, + Jim_Obj *objPtr); +JIM_EXPORT int Jim_SetFinalizer (Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *cmdNamePtr); +JIM_EXPORT int Jim_GetFinalizer (Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj **cmdNamePtrPtr); + + +JIM_EXPORT Jim_Interp * Jim_CreateInterp (void); +JIM_EXPORT void Jim_FreeInterp (Jim_Interp *i); +JIM_EXPORT int Jim_GetExitCode (Jim_Interp *interp); +JIM_EXPORT const char *Jim_ReturnCode(int code); +JIM_EXPORT void Jim_SetResultFormatted(Jim_Interp *interp, const char *format, ...); + + +JIM_EXPORT void Jim_RegisterCoreCommands (Jim_Interp *interp); +JIM_EXPORT int Jim_CreateCommand (Jim_Interp 
*interp, + const char *cmdName, Jim_CmdProc *cmdProc, void *privData, + Jim_DelCmdProc *delProc); +JIM_EXPORT int Jim_DeleteCommand (Jim_Interp *interp, + Jim_Obj *cmdNameObj); +JIM_EXPORT int Jim_RenameCommand (Jim_Interp *interp, + Jim_Obj *oldNameObj, Jim_Obj *newNameObj); +JIM_EXPORT Jim_Cmd * Jim_GetCommand (Jim_Interp *interp, + Jim_Obj *objPtr, int flags); +JIM_EXPORT int Jim_SetVariable (Jim_Interp *interp, + Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr); +JIM_EXPORT int Jim_SetVariableStr (Jim_Interp *interp, + const char *name, Jim_Obj *objPtr); +JIM_EXPORT int Jim_SetGlobalVariableStr (Jim_Interp *interp, + const char *name, Jim_Obj *objPtr); +JIM_EXPORT int Jim_SetVariableStrWithStr (Jim_Interp *interp, + const char *name, const char *val); +JIM_EXPORT int Jim_SetVariableLink (Jim_Interp *interp, + Jim_Obj *nameObjPtr, Jim_Obj *targetNameObjPtr, + Jim_CallFrame *targetCallFrame); +JIM_EXPORT Jim_Obj * Jim_MakeGlobalNamespaceName(Jim_Interp *interp, + Jim_Obj *nameObjPtr); +JIM_EXPORT Jim_Obj * Jim_GetVariable (Jim_Interp *interp, + Jim_Obj *nameObjPtr, int flags); +JIM_EXPORT Jim_Obj * Jim_GetGlobalVariable (Jim_Interp *interp, + Jim_Obj *nameObjPtr, int flags); +JIM_EXPORT Jim_Obj * Jim_GetVariableStr (Jim_Interp *interp, + const char *name, int flags); +JIM_EXPORT Jim_Obj * Jim_GetGlobalVariableStr (Jim_Interp *interp, + const char *name, int flags); +JIM_EXPORT int Jim_UnsetVariable (Jim_Interp *interp, + Jim_Obj *nameObjPtr, int flags); + + +JIM_EXPORT Jim_CallFrame *Jim_GetCallFrameByLevel(Jim_Interp *interp, + Jim_Obj *levelObjPtr); + + +JIM_EXPORT int Jim_Collect (Jim_Interp *interp); +JIM_EXPORT void Jim_CollectIfNeeded (Jim_Interp *interp); + + +JIM_EXPORT int Jim_GetIndex (Jim_Interp *interp, Jim_Obj *objPtr, + int *indexPtr); + + +JIM_EXPORT Jim_Obj * Jim_NewListObj (Jim_Interp *interp, + Jim_Obj *const *elements, int len); +JIM_EXPORT void Jim_ListInsertElements (Jim_Interp *interp, + Jim_Obj *listPtr, int listindex, int objc, Jim_Obj *const *objVec); +JIM_EXPORT void Jim_ListAppendElement (Jim_Interp *interp, + Jim_Obj *listPtr, Jim_Obj *objPtr); +JIM_EXPORT void Jim_ListAppendList (Jim_Interp *interp, + Jim_Obj *listPtr, Jim_Obj *appendListPtr); +JIM_EXPORT int Jim_ListLength (Jim_Interp *interp, Jim_Obj *objPtr); +JIM_EXPORT int Jim_ListIndex (Jim_Interp *interp, Jim_Obj *listPrt, + int listindex, Jim_Obj **objPtrPtr, int seterr); +JIM_EXPORT Jim_Obj *Jim_ListGetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx); +JIM_EXPORT int Jim_SetListIndex (Jim_Interp *interp, + Jim_Obj *varNamePtr, Jim_Obj *const *indexv, int indexc, + Jim_Obj *newObjPtr); +JIM_EXPORT Jim_Obj * Jim_ConcatObj (Jim_Interp *interp, int objc, + Jim_Obj *const *objv); +JIM_EXPORT Jim_Obj *Jim_ListJoin(Jim_Interp *interp, + Jim_Obj *listObjPtr, const char *joinStr, int joinStrLen); + + +JIM_EXPORT Jim_Obj * Jim_NewDictObj (Jim_Interp *interp, + Jim_Obj *const *elements, int len); +JIM_EXPORT int Jim_DictKey (Jim_Interp *interp, Jim_Obj *dictPtr, + Jim_Obj *keyPtr, Jim_Obj **objPtrPtr, int flags); +JIM_EXPORT int Jim_DictKeysVector (Jim_Interp *interp, + Jim_Obj *dictPtr, Jim_Obj *const *keyv, int keyc, + Jim_Obj **objPtrPtr, int flags); +JIM_EXPORT int Jim_SetDictKeysVector (Jim_Interp *interp, + Jim_Obj *varNamePtr, Jim_Obj *const *keyv, int keyc, + Jim_Obj *newObjPtr, int flags); +JIM_EXPORT Jim_Obj **Jim_DictPairs(Jim_Interp *interp, + Jim_Obj *dictPtr, int *len); +JIM_EXPORT int Jim_DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr); + +#define 
JIM_DICTMATCH_KEYS 0x0001 +#define JIM_DICTMATCH_VALUES 0x002 + +JIM_EXPORT int Jim_DictMatchTypes(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObj, int match_type, int return_types); +JIM_EXPORT int Jim_DictSize(Jim_Interp *interp, Jim_Obj *objPtr); +JIM_EXPORT int Jim_DictInfo(Jim_Interp *interp, Jim_Obj *objPtr); +JIM_EXPORT Jim_Obj *Jim_DictMerge(Jim_Interp *interp, int objc, Jim_Obj *const *objv); + + +JIM_EXPORT int Jim_GetReturnCode (Jim_Interp *interp, Jim_Obj *objPtr, + int *intPtr); + + +JIM_EXPORT int Jim_EvalExpression (Jim_Interp *interp, + Jim_Obj *exprObjPtr); +JIM_EXPORT int Jim_GetBoolFromExpr (Jim_Interp *interp, + Jim_Obj *exprObjPtr, int *boolPtr); + + +JIM_EXPORT int Jim_GetBoolean(Jim_Interp *interp, Jim_Obj *objPtr, + int *booleanPtr); + + +JIM_EXPORT int Jim_GetWide (Jim_Interp *interp, Jim_Obj *objPtr, + jim_wide *widePtr); +JIM_EXPORT int Jim_GetWideExpr(Jim_Interp *interp, Jim_Obj *objPtr, + jim_wide *widePtr); +JIM_EXPORT int Jim_GetLong (Jim_Interp *interp, Jim_Obj *objPtr, + long *longPtr); +#define Jim_NewWideObj Jim_NewIntObj +JIM_EXPORT Jim_Obj * Jim_NewIntObj (Jim_Interp *interp, + jim_wide wideValue); + + +JIM_EXPORT int Jim_GetDouble(Jim_Interp *interp, Jim_Obj *objPtr, + double *doublePtr); +JIM_EXPORT void Jim_SetDouble(Jim_Interp *interp, Jim_Obj *objPtr, + double doubleValue); +JIM_EXPORT Jim_Obj * Jim_NewDoubleObj(Jim_Interp *interp, double doubleValue); + + +JIM_EXPORT void Jim_WrongNumArgs (Jim_Interp *interp, int argc, + Jim_Obj *const *argv, const char *msg); +JIM_EXPORT int Jim_GetEnum (Jim_Interp *interp, Jim_Obj *objPtr, + const char * const *tablePtr, int *indexPtr, const char *name, int flags); +JIM_EXPORT int Jim_CheckShowCommands(Jim_Interp *interp, Jim_Obj *objPtr, + const char *const *tablePtr); +JIM_EXPORT int Jim_ScriptIsComplete(Jim_Interp *interp, + Jim_Obj *scriptObj, char *stateCharPtr); + +JIM_EXPORT int Jim_FindByName(const char *name, const char * const array[], size_t len); + + +typedef void (Jim_InterpDeleteProc)(Jim_Interp *interp, void *data); +JIM_EXPORT void * Jim_GetAssocData(Jim_Interp *interp, const char *key); +JIM_EXPORT int Jim_SetAssocData(Jim_Interp *interp, const char *key, + Jim_InterpDeleteProc *delProc, void *data); +JIM_EXPORT int Jim_DeleteAssocData(Jim_Interp *interp, const char *key); +JIM_EXPORT int Jim_CheckAbiVersion(Jim_Interp *interp, int abi_version); + + + + +JIM_EXPORT int Jim_PackageProvide (Jim_Interp *interp, + const char *name, const char *ver, int flags); +JIM_EXPORT int Jim_PackageRequire (Jim_Interp *interp, + const char *name, int flags); +#define Jim_PackageProvideCheck(INTERP, NAME) \ + if (Jim_CheckAbiVersion(INTERP, JIM_ABI_VERSION) == JIM_ERR || Jim_PackageProvide(INTERP, NAME, "1.0", JIM_ERRMSG)) \ + return JIM_ERR + + +JIM_EXPORT void Jim_MakeErrorMessage (Jim_Interp *interp); + + +JIM_EXPORT int Jim_InteractivePrompt (Jim_Interp *interp); +JIM_EXPORT void Jim_HistoryLoad(const char *filename); +JIM_EXPORT void Jim_HistorySave(const char *filename); +JIM_EXPORT char *Jim_HistoryGetline(Jim_Interp *interp, const char *prompt); +JIM_EXPORT void Jim_HistorySetCompletion(Jim_Interp *interp, Jim_Obj *completionCommandObj); +JIM_EXPORT void Jim_HistorySetHints(Jim_Interp *interp, Jim_Obj *hintsCommandObj); +JIM_EXPORT void Jim_HistoryAdd(const char *line); +JIM_EXPORT void Jim_HistoryShow(void); +JIM_EXPORT void Jim_HistorySetMaxLen(int length); +JIM_EXPORT int Jim_HistoryGetMaxLen(void); + + +JIM_EXPORT int Jim_InitStaticExtensions(Jim_Interp *interp); +JIM_EXPORT int 
Jim_StringToWide(const char *str, jim_wide *widePtr, int base); +JIM_EXPORT int Jim_IsBigEndian(void); + +#define Jim_CheckSignal(i) ((i)->signal_level && (i)->sigmask) +JIM_EXPORT void Jim_SignalSetIgnored(jim_wide mask); + + +JIM_EXPORT int Jim_LoadLibrary(Jim_Interp *interp, const char *pathName); +JIM_EXPORT void Jim_FreeLoadHandles(Jim_Interp *interp); + + +JIM_EXPORT int Jim_AioFilehandle(Jim_Interp *interp, Jim_Obj *command); + + +JIM_EXPORT int Jim_IsDict(Jim_Obj *objPtr); +JIM_EXPORT int Jim_IsList(Jim_Obj *objPtr); + +#ifdef __cplusplus +} +#endif + +#endif + +#ifndef JIM_SUBCMD_H +#define JIM_SUBCMD_H + + +#ifdef __cplusplus +extern "C" { +#endif + + +#define JIM_MODFLAG_HIDDEN 0x0001 +#define JIM_MODFLAG_FULLARGV 0x0002 + + + +typedef int jim_subcmd_function(Jim_Interp *interp, int argc, Jim_Obj *const *argv); + +typedef struct { + const char *cmd; + const char *args; + jim_subcmd_function *function; + short minargs; + short maxargs; + unsigned short flags; +} jim_subcmd_type; + +#define JIM_DEF_SUBCMD(name, args, minargs, maxargs) { name, args, NULL, minargs, maxargs } +#define JIM_DEF_SUBCMD_HIDDEN(name, args, minargs, maxargs) { name, args, NULL, minargs, maxargs, JIM_MODFLAG_HIDDEN } + +const jim_subcmd_type * +Jim_ParseSubCmd(Jim_Interp *interp, const jim_subcmd_type *command_table, int argc, Jim_Obj *const *argv); + +int Jim_SubCmdProc(Jim_Interp *interp, int argc, Jim_Obj *const *argv); + +int Jim_CallSubCmd(Jim_Interp *interp, const jim_subcmd_type *ct, int argc, Jim_Obj *const *argv); + +void Jim_SubCmdArgError(Jim_Interp *interp, const jim_subcmd_type *ct, Jim_Obj *subcmd); + +#ifdef __cplusplus +} +#endif + +#endif +#ifndef JIMREGEXP_H +#define JIMREGEXP_H + + +#ifdef __cplusplus +extern "C" { +#endif + +#include + +typedef struct { + int rm_so; + int rm_eo; +} regmatch_t; + + +typedef struct regexp { + + int re_nsub; + + + int cflags; + int err; + int regstart; + int reganch; + int regmust; + int regmlen; + int *program; + + + const char *regparse; + int p; + int proglen; + + + int eflags; + const char *start; + const char *reginput; + const char *regbol; + + + regmatch_t *pmatch; + int nmatch; +} regexp; + +typedef regexp regex_t; + +#define REG_EXTENDED 0 +#define REG_NEWLINE 1 +#define REG_ICASE 2 + +#define REG_NOTBOL 16 + +enum { + REG_NOERROR, + REG_NOMATCH, + REG_BADPAT, + REG_ERR_NULL_ARGUMENT, + REG_ERR_UNKNOWN, + REG_ERR_TOO_BIG, + REG_ERR_NOMEM, + REG_ERR_TOO_MANY_PAREN, + REG_ERR_UNMATCHED_PAREN, + REG_ERR_UNMATCHED_BRACES, + REG_ERR_BAD_COUNT, + REG_ERR_JUNK_ON_END, + REG_ERR_OPERAND_COULD_BE_EMPTY, + REG_ERR_NESTED_COUNT, + REG_ERR_INTERNAL, + REG_ERR_COUNT_FOLLOWS_NOTHING, + REG_ERR_INVALID_ESCAPE, + REG_ERR_CORRUPTED, + REG_ERR_NULL_CHAR, + REG_ERR_UNMATCHED_BRACKET, + REG_ERR_NUM +}; + +int jim_regcomp(regex_t *preg, const char *regex, int cflags); +int jim_regexec(regex_t *preg, const char *string, size_t nmatch, regmatch_t pmatch[], int eflags); +size_t jim_regerror(int errcode, const regex_t *preg, char *errbuf, size_t errbuf_size); +void jim_regfree(regex_t *preg); + +#ifdef __cplusplus +} +#endif + +#endif +#ifndef JIM_SIGNAL_H +#define JIM_SIGNAL_H + +#ifdef __cplusplus +extern "C" { +#endif + +const char *Jim_SignalId(int sig); + +#ifdef __cplusplus +} +#endif + +#endif +#ifndef JIMIOCOMPAT_H +#define JIMIOCOMPAT_H + + +#include +#include +#include + + +void Jim_SetResultErrno(Jim_Interp *interp, const char *msg); + +int Jim_OpenForWrite(const char *filename, int append); + +int Jim_OpenForRead(const char *filename); + +#if 
defined(__MINGW32__) || defined(_WIN32) + #ifndef STRICT + #define STRICT + #endif + #define WIN32_LEAN_AND_MEAN + #include + #include + #include + #include + + typedef HANDLE phandle_t; + #define JIM_BAD_PHANDLE INVALID_HANDLE_VALUE + + + #define WIFEXITED(STATUS) (((STATUS) & 0xff00) == 0) + #define WEXITSTATUS(STATUS) ((STATUS) & 0x00ff) + #define WIFSIGNALED(STATUS) (((STATUS) & 0xff00) != 0) + #define WTERMSIG(STATUS) (((STATUS) >> 8) & 0xff) + #define WNOHANG 1 + + int Jim_Errno(void); + + long waitpid(phandle_t phandle, int *status, int nohang); + + phandle_t JimWaitPid(long processid, int *status, int nohang); + + long JimProcessPid(phandle_t phandle); + + #define HAVE_PIPE + #define pipe(P) _pipe((P), 0, O_NOINHERIT) + + typedef struct __stat64 jim_stat_t; + #define Jim_Stat _stat64 + #define Jim_FileStat _fstat64 + #define Jim_Lseek _lseeki64 + #define O_TEXT _O_TEXT + #define O_BINARY _O_BINARY + #define Jim_SetMode _setmode + #ifndef STDIN_FILENO + #define STDIN_FILENO 0 + #endif + +#else + #if defined(HAVE_STAT64) + typedef struct stat64 jim_stat_t; + #define Jim_Stat stat64 + #if defined(HAVE_FSTAT64) + #define Jim_FileStat fstat64 + #endif + #if defined(HAVE_LSTAT64) + #define Jim_LinkStat lstat64 + #endif + #else + typedef struct stat jim_stat_t; + #define Jim_Stat stat + #if defined(HAVE_FSTAT) + #define Jim_FileStat fstat + #endif + #if defined(HAVE_LSTAT) + #define Jim_LinkStat lstat + #endif + #endif + #if defined(HAVE_LSEEK64) + #define Jim_Lseek lseek64 + #else + #define Jim_Lseek lseek + #endif + + #if defined(HAVE_UNISTD_H) + #include + #include + #include + + typedef int phandle_t; + #define Jim_Errno() errno + #define JIM_BAD_PHANDLE -1 + #define JimProcessPid(PIDTYPE) (PIDTYPE) + #define JimWaitPid waitpid + + #ifndef HAVE_EXECVPE + #define execvpe(ARG0, ARGV, ENV) execvp(ARG0, ARGV) + #endif + #endif + + #ifndef O_TEXT + #define O_TEXT 0 + #endif + +#endif + +# ifndef MAXPATHLEN +# ifdef PATH_MAX +# define MAXPATHLEN PATH_MAX +# else +# define MAXPATHLEN JIM_PATH_LEN +# endif +# endif + + +int Jim_FileStoreStatData(Jim_Interp *interp, Jim_Obj *varName, const jim_stat_t *sb); + +#endif +int Jim_bootstrapInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "bootstrap", "1.0", JIM_ERRMSG)) + return JIM_ERR; + + return Jim_EvalSource(interp, "bootstrap.tcl", 1, +"\n" +"proc package {cmd args} {\n" +" if {$cmd eq \"require\"} {\n" +" foreach path $::auto_path {\n" +" lassign $args pkg\n" +" set pkgpath $path/$pkg.tcl\n" +" if {$path eq \".\"} {\n" +" set pkgpath $pkg.tcl\n" +" }\n" +" if {[file exists $pkgpath]} {\n" +" tailcall uplevel #0 [list source $pkgpath]\n" +" }\n" +" }\n" +" }\n" +"}\n" +"set tcl_platform(bootstrap) 1\n" +); +} +int Jim_initjimshInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "initjimsh", "1.0", JIM_ERRMSG)) + return JIM_ERR; + + return Jim_EvalSource(interp, "initjimsh.tcl", 1, +"\n" +"\n" +"\n" +"proc _jimsh_init {} {\n" +" rename _jimsh_init {}\n" +" global jim::exe jim::argv0 tcl_interactive auto_path tcl_platform\n" +"\n" +"\n" +" if {[exists jim::argv0]} {\n" +" if {[string match \"*/*\" $jim::argv0]} {\n" +" set jim::exe [file join [pwd] $jim::argv0]\n" +" } else {\n" +" set jim::argv0 [file tail $jim::argv0]\n" +" set path [split [env PATH \"\"] $tcl_platform(pathSeparator)]\n" +" if {$tcl_platform(platform) eq \"windows\"} {\n" +"\n" +" set path [lmap p [list \"\" {*}$path] { string map {\\\\ /} $p }]\n" +" }\n" +" foreach p $path {\n" +" set exec [file join [pwd] $p $jim::argv0]\n" +" if {[file executable 
$exec]} {\n" +" set jim::exe $exec\n" +" break\n" +" }\n" +" }\n" +" }\n" +" }\n" +"\n" +"\n" +" lappend p {*}[split [env JIMLIB {}] $tcl_platform(pathSeparator)]\n" +" if {[exists jim::exe]} {\n" +" lappend p [file dirname $jim::exe]\n" +" }\n" +" lappend p {*}$auto_path\n" +" set auto_path $p\n" +"\n" +" if {$tcl_interactive && [env HOME {}] ne \"\"} {\n" +" foreach src {.jimrc jimrc.tcl} {\n" +" if {[file exists [env HOME]/$src]} {\n" +" uplevel #0 source [env HOME]/$src\n" +" break\n" +" }\n" +" }\n" +" }\n" +" return \"\"\n" +"}\n" +"\n" +"if {$tcl_platform(platform) eq \"windows\"} {\n" +" set jim::argv0 [string map {\\\\ /} $jim::argv0]\n" +"}\n" +"\n" +"\n" +"set tcl::autocomplete_commands {array clock debug dict file history info namespace package signal socket string tcl::prefix zlib}\n" +"\n" +"\n" +"\n" +"proc tcl::autocomplete {prefix} {\n" +" if {[set space [string first \" \" $prefix]] != -1} {\n" +" set cmd [string range $prefix 0 $space-1]\n" +" if {$cmd in $::tcl::autocomplete_commands || [info channel $cmd] ne \"\"} {\n" +" set arg [string range $prefix $space+1 end]\n" +"\n" +" return [lmap p [$cmd -commands] {\n" +" if {![string match \"${arg}*\" $p]} continue\n" +" function \"$cmd $p\"\n" +" }]\n" +" }\n" +" }\n" +"\n" +" if {[string match \"source *\" $prefix]} {\n" +" set path [string range $prefix 7 end]\n" +" return [lmap p [glob -nocomplain \"${path}*\"] {\n" +" function \"source $p\"\n" +" }]\n" +" }\n" +"\n" +" return [lmap p [lsort [info commands $prefix*]] {\n" +" if {[string match \"* *\" $p]} {\n" +" continue\n" +" }\n" +" function $p\n" +" }]\n" +"}\n" +"\n" +"\n" +"set tcl::stdhint_commands {array clock debug dict file history info namespace package signal string zlib}\n" +"\n" +"set tcl::stdhint_cols {\n" +" none {0}\n" +" black {30}\n" +" red {31}\n" +" green {32}\n" +" yellow {33}\n" +" blue {34}\n" +" purple {35}\n" +" cyan {36}\n" +" normal {37}\n" +" grey {30 1}\n" +" gray {30 1}\n" +" lred {31 1}\n" +" lgreen {32 1}\n" +" lyellow {33 1}\n" +" lblue {34 1}\n" +" lpurple {35 1}\n" +" lcyan {36 1}\n" +" white {37 1}\n" +"}\n" +"\n" +"\n" +"set tcl::stdhint_col $tcl::stdhint_cols(lcyan)\n" +"\n" +"\n" +"proc tcl::stdhint {string} {\n" +" set result \"\"\n" +" if {[llength $string] >= 2} {\n" +" lassign $string cmd arg\n" +" if {$cmd in $::tcl::stdhint_commands || [info channel $cmd] ne \"\"} {\n" +" catch {\n" +" set help [$cmd -help $arg]\n" +" if {[string match \"Usage: $cmd *\" $help]} {\n" +" set n [llength $string]\n" +" set subcmd [lindex $help $n]\n" +" incr n\n" +" set hint [join [lrange $help $n end]]\n" +" set prefix \"\"\n" +" if {![string match \"* \" $string]} {\n" +" if {$n == 3 && $subcmd ne $arg} {\n" +"\n" +" set prefix \"[string range $subcmd [string length $arg] end] \"\n" +" } else {\n" +" set prefix \" \"\n" +" }\n" +" }\n" +" set result [list $prefix$hint {*}$::tcl::stdhint_col]\n" +" }\n" +" }\n" +" }\n" +" }\n" +" return $result\n" +"}\n" +"\n" +"_jimsh_init\n" +); +} +int Jim_globInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "glob", "1.0", JIM_ERRMSG)) + return JIM_ERR; + + return Jim_EvalSource(interp, "glob.tcl", 1, +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"package require readdir\n" +"\n" +"\n" +"proc glob.globdir {dir pattern} {\n" +" if {[file exists $dir/$pattern]} {\n" +"\n" +" return [list $pattern]\n" +" }\n" +"\n" +" set result {}\n" +" set files [readdir $dir]\n" +" lappend files . 
..\n" +"\n" +" foreach name $files {\n" +" if {[string match $pattern $name]} {\n" +"\n" +" if {[string index $name 0] eq \".\" && [string index $pattern 0] ne \".\"} {\n" +" continue\n" +" }\n" +" lappend result $name\n" +" }\n" +" }\n" +"\n" +" return $result\n" +"}\n" +"\n" +"\n" +"\n" +"\n" +"proc glob.explode {pattern} {\n" +" set oldexp {}\n" +" set newexp {\"\"}\n" +"\n" +" while 1 {\n" +" set oldexp $newexp\n" +" set newexp {}\n" +" set ob [string first \\{ $pattern]\n" +" set cb [string first \\} $pattern]\n" +"\n" +" if {$ob < $cb && $ob != -1} {\n" +" set mid [string range $pattern 0 $ob-1]\n" +" set subexp [lassign [glob.explode [string range $pattern $ob+1 end]] pattern]\n" +" if {$pattern eq \"\"} {\n" +" error \"unmatched open brace in glob pattern\"\n" +" }\n" +" set pattern [string range $pattern 1 end]\n" +"\n" +" foreach subs $subexp {\n" +" foreach sub [split $subs ,] {\n" +" foreach old $oldexp {\n" +" lappend newexp $old$mid$sub\n" +" }\n" +" }\n" +" }\n" +" } elseif {$cb != -1} {\n" +" set suf [string range $pattern 0 $cb-1]\n" +" set rest [string range $pattern $cb end]\n" +" break\n" +" } else {\n" +" set suf $pattern\n" +" set rest \"\"\n" +" break\n" +" }\n" +" }\n" +"\n" +" foreach old $oldexp {\n" +" lappend newexp $old$suf\n" +" }\n" +" list $rest {*}$newexp\n" +"}\n" +"\n" +"\n" +"\n" +"proc glob.glob {base pattern} {\n" +" set dir [file dirname $pattern]\n" +" if {$pattern eq $dir || $pattern eq \"\"} {\n" +" return [list [file join $base $dir] $pattern]\n" +" } elseif {$pattern eq [file tail $pattern]} {\n" +" set dir \"\"\n" +" }\n" +"\n" +"\n" +" set dirlist [glob.glob $base $dir]\n" +" set pattern [file tail $pattern]\n" +"\n" +"\n" +" set result {}\n" +" foreach {realdir dir} $dirlist {\n" +" if {![file isdir $realdir]} {\n" +" continue\n" +" }\n" +" if {[string index $dir end] ne \"/\" && $dir ne \"\"} {\n" +" append dir /\n" +" }\n" +" foreach name [glob.globdir $realdir $pattern] {\n" +" lappend result [file join $realdir $name] $dir$name\n" +" }\n" +" }\n" +" return $result\n" +"}\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"proc glob {args} {\n" +" set nocomplain 0\n" +" set base \"\"\n" +" set tails 0\n" +"\n" +" set n 0\n" +" foreach arg $args {\n" +" if {[info exists param]} {\n" +" set $param $arg\n" +" unset param\n" +" incr n\n" +" continue\n" +" }\n" +" switch -glob -- $arg {\n" +" -d* {\n" +" set switch $arg\n" +" set param base\n" +" }\n" +" -n* {\n" +" set nocomplain 1\n" +" }\n" +" -ta* {\n" +" set tails 1\n" +" }\n" +" -- {\n" +" incr n\n" +" break\n" +" }\n" +" -* {\n" +" return -code error \"bad option \\\"$arg\\\": must be -directory, -nocomplain, -tails, or --\"\n" +" }\n" +" * {\n" +" break\n" +" }\n" +" }\n" +" incr n\n" +" }\n" +" if {[info exists param]} {\n" +" return -code error \"missing argument to \\\"$switch\\\"\"\n" +" }\n" +" if {[llength $args] <= $n} {\n" +" return -code error \"wrong # args: should be \\\"glob ?options? 
pattern ?pattern ...?\\\"\"\n" +" }\n" +"\n" +" set args [lrange $args $n end]\n" +"\n" +" set result {}\n" +" foreach pattern $args {\n" +" set escpattern [string map {\n" +" \\\\\\\\ \\x01 \\\\\\{ \\x02 \\\\\\} \\x03 \\\\, \\x04\n" +" } $pattern]\n" +" set patexps [lassign [glob.explode $escpattern] rest]\n" +" if {$rest ne \"\"} {\n" +" return -code error \"unmatched close brace in glob pattern\"\n" +" }\n" +" foreach patexp $patexps {\n" +" set patexp [string map {\n" +" \\x01 \\\\\\\\ \\x02 \\{ \\x03 \\} \\x04 ,\n" +" } $patexp]\n" +" foreach {realname name} [glob.glob $base $patexp] {\n" +" incr n\n" +" if {$tails} {\n" +" lappend result $name\n" +" } else {\n" +" lappend result [file join $base $name]\n" +" }\n" +" }\n" +" }\n" +" }\n" +"\n" +" if {!$nocomplain && [llength $result] == 0} {\n" +" set s $(([llength $args] > 1) ? \"s\" : \"\")\n" +" return -code error \"no files matched glob pattern$s \\\"[join $args]\\\"\"\n" +" }\n" +"\n" +" return $result\n" +"}\n" +); +} +int Jim_stdlibInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "stdlib", "1.0", JIM_ERRMSG)) + return JIM_ERR; + + return Jim_EvalSource(interp, "stdlib.tcl", 1, +"\n" +"\n" +"if {![exists -command ref]} {\n" +"\n" +" proc ref {args} {{count 0}} {\n" +" format %08x [incr count]\n" +" }\n" +"}\n" +"\n" +"\n" +"proc lambda {arglist args} {\n" +" tailcall proc [ref {} function lambda.finalizer] $arglist {*}$args\n" +"}\n" +"\n" +"proc lambda.finalizer {name val} {\n" +" rename $name {}\n" +"}\n" +"\n" +"\n" +"proc curry {args} {\n" +" alias [ref {} function lambda.finalizer] {*}$args\n" +"}\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"proc function {value} {\n" +" return $value\n" +"}\n" +"\n" +"\n" +"proc stackdump {stacktrace} {\n" +" set lines {}\n" +" lappend lines \"Traceback (most recent call last):\"\n" +" foreach {cmd l f p} [lreverse $stacktrace] {\n" +" set line {}\n" +" if {$f ne \"\"} {\n" +" append line \" File \\\"$f\\\", line $l\"\n" +" }\n" +" if {$p ne \"\"} {\n" +" append line \", in $p\"\n" +" }\n" +" if {$line ne \"\"} {\n" +" lappend lines $line\n" +" if {$cmd ne \"\"} {\n" +" set nl [string first \\n $cmd 1]\n" +" if {$nl >= 0} {\n" +" set cmd [string range $cmd 0 $nl-1]...\n" +" }\n" +" lappend lines \" $cmd\"\n" +" }\n" +" }\n" +" }\n" +" if {[llength $lines] > 1} {\n" +" return [join $lines \\n]\n" +" }\n" +"}\n" +"\n" +"\n" +"\n" +"proc defer {script} {\n" +" upvar jim::defer v\n" +" lappend v $script\n" +"}\n" +"\n" +"\n" +"\n" +"proc errorInfo {msg {stacktrace \"\"}} {\n" +" if {$stacktrace eq \"\"} {\n" +"\n" +" set stacktrace [info stacktrace]\n" +" }\n" +" lassign $stacktrace p f l cmd\n" +" if {$f ne \"\"} {\n" +" set result \"$f:$l: Error: \"\n" +" }\n" +" append result \"$msg\\n\"\n" +" append result [stackdump $stacktrace]\n" +"\n" +"\n" +" string trim $result\n" +"}\n" +"\n" +"\n" +"\n" +"proc {info nameofexecutable} {} {\n" +" if {[exists ::jim::exe]} {\n" +" return $::jim::exe\n" +" }\n" +"}\n" +"\n" +"\n" +"proc {dict update} {&varName args script} {\n" +" set keys {}\n" +" foreach {n v} $args {\n" +" upvar $v var_$v\n" +" if {[dict exists $varName $n]} {\n" +" set var_$v [dict get $varName $n]\n" +" }\n" +" }\n" +" catch {uplevel 1 $script} msg opts\n" +" if {[info exists varName]} {\n" +" foreach {n v} $args {\n" +" if {[info exists var_$v]} {\n" +" dict set varName $n [set var_$v]\n" +" } else {\n" +" dict unset varName $n\n" +" }\n" +" }\n" +" }\n" +" return {*}$opts $msg\n" +"}\n" +"\n" +"proc {dict replace} {dictionary {args {key value}}} {\n" +" 
if {[llength ${key value}] % 2} {\n" +" tailcall {dict replace}\n" +" }\n" +" tailcall dict merge $dictionary ${key value}\n" +"}\n" +"\n" +"\n" +"proc {dict lappend} {varName key {args value}} {\n" +" upvar $varName dict\n" +" if {[exists dict] && [dict exists $dict $key]} {\n" +" set list [dict get $dict $key]\n" +" }\n" +" lappend list {*}$value\n" +" dict set dict $key $list\n" +"}\n" +"\n" +"\n" +"proc {dict append} {varName key {args value}} {\n" +" upvar $varName dict\n" +" if {[exists dict] && [dict exists $dict $key]} {\n" +" set str [dict get $dict $key]\n" +" }\n" +" append str {*}$value\n" +" dict set dict $key $str\n" +"}\n" +"\n" +"\n" +"proc {dict incr} {varName key {increment 1}} {\n" +" upvar $varName dict\n" +" if {[exists dict] && [dict exists $dict $key]} {\n" +" set value [dict get $dict $key]\n" +" }\n" +" incr value $increment\n" +" dict set dict $key $value\n" +"}\n" +"\n" +"\n" +"proc {dict remove} {dictionary {args key}} {\n" +" foreach k $key {\n" +" dict unset dictionary $k\n" +" }\n" +" return $dictionary\n" +"}\n" +"\n" +"\n" +"proc {dict for} {vars dictionary script} {\n" +" if {[llength $vars] != 2} {\n" +" return -code error \"must have exactly two variable names\"\n" +" }\n" +" dict size $dictionary\n" +" tailcall foreach $vars $dictionary $script\n" +"}\n" +); +} +int Jim_tclcompatInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "tclcompat", "1.0", JIM_ERRMSG)) + return JIM_ERR; + + return Jim_EvalSource(interp, "tclcompat.tcl", 1, +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"\n" +"set env [env]\n" +"\n" +"\n" +"if {[exists -command stdout]} {\n" +"\n" +" foreach p {gets flush close eof seek tell} {\n" +" proc $p {chan args} {p} {\n" +" tailcall $chan $p {*}$args\n" +" }\n" +" }\n" +" unset p\n" +"\n" +"\n" +"\n" +" proc puts {{-nonewline {}} {chan stdout} msg} {\n" +" if {${-nonewline} ni {-nonewline {}}} {\n" +" tailcall ${-nonewline} puts $msg\n" +" }\n" +" tailcall $chan puts {*}${-nonewline} $msg\n" +" }\n" +"\n" +"\n" +"\n" +"\n" +"\n" +" proc read {{-nonewline {}} chan} {\n" +" if {${-nonewline} ni {-nonewline {}}} {\n" +" tailcall ${-nonewline} read {*}${chan}\n" +" }\n" +" tailcall $chan read {*}${-nonewline}\n" +" }\n" +"\n" +" proc fconfigure {f args} {\n" +" foreach {n v} $args {\n" +" switch -glob -- $n {\n" +" -bl* {\n" +" $f ndelay $(!$v)\n" +" }\n" +" -bu* {\n" +" $f buffering $v\n" +" }\n" +" -tr* {\n" +" $f translation $v\n" +" }\n" +" default {\n" +" return -code error \"fconfigure: unknown option $n\"\n" +" }\n" +" }\n" +" }\n" +" }\n" +"}\n" +"\n" +"\n" +"proc fileevent {args} {\n" +" tailcall {*}$args\n" +"}\n" +"\n" +"\n" +"\n" +"proc parray {arrayname {pattern *} {puts puts}} {\n" +" upvar $arrayname a\n" +"\n" +" set max 0\n" +" foreach name [array names a $pattern] {\n" +" if {[string length $name] > $max} {\n" +" set max [string length $name]\n" +" }\n" +" }\n" +" incr max [string length $arrayname]\n" +" incr max 2\n" +" foreach name [lsort [array names a $pattern]] {\n" +" $puts [format \"%-${max}s = %s\" $arrayname\\($name\\) $a($name)]\n" +" }\n" +"}\n" +"\n" +"\n" +"proc {file copy} {{force {}} source target} {\n" +" try {\n" +" if {$force ni {{} -force}} {\n" +" error \"bad option \\\"$force\\\": should be -force\"\n" +" }\n" +"\n" +" set in [open $source rb]\n" +"\n" +" if {[file exists $target]} {\n" +" if {$force eq \"\"} {\n" +" error \"error copying \\\"$source\\\" to \\\"$target\\\": file already exists\"\n" +" }\n" +"\n" +" if {$source eq $target} {\n" +" return\n" +" }\n" +"\n" +"\n" +" file stat 
$source ss\n" +" file stat $target ts\n" +" if {$ss(dev) == $ts(dev) && $ss(ino) == $ts(ino) && $ss(ino)} {\n" +" return\n" +" }\n" +" }\n" +" set out [open $target wb]\n" +" $in copyto $out\n" +" $out close\n" +" } on error {msg opts} {\n" +" incr opts(-level)\n" +" return {*}$opts $msg\n" +" } finally {\n" +" catch {$in close}\n" +" }\n" +"}\n" +"\n" +"\n" +"\n" +"proc popen {cmd {mode r}} {\n" +" lassign [pipe] r w\n" +" try {\n" +" if {[string match \"w*\" $mode]} {\n" +" lappend cmd <@$r &\n" +" set pids [exec {*}$cmd]\n" +" $r close\n" +" set f $w\n" +" } else {\n" +" lappend cmd >@$w &\n" +" set pids [exec {*}$cmd]\n" +" $w close\n" +" set f $r\n" +" }\n" +" lambda {cmd args} {f pids} {\n" +" if {$cmd eq \"pid\"} {\n" +" return $pids\n" +" }\n" +" if {$cmd eq \"close\"} {\n" +" $f close\n" +"\n" +" set retopts {}\n" +" foreach p $pids {\n" +" lassign [wait $p] status - rc\n" +" if {$status eq \"CHILDSTATUS\"} {\n" +" if {$rc == 0} {\n" +" continue\n" +" }\n" +" set msg \"child process exited abnormally\"\n" +" } else {\n" +" set msg \"child killed: received signal\"\n" +" }\n" +" set retopts [list -code error -errorcode [list $status $p $rc] $msg]\n" +" }\n" +" return {*}$retopts\n" +" }\n" +" tailcall $f $cmd {*}$args\n" +" }\n" +" } on error {error opts} {\n" +" $r close\n" +" $w close\n" +" error $error\n" +" }\n" +"}\n" +"\n" +"\n" +"local proc pid {{channelId {}}} {\n" +" if {$channelId eq \"\"} {\n" +" tailcall upcall pid\n" +" }\n" +" if {[catch {$channelId tell}]} {\n" +" return -code error \"can not find channel named \\\"$channelId\\\"\"\n" +" }\n" +" if {[catch {$channelId pid} pids]} {\n" +" return \"\"\n" +" }\n" +" return $pids\n" +"}\n" +"\n" +"\n" +"\n" +"proc throw {code {msg \"\"}} {\n" +" return -code $code $msg\n" +"}\n" +"\n" +"\n" +"proc {file delete force} {path} {\n" +" foreach e [readdir $path] {\n" +" file delete -force $path/$e\n" +" }\n" +" file delete $path\n" +"}\n" +); +} + + +#include +#include +#include +#include +#include +#ifdef HAVE_UNISTD_H +#include +#include +#endif +#ifdef HAVE_UTIL_H +#include +#endif +#ifdef HAVE_PTY_H +#include +#endif + + +#if defined(HAVE_SYS_SOCKET_H) && defined(HAVE_SELECT) && defined(HAVE_NETINET_IN_H) && defined(HAVE_NETDB_H) && defined(HAVE_ARPA_INET_H) +#include +#include +#include +#include +#include +#ifdef HAVE_SYS_UN_H +#include +#endif +#define HAVE_SOCKETS +#elif defined (__MINGW32__) + +#endif + +#if defined(JIM_SSL) +#include +#include +#endif + +#ifdef HAVE_TERMIOS_H +#endif + + +#define AIO_CMD_LEN 32 +#define AIO_DEFAULT_RBUF_LEN 256 +#define AIO_DEFAULT_WBUF_LIMIT (64 * 1024) + +#define AIO_KEEPOPEN 1 +#define AIO_NODELETE 2 +#define AIO_EOF 4 +#define AIO_WBUF_NONE 8 +#define AIO_NONBLOCK 16 + +#define AIO_ONEREAD 32 + +enum wbuftype { + WBUF_OPT_NONE, + WBUF_OPT_LINE, + WBUF_OPT_FULL, +}; + +#if defined(JIM_IPV6) +#define IPV6 1 +#else +#define IPV6 0 +#ifndef PF_INET6 +#define PF_INET6 0 +#endif +#endif +#if defined(HAVE_SYS_UN_H) && defined(PF_UNIX) +#define UNIX_SOCKETS 1 +#else +#define UNIX_SOCKETS 0 +#endif + + + + +static int JimReadableTimeout(int fd, long ms) +{ +#ifdef HAVE_SELECT + int retval; + struct timeval tv; + fd_set rfds; + + FD_ZERO(&rfds); + + FD_SET(fd, &rfds); + tv.tv_sec = ms / 1000; + tv.tv_usec = (ms % 1000) * 1000; + + retval = select(fd + 1, &rfds, NULL, NULL, ms == 0 ? 
NULL : &tv); + + if (retval > 0) { + return JIM_OK; + } + return JIM_ERR; +#else + return JIM_OK; +#endif +} + + +struct AioFile; + +typedef struct { + int (*writer)(struct AioFile *af, const char *buf, int len); + int (*reader)(struct AioFile *af, char *buf, int len, int pending); + int (*error)(const struct AioFile *af); + const char *(*strerror)(struct AioFile *af); + int (*verify)(struct AioFile *af); +} JimAioFopsType; + +typedef struct AioFile +{ + Jim_Obj *filename; + int wbuft; + int flags; + long timeout; + int fd; + int addr_family; + void *ssl; + const JimAioFopsType *fops; + Jim_Obj *readbuf; + Jim_Obj *writebuf; + char *rbuf; + size_t rbuf_len; + size_t wbuf_limit; +} AioFile; + +static void aio_consume(Jim_Obj *objPtr, int n); + +static int stdio_writer(struct AioFile *af, const char *buf, int len) +{ + int ret = write(af->fd, buf, len); + if (ret < 0 && errno == EPIPE) { + aio_consume(af->writebuf, Jim_Length(af->writebuf)); + } + return ret; +} + +static int stdio_reader(struct AioFile *af, char *buf, int len, int nb) +{ + if (nb || af->timeout == 0 || JimReadableTimeout(af->fd, af->timeout) == JIM_OK) { + + int ret; + + errno = 0; + ret = read(af->fd, buf, len); + if (ret <= 0 && errno != EAGAIN && errno != EINTR) { + af->flags |= AIO_EOF; + } + return ret; + } + errno = ETIMEDOUT; + return -1; +} + +static int stdio_error(const AioFile *af) +{ + if (af->flags & AIO_EOF) { + return JIM_OK; + } + + switch (errno) { + case EAGAIN: + case EINTR: + case ETIMEDOUT: +#ifdef ECONNRESET + case ECONNRESET: +#endif +#ifdef ECONNABORTED + case ECONNABORTED: +#endif + return JIM_OK; + default: + return JIM_ERR; + } +} + +static const char *stdio_strerror(struct AioFile *af) +{ + return strerror(errno); +} + +static const JimAioFopsType stdio_fops = { + stdio_writer, + stdio_reader, + stdio_error, + stdio_strerror, + NULL, +}; + + +static void aio_set_nonblocking(AioFile *af, int nb) +{ +#ifdef O_NDELAY + int old = !!(af->flags & AIO_NONBLOCK); + if (old != nb) { + int fmode = fcntl(af->fd, F_GETFL); + if (nb) { + fmode |= O_NDELAY; + af->flags |= AIO_NONBLOCK; + } + else { + fmode &= ~O_NDELAY; + af->flags &= ~AIO_NONBLOCK; + } + (void)fcntl(af->fd, F_SETFL, fmode); + } +#endif +} + +static int aio_start_nonblocking(AioFile *af) +{ + int old = !!(af->flags & AIO_NONBLOCK); + if (af->timeout) { + aio_set_nonblocking(af, 1); + } + return old; +} + +static int JimAioSubCmdProc(Jim_Interp *interp, int argc, Jim_Obj *const *argv); +static AioFile *JimMakeChannel(Jim_Interp *interp, int fd, Jim_Obj *filename, + const char *hdlfmt, int family, int flags); + + +static const char *JimAioErrorString(AioFile *af) +{ + if (af && af->fops) + return af->fops->strerror(af); + + return strerror(errno); +} + +static void JimAioSetError(Jim_Interp *interp, Jim_Obj *name) +{ + AioFile *af = Jim_CmdPrivData(interp); + + if (name) { + Jim_SetResultFormatted(interp, "%#s: %s", name, JimAioErrorString(af)); + } + else { + Jim_SetResultString(interp, JimAioErrorString(af), -1); + } +} + +static int aio_eof(AioFile *af) +{ + return af->flags & AIO_EOF; +} + +static int JimCheckStreamError(Jim_Interp *interp, AioFile *af) +{ + int ret = 0; + if (!aio_eof(af)) { + ret = af->fops->error(af); + if (ret) { + JimAioSetError(interp, af->filename); + } + } + return ret; +} + +static void aio_consume(Jim_Obj *objPtr, int n) +{ + assert(objPtr->bytes); + assert(n <= objPtr->length); + + + memmove(objPtr->bytes, objPtr->bytes + n, objPtr->length - n + 1); + objPtr->length -= n; +} + + +static int aio_flush(Jim_Interp 
*interp, AioFile *af); + +#ifdef jim_ext_eventloop +static int aio_autoflush(Jim_Interp *interp, void *clientData, int mask) +{ + AioFile *af = clientData; + + aio_flush(interp, af); + if (Jim_Length(af->writebuf) == 0) { + + return -1; + } + return 0; +} +#endif + + +static int aio_flush(Jim_Interp *interp, AioFile *af) +{ + int len; + const char *pt = Jim_GetString(af->writebuf, &len); + if (len) { + int ret = af->fops->writer(af, pt, len); + if (ret > 0) { + + aio_consume(af->writebuf, ret); + } + if (ret < 0) { + return JimCheckStreamError(interp, af); + } + if (Jim_Length(af->writebuf)) { +#ifdef jim_ext_eventloop + void *handler = Jim_FindFileHandler(interp, af->fd, JIM_EVENT_WRITABLE); + if (handler == NULL) { + Jim_CreateFileHandler(interp, af->fd, JIM_EVENT_WRITABLE, aio_autoflush, af, NULL); + return JIM_OK; + } + else if (handler == af) { + + return JIM_OK; + } +#endif + + Jim_SetResultString(interp, "send buffer is full", -1); + return JIM_ERR; + } + } + return JIM_OK; +} + +static int aio_read_len(Jim_Interp *interp, AioFile *af, unsigned flags, int neededLen) +{ + if (!af->readbuf) { + af->readbuf = Jim_NewStringObj(interp, NULL, 0); + } + + if (neededLen >= 0) { + neededLen -= Jim_Length(af->readbuf); + if (neededLen <= 0) { + return JIM_OK; + } + } + + while (neededLen && !aio_eof(af)) { + int retval; + int readlen; + + if (neededLen == -1) { + readlen = af->rbuf_len; + } + else { + readlen = (neededLen > af->rbuf_len ? af->rbuf_len : neededLen); + } + + if (!af->rbuf) { + af->rbuf = Jim_Alloc(af->rbuf_len); + } + retval = af->fops->reader(af, af->rbuf, readlen, flags & AIO_NONBLOCK); + if (retval > 0) { + if (retval) { + Jim_AppendString(interp, af->readbuf, af->rbuf, retval); + } + if (neededLen != -1) { + neededLen -= retval; + } + if (flags & AIO_ONEREAD) { + return JIM_OK; + } + continue; + } + if ((flags & AIO_ONEREAD) || JimCheckStreamError(interp, af)) { + return JIM_ERR; + } + break; + } + + return JIM_OK; +} + +static Jim_Obj *aio_read_consume(Jim_Interp *interp, AioFile *af, int neededLen) +{ + Jim_Obj *objPtr = NULL; + + if (neededLen < 0 || af->readbuf == NULL || Jim_Length(af->readbuf) <= neededLen) { + objPtr = af->readbuf; + af->readbuf = NULL; + } + else if (af->readbuf) { + + int len; + const char *pt = Jim_GetString(af->readbuf, &len); + + objPtr = Jim_NewStringObj(interp, pt, neededLen); + aio_consume(af->readbuf, neededLen); + } + + return objPtr; +} + +static void JimAioDelProc(Jim_Interp *interp, void *privData) +{ + AioFile *af = privData; + + JIM_NOTUSED(interp); + + + aio_flush(interp, af); + Jim_DecrRefCount(interp, af->writebuf); + +#if UNIX_SOCKETS + if (af->addr_family == PF_UNIX && (af->flags & AIO_NODELETE) == 0) { + + Jim_Obj *filenameObj = aio_sockname(interp, af->fd); + if (filenameObj) { + if (Jim_Length(filenameObj)) { + remove(Jim_String(filenameObj)); + } + Jim_FreeNewObj(interp, filenameObj); + } + } +#endif + + Jim_DecrRefCount(interp, af->filename); + +#ifdef jim_ext_eventloop + + Jim_DeleteFileHandler(interp, af->fd, JIM_EVENT_READABLE | JIM_EVENT_WRITABLE | JIM_EVENT_EXCEPTION); +#endif + +#if defined(JIM_SSL) + if (af->ssl != NULL) { + SSL_free(af->ssl); + } +#endif + if (!(af->flags & AIO_KEEPOPEN)) { + close(af->fd); + } + if (af->readbuf) { + Jim_FreeNewObj(interp, af->readbuf); + } + + Jim_Free(af->rbuf); + Jim_Free(af); +} + +static int aio_cmd_read(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + int nonewline = 0; + jim_wide neededLen = -1; + static const char * const 
options[] = { "-pending", "-nonewline", NULL }; + enum { OPT_PENDING, OPT_NONEWLINE }; + int option; + int nb; + Jim_Obj *objPtr; + + if (argc) { + if (*Jim_String(argv[0]) == '-') { + if (Jim_GetEnum(interp, argv[0], options, &option, NULL, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + switch (option) { + case OPT_PENDING: + + break; + case OPT_NONEWLINE: + nonewline++; + break; + } + } + else { + if (Jim_GetWide(interp, argv[0], &neededLen) != JIM_OK) + return JIM_ERR; + if (neededLen < 0) { + Jim_SetResultString(interp, "invalid parameter: negative len", -1); + return JIM_ERR; + } + } + argc--; + argv++; + } + if (argc) { + return -1; + } + + + nb = aio_start_nonblocking(af); + + if (aio_read_len(interp, af, nb ? AIO_NONBLOCK : 0, neededLen) != JIM_OK) { + aio_set_nonblocking(af, nb); + return JIM_ERR; + } + objPtr = aio_read_consume(interp, af, neededLen); + + aio_set_nonblocking(af, nb); + + if (objPtr) { + if (nonewline) { + int len; + const char *s = Jim_GetString(objPtr, &len); + + if (len > 0 && s[len - 1] == '\n') { + objPtr->length--; + objPtr->bytes[objPtr->length] = '\0'; + } + } + Jim_SetResult(interp, objPtr); + } + else { + Jim_SetEmptyResult(interp); + } + return JIM_OK; +} + +int Jim_AioFilehandle(Jim_Interp *interp, Jim_Obj *command) +{ + Jim_Cmd *cmdPtr = Jim_GetCommand(interp, command, JIM_ERRMSG); + + + if (cmdPtr && !cmdPtr->isproc && cmdPtr->u.native.cmdProc == JimAioSubCmdProc) { + return ((AioFile *) cmdPtr->u.native.privData)->fd; + } + Jim_SetResultFormatted(interp, "Not a filehandle: \"%#s\"", command); + return -1; +} + +static int aio_cmd_getfd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + + aio_flush(interp, af); + + Jim_SetResultInt(interp, af->fd); + + return JIM_OK; +} + +static int aio_cmd_copy(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + jim_wide count = 0; + jim_wide maxlen = JIM_WIDE_MAX; + int ok = 1; + Jim_Obj *objv[4]; + + if (argc == 2) { + if (Jim_GetWide(interp, argv[1], &maxlen) != JIM_OK) { + return JIM_ERR; + } + } + + objv[0] = argv[0]; + objv[1] = Jim_NewStringObj(interp, "flush", -1); + if (Jim_EvalObjVector(interp, 2, objv) != JIM_OK) { + Jim_SetResultFormatted(interp, "Not a filehandle: \"%#s\"", argv[0]); + return JIM_ERR; + } + + + objv[0] = argv[0]; + objv[1] = Jim_NewStringObj(interp, "puts", -1); + objv[2] = Jim_NewStringObj(interp, "-nonewline", -1); + Jim_IncrRefCount(objv[1]); + Jim_IncrRefCount(objv[2]); + + while (count < maxlen) { + jim_wide len = maxlen - count; + if (len > af->rbuf_len) { + len = af->rbuf_len; + } + if (aio_read_len(interp, af, 0, len) != JIM_OK) { + ok = 0; + break; + } + objv[3] = aio_read_consume(interp, af, len); + count += Jim_Length(objv[3]); + if (Jim_EvalObjVector(interp, 4, objv) != JIM_OK) { + ok = 0; + break; + } + if (aio_eof(af)) { + break; + } + if (count >= 16384 && af->rbuf_len < 65536) { + + af->rbuf_len = 65536; + af->rbuf = Jim_Realloc(af->rbuf, af->rbuf_len); + } + } + + Jim_DecrRefCount(interp, objv[1]); + Jim_DecrRefCount(interp, objv[2]); + + if (!ok) { + return JIM_ERR; + } + + Jim_SetResultInt(interp, count); + + return JIM_OK; +} + +static int aio_cmd_gets(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + Jim_Obj *objPtr = NULL; + int len; + int nb; + unsigned flags = AIO_ONEREAD; + char *nl = NULL; + int offset = 0; + + errno = 0; + + + nb = aio_start_nonblocking(af); + if (nb) { + flags |= AIO_NONBLOCK; + } + + while 
(!aio_eof(af)) { + if (af->readbuf) { + const char *pt = Jim_GetString(af->readbuf, &len); + nl = memchr(pt + offset, '\n', len - offset); + if (nl) { + + objPtr = Jim_NewStringObj(interp, pt, nl - pt); + + aio_consume(af->readbuf, nl - pt + 1); + break; + } + offset = len; + } + + + if (aio_read_len(interp, af, flags, -1) != JIM_OK) { + break; + } + } + + aio_set_nonblocking(af, nb); + + if (!nl && aio_eof(af) && af->readbuf) { + + objPtr = af->readbuf; + af->readbuf = NULL; + } + else if (!objPtr) { + objPtr = Jim_NewStringObj(interp, NULL, 0); + } + + if (argc) { + if (Jim_SetVariable(interp, argv[0], objPtr) != JIM_OK) { + Jim_FreeNewObj(interp, objPtr); + return JIM_ERR; + } + + len = Jim_Length(objPtr); + + if (!nl && len == 0) { + + len = -1; + } + Jim_SetResultInt(interp, len); + } + else { + Jim_SetResult(interp, objPtr); + } + return JIM_OK; +} + +static int aio_cmd_puts(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + int wlen; + const char *wdata; + Jim_Obj *strObj; + int wnow = 0; + int nl = 1; + + if (argc == 2) { + if (!Jim_CompareStringImmediate(interp, argv[0], "-nonewline")) { + return -1; + } + strObj = argv[1]; + nl = 0; + } + else { + strObj = argv[0]; + } + +#ifdef JIM_MAINTAINER + if (Jim_IsShared(af->writebuf)) { + Jim_DecrRefCount(interp, af->writebuf); + af->writebuf = Jim_DuplicateObj(interp, af->writebuf); + Jim_IncrRefCount(af->writebuf); + } +#endif + Jim_AppendObj(interp, af->writebuf, strObj); + if (nl) { + Jim_AppendString(interp, af->writebuf, "\n", 1); + } + + + wdata = Jim_GetString(af->writebuf, &wlen); + switch (af->wbuft) { + case WBUF_OPT_NONE: + + wnow = 1; + break; + + case WBUF_OPT_LINE: + + if (nl || memchr(wdata, '\n', wlen) != NULL) { + wnow = 1; + } + break; + + case WBUF_OPT_FULL: + if (wlen >= af->wbuf_limit) { + wnow = 1; + } + break; + } + + if (wnow) { + return aio_flush(interp, af); + } + return JIM_OK; +} + +static int aio_cmd_isatty(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ +#ifdef HAVE_ISATTY + AioFile *af = Jim_CmdPrivData(interp); + Jim_SetResultInt(interp, isatty(af->fd)); +#else + Jim_SetResultInt(interp, 0); +#endif + + return JIM_OK; +} + + +static int aio_cmd_flush(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + return aio_flush(interp, af); +} + +static int aio_cmd_eof(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + Jim_SetResultInt(interp, !!aio_eof(af)); + return JIM_OK; +} + +static int aio_cmd_close(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + if (argc == 3) { + int option = -1; +#if defined(HAVE_SOCKETS) + static const char * const options[] = { "r", "w", "-nodelete", NULL }; + enum { OPT_R, OPT_W, OPT_NODELETE }; + + if (Jim_GetEnum(interp, argv[2], options, &option, NULL, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } +#endif + switch (option) { +#if defined(HAVE_SHUTDOWN) + case OPT_R: + case OPT_W: + if (shutdown(af->fd, option == OPT_R ? 
SHUT_RD : SHUT_WR) == 0) { + return JIM_OK; + } + JimAioSetError(interp, NULL); + return JIM_ERR; +#endif +#if UNIX_SOCKETS + case OPT_NODELETE: + if (af->addr_family == PF_UNIX) { + af->flags |= AIO_NODELETE; + break; + } + +#endif + default: + Jim_SetResultString(interp, "not supported", -1); + return JIM_ERR; + } + } + + + af->flags &= ~AIO_KEEPOPEN; + + return Jim_DeleteCommand(interp, argv[0]); +} + +static int aio_cmd_seek(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + int orig = SEEK_SET; + jim_wide offset; + + if (argc == 2) { + if (Jim_CompareStringImmediate(interp, argv[1], "start")) + orig = SEEK_SET; + else if (Jim_CompareStringImmediate(interp, argv[1], "current")) + orig = SEEK_CUR; + else if (Jim_CompareStringImmediate(interp, argv[1], "end")) + orig = SEEK_END; + else { + return -1; + } + } + if (Jim_GetWide(interp, argv[0], &offset) != JIM_OK) { + return JIM_ERR; + } + if (orig != SEEK_CUR || offset != 0) { + + aio_flush(interp, af); + } + if (Jim_Lseek(af->fd, offset, orig) == -1) { + JimAioSetError(interp, af->filename); + return JIM_ERR; + } + if (af->readbuf) { + Jim_FreeNewObj(interp, af->readbuf); + af->readbuf = NULL; + } + af->flags &= ~AIO_EOF; + return JIM_OK; +} + +static int aio_cmd_tell(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + Jim_SetResultInt(interp, Jim_Lseek(af->fd, 0, SEEK_CUR)); + return JIM_OK; +} + +static int aio_cmd_filename(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + Jim_SetResult(interp, af->filename); + return JIM_OK; +} + +#ifdef O_NDELAY +static int aio_cmd_ndelay(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + if (argc) { + long nb; + + if (Jim_GetLong(interp, argv[0], &nb) != JIM_OK) { + return JIM_ERR; + } + aio_set_nonblocking(af, nb); + } + Jim_SetResultInt(interp, (af->flags & AIO_NONBLOCK) ? 
1 : 0); + return JIM_OK; +} +#endif + + +#ifdef HAVE_FSYNC +static int aio_cmd_sync(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + if (aio_flush(interp, af) != JIM_OK) { + return JIM_ERR; + } + fsync(af->fd); + return JIM_OK; +} +#endif + +static int aio_cmd_buffering(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + Jim_Obj *resultObj; + + static const char * const options[] = { + "none", + "line", + "full", + NULL + }; + + if (argc) { + if (Jim_GetEnum(interp, argv[0], options, &af->wbuft, NULL, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + + if (af->wbuft == WBUF_OPT_FULL && argc == 2) { + long l; + if (Jim_GetLong(interp, argv[1], &l) != JIM_OK || l <= 0) { + return JIM_ERR; + } + af->wbuf_limit = l; + } + + if (af->wbuft == WBUF_OPT_NONE) { + if (aio_flush(interp, af) != JIM_OK) { + return JIM_ERR; + } + } + + } + + resultObj = Jim_NewListObj(interp, NULL, 0); + Jim_ListAppendElement(interp, resultObj, Jim_NewStringObj(interp, options[af->wbuft], -1)); + if (af->wbuft == WBUF_OPT_FULL) { + Jim_ListAppendElement(interp, resultObj, Jim_NewIntObj(interp, af->wbuf_limit)); + } + Jim_SetResult(interp, resultObj); + + return JIM_OK; +} + +static int aio_cmd_translation(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + enum {OPT_BINARY, OPT_TEXT}; + static const char * const options[] = { + "binary", + "text", + NULL + }; + int opt; + + if (Jim_GetEnum(interp, argv[0], options, &opt, NULL, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } +#if defined(Jim_SetMode) + else { + AioFile *af = Jim_CmdPrivData(interp); + Jim_SetMode(af->fd, opt == OPT_BINARY ? O_BINARY : O_TEXT); + } +#endif + return JIM_OK; +} + +static int aio_cmd_readsize(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + if (argc) { + long l; + if (Jim_GetLong(interp, argv[0], &l) != JIM_OK || l <= 0) { + return JIM_ERR; + } + af->rbuf_len = l; + if (af->rbuf) { + af->rbuf = Jim_Realloc(af->rbuf, af->rbuf_len); + } + } + Jim_SetResultInt(interp, af->rbuf_len); + + return JIM_OK; +} + +#ifdef jim_ext_eventloop +static int aio_cmd_timeout(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ +#ifdef HAVE_SELECT + AioFile *af = Jim_CmdPrivData(interp); + if (argc == 1) { + if (Jim_GetLong(interp, argv[0], &af->timeout) != JIM_OK) { + return JIM_ERR; + } + } + Jim_SetResultInt(interp, af->timeout); + return JIM_OK; +#else + Jim_SetResultString(interp, "timeout not supported", -1); + return JIM_ERR; +#endif +} + +static int aio_eventinfo(Jim_Interp *interp, AioFile * af, unsigned mask, + int argc, Jim_Obj * const *argv) +{ + if (argc == 0) { + + Jim_Obj *objPtr = Jim_FindFileHandler(interp, af->fd, mask); + if (objPtr) { + Jim_SetResult(interp, objPtr); + } + return JIM_OK; + } + + + Jim_DeleteFileHandler(interp, af->fd, mask); + + + if (Jim_Length(argv[0])) { + Jim_CreateScriptFileHandler(interp, af->fd, mask, argv[0]); + } + + return JIM_OK; +} + +static int aio_cmd_readable(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + return aio_eventinfo(interp, af, JIM_EVENT_READABLE, argc, argv); +} + +static int aio_cmd_writable(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + return aio_eventinfo(interp, af, JIM_EVENT_WRITABLE, argc, argv); +} + +static int aio_cmd_onexception(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + AioFile *af = Jim_CmdPrivData(interp); + + return 
aio_eventinfo(interp, af, JIM_EVENT_EXCEPTION, argc, argv); +} +#endif + +#if defined(jim_ext_file) && defined(Jim_FileStat) +static int aio_cmd_stat(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + AioFile *af = Jim_CmdPrivData(interp); + + if (Jim_FileStat(af->fd, &sb) == -1) { + JimAioSetError(interp, NULL); + return JIM_ERR; + } + return Jim_FileStoreStatData(interp, argc == 0 ? NULL : argv[0], &sb); +} +#endif + + + + +static const jim_subcmd_type aio_command_table[] = { + { "read", + "?-nonewline|len?", + aio_cmd_read, + 0, + 2, + + }, + { "copyto", + "handle ?size?", + aio_cmd_copy, + 1, + 2, + + }, + { "getfd", + NULL, + aio_cmd_getfd, + 0, + 0, + + }, + { "gets", + "?var?", + aio_cmd_gets, + 0, + 1, + + }, + { "puts", + "?-nonewline? str", + aio_cmd_puts, + 1, + 2, + + }, + { "isatty", + NULL, + aio_cmd_isatty, + 0, + 0, + + }, + { "flush", + NULL, + aio_cmd_flush, + 0, + 0, + + }, + { "eof", + NULL, + aio_cmd_eof, + 0, + 0, + + }, + { "close", + "?r(ead)|w(rite)?", + aio_cmd_close, + 0, + 1, + JIM_MODFLAG_FULLARGV, + + }, + { "seek", + "offset ?start|current|end", + aio_cmd_seek, + 1, + 2, + + }, + { "tell", + NULL, + aio_cmd_tell, + 0, + 0, + + }, + { "filename", + NULL, + aio_cmd_filename, + 0, + 0, + + }, +#ifdef O_NDELAY + { "ndelay", + "?0|1?", + aio_cmd_ndelay, + 0, + 1, + + }, +#endif +#ifdef HAVE_FSYNC + { "sync", + NULL, + aio_cmd_sync, + 0, + 0, + + }, +#endif + { "buffering", + "?none|line|full? ?size?", + aio_cmd_buffering, + 0, + 2, + + }, + { "translation", + "binary|text", + aio_cmd_translation, + 1, + 1, + + }, + { "readsize", + "?size?", + aio_cmd_readsize, + 0, + 1, + + }, +#if defined(jim_ext_file) && defined(Jim_FileStat) + { "stat", + "?var?", + aio_cmd_stat, + 0, + 1, + + }, +#endif +#ifdef jim_ext_eventloop + { "readable", + "?readable-script?", + aio_cmd_readable, + 0, + 1, + + }, + { "writable", + "?writable-script?", + aio_cmd_writable, + 0, + 1, + + }, + { "onexception", + "?exception-script?", + aio_cmd_onexception, + 0, + 1, + + }, + { "timeout", + "?ms?", + aio_cmd_timeout, + 0, + 1, + + }, +#endif + { NULL } +}; + +static int JimAioSubCmdProc(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return Jim_CallSubCmd(interp, Jim_ParseSubCmd(interp, aio_command_table, argc, argv), argc, argv); +} + +static int parse_posix_open_mode(Jim_Interp *interp, Jim_Obj *modeObj) +{ + int i; + int flags = 0; + #ifndef O_NOCTTY + + #define O_NOCTTY 0 + #endif + static const char * const modetypes[] = { + "RDONLY", "WRONLY", "RDWR", "APPEND", "BINARY", "CREAT", "EXCL", "NOCTTY", "TRUNC", NULL + }; + static const int modeflags[] = { + O_RDONLY, O_WRONLY, O_RDWR, O_APPEND, 0, O_CREAT, O_EXCL, O_NOCTTY, O_TRUNC, + }; + + for (i = 0; i < Jim_ListLength(interp, modeObj); i++) { + int opt; + Jim_Obj *objPtr = Jim_ListGetIndex(interp, modeObj, i); + if (Jim_GetEnum(interp, objPtr, modetypes, &opt, "access mode", JIM_ERRMSG) != JIM_OK) { + return -1; + } + flags |= modeflags[opt]; + } + return flags; +} + +static int parse_open_mode(Jim_Interp *interp, Jim_Obj *filenameObj, Jim_Obj *modeObj) +{ + + int flags; + const char *mode = Jim_String(modeObj); + if (*mode == 'R' || *mode == 'W') { + return parse_posix_open_mode(interp, modeObj); + } + if (*mode == 'r') { + flags = O_RDONLY; + } + else if (*mode == 'w') { + flags = O_WRONLY | O_CREAT | O_TRUNC; + } + else if (*mode == 'a') { + flags = O_WRONLY | O_CREAT | O_APPEND; + } + else { + Jim_SetResultFormatted(interp, "%s: invalid open mode '%s'", Jim_String(filenameObj), mode); + return -1; + } 
+ mode++; + + if (*mode == 'b') { +#ifdef O_BINARY + flags |= O_BINARY; +#endif + mode++; + } + + if (*mode == 't') { +#ifdef O_TEXT + flags |= O_TEXT; +#endif + mode++; + } + + if (*mode == '+') { + mode++; + + flags &= ~(O_RDONLY | O_WRONLY); + flags |= O_RDWR; + } + + if (*mode == 'x') { + mode++; +#ifdef O_EXCL + flags |= O_EXCL; +#endif + } + + if (*mode == 'F') { + mode++; +#ifdef O_LARGEFILE + flags |= O_LARGEFILE; +#endif + } + + if (*mode == 'e') { + + mode++; + } + return flags; +} + +static int JimAioOpenCommand(Jim_Interp *interp, int argc, + Jim_Obj *const *argv) +{ + int openflags; + const char *filename; + int fd = -1; + int n = 0; + int flags = 0; + + if (argc > 2 && Jim_CompareStringImmediate(interp, argv[2], "-noclose")) { + flags = AIO_KEEPOPEN; + n++; + } + if (argc < 2 || argc > 3 + n) { + Jim_WrongNumArgs(interp, 1, argv, "filename ?-noclose? ?mode?"); + return JIM_ERR; + } + + filename = Jim_String(argv[1]); + +#ifdef jim_ext_tclcompat + { + + + if (*filename == '|') { + Jim_Obj *evalObj[3]; + int i = 0; + + evalObj[i++] = Jim_NewStringObj(interp, "::popen", -1); + evalObj[i++] = Jim_NewStringObj(interp, filename + 1, -1); + if (argc == 3 + n) { + evalObj[i++] = argv[2 + n]; + } + + return Jim_EvalObjVector(interp, i, evalObj); + } + } +#endif + if (argc == 3 + n) { + openflags = parse_open_mode(interp, argv[1], argv[2 + n]); + if (openflags == -1) { + return JIM_ERR; + } + } + else { + openflags = O_RDONLY; + } + fd = open(filename, openflags, 0666); + if (fd < 0) { + JimAioSetError(interp, argv[1]); + return JIM_ERR; + } + + return JimMakeChannel(interp, fd, argv[1], "aio.handle%ld", 0, flags) ? JIM_OK : JIM_ERR; +} + + +static AioFile *JimMakeChannel(Jim_Interp *interp, int fd, Jim_Obj *filename, + const char *hdlfmt, int family, int flags) +{ + AioFile *af; + char buf[AIO_CMD_LEN]; + Jim_Obj *cmdname; + + snprintf(buf, sizeof(buf), hdlfmt, Jim_GetId(interp)); + cmdname = Jim_NewStringObj(interp, buf, -1); + if (!filename) { + filename = cmdname; + } + Jim_IncrRefCount(filename); + + + af = Jim_Alloc(sizeof(*af)); + memset(af, 0, sizeof(*af)); + af->filename = filename; + af->fd = fd; + af->addr_family = family; + af->fops = &stdio_fops; + af->ssl = NULL; + if (flags & AIO_WBUF_NONE) { + af->wbuft = WBUF_OPT_NONE; + } + else { +#ifdef HAVE_ISATTY + af->wbuft = isatty(af->fd) ? 
WBUF_OPT_LINE : WBUF_OPT_FULL; +#else + af->wbuft = WBUF_OPT_FULL; +#endif + } + +#ifdef FD_CLOEXEC + if ((flags & AIO_KEEPOPEN) == 0) { + (void)fcntl(af->fd, F_SETFD, FD_CLOEXEC); + } +#endif + aio_set_nonblocking(af, !!(flags & AIO_NONBLOCK)); + + af->flags |= flags; + + af->writebuf = Jim_NewStringObj(interp, NULL, 0); + Jim_IncrRefCount(af->writebuf); + af->wbuf_limit = AIO_DEFAULT_WBUF_LIMIT; + af->rbuf_len = AIO_DEFAULT_RBUF_LEN; + + + Jim_CreateCommand(interp, buf, JimAioSubCmdProc, af, JimAioDelProc); + + Jim_SetResult(interp, Jim_MakeGlobalNamespaceName(interp, cmdname)); + + return af; +} + +#if defined(HAVE_PIPE) || (defined(HAVE_SOCKETPAIR) && UNIX_SOCKETS) || defined(HAVE_OPENPTY) +static int JimMakeChannelPair(Jim_Interp *interp, int p[2], Jim_Obj *filename, + const char *hdlfmt, int family, int flags) +{ + if (JimMakeChannel(interp, p[0], filename, hdlfmt, family, flags)) { + Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0); + Jim_ListAppendElement(interp, objPtr, Jim_GetResult(interp)); + if (JimMakeChannel(interp, p[1], filename, hdlfmt, family, flags)) { + Jim_ListAppendElement(interp, objPtr, Jim_GetResult(interp)); + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + } + + + close(p[0]); + close(p[1]); + JimAioSetError(interp, NULL); + return JIM_ERR; +} +#endif + +#ifdef HAVE_PIPE +static int JimCreatePipe(Jim_Interp *interp, Jim_Obj *filenameObj, int flags) +{ + int p[2]; + + if (pipe(p) != 0) { + JimAioSetError(interp, NULL); + return JIM_ERR; + } + + return JimMakeChannelPair(interp, p, filenameObj, "aio.pipe%ld", 0, flags); +} + + +static int JimAioPipeCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 1) { + Jim_WrongNumArgs(interp, 1, argv, ""); + return JIM_ERR; + } + return JimCreatePipe(interp, argv[0], 0); +} +#endif + +#ifdef HAVE_OPENPTY +static int JimAioOpenPtyCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int p[2]; + char path[MAXPATHLEN]; + + if (argc != 1) { + Jim_WrongNumArgs(interp, 1, argv, ""); + return JIM_ERR; + } + + if (openpty(&p[0], &p[1], path, NULL, NULL) != 0) { + JimAioSetError(interp, NULL); + return JIM_ERR; + } + + + return JimMakeChannelPair(interp, p, Jim_NewStringObj(interp, path, -1), "aio.pty%ld", 0, 0); +} +#endif + + + +int Jim_aioInit(Jim_Interp *interp) +{ + if (Jim_PackageProvide(interp, "aio", "1.0", JIM_ERRMSG)) + return JIM_ERR; + +#if defined(JIM_SSL) + Jim_CreateCommand(interp, "load_ssl_certs", JimAioLoadSSLCertsCommand, NULL, NULL); +#endif + + Jim_CreateCommand(interp, "open", JimAioOpenCommand, NULL, NULL); +#ifdef HAVE_SOCKETS + Jim_CreateCommand(interp, "socket", JimAioSockCommand, NULL, NULL); +#endif +#ifdef HAVE_PIPE + Jim_CreateCommand(interp, "pipe", JimAioPipeCommand, NULL, NULL); +#endif + + + JimMakeChannel(interp, fileno(stdin), NULL, "stdin", 0, AIO_KEEPOPEN); + JimMakeChannel(interp, fileno(stdout), NULL, "stdout", 0, AIO_KEEPOPEN); + JimMakeChannel(interp, fileno(stderr), NULL, "stderr", 0, AIO_KEEPOPEN | AIO_WBUF_NONE); + + return JIM_OK; +} + +#include +#include +#include + + +#ifdef HAVE_DIRENT_H +#include +#endif + +int Jim_ReaddirCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *dirPath; + DIR *dirPtr; + struct dirent *entryPtr; + int nocomplain = 0; + + if (argc == 3 && Jim_CompareStringImmediate(interp, argv[1], "-nocomplain")) { + nocomplain = 1; + } + if (argc != 2 && !nocomplain) { + Jim_WrongNumArgs(interp, 1, argv, 
"?-nocomplain? dirPath"); + return JIM_ERR; + } + + dirPath = Jim_String(argv[1 + nocomplain]); + + dirPtr = opendir(dirPath); + if (dirPtr == NULL) { + if (nocomplain) { + return JIM_OK; + } + Jim_SetResultString(interp, strerror(errno), -1); + return JIM_ERR; + } + else { + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + + while ((entryPtr = readdir(dirPtr)) != NULL) { + if (entryPtr->d_name[0] == '.') { + if (entryPtr->d_name[1] == '\0') { + continue; + } + if ((entryPtr->d_name[1] == '.') && (entryPtr->d_name[2] == '\0')) + continue; + } + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, entryPtr->d_name, -1)); + } + closedir(dirPtr); + + Jim_SetResult(interp, listObj); + + return JIM_OK; + } +} + +int Jim_readdirInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "readdir"); + Jim_CreateCommand(interp, "readdir", Jim_ReaddirCmd, NULL, NULL); + return JIM_OK; +} + +#include +#include + +#if defined(JIM_REGEXP) +#else + #include + #define jim_regcomp regcomp + #define jim_regexec regexec + #define jim_regerror regerror + #define jim_regfree regfree +#endif + +static void FreeRegexpInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + jim_regfree(objPtr->internalRep.ptrIntValue.ptr); + Jim_Free(objPtr->internalRep.ptrIntValue.ptr); +} + +static const Jim_ObjType regexpObjType = { + "regexp", + FreeRegexpInternalRep, + NULL, + NULL, + JIM_TYPE_NONE +}; + +static regex_t *SetRegexpFromAny(Jim_Interp *interp, Jim_Obj *objPtr, unsigned flags) +{ + regex_t *compre; + const char *pattern; + int ret; + + + if (objPtr->typePtr == ®expObjType && + objPtr->internalRep.ptrIntValue.ptr && objPtr->internalRep.ptrIntValue.int1 == flags) { + + return objPtr->internalRep.ptrIntValue.ptr; + } + + + + + pattern = Jim_String(objPtr); + compre = Jim_Alloc(sizeof(regex_t)); + + if ((ret = jim_regcomp(compre, pattern, REG_EXTENDED | flags)) != 0) { + char buf[100]; + + jim_regerror(ret, compre, buf, sizeof(buf)); + Jim_SetResultFormatted(interp, "couldn't compile regular expression pattern: %s", buf); + jim_regfree(compre); + Jim_Free(compre); + return NULL; + } + + Jim_FreeIntRep(interp, objPtr); + + objPtr->typePtr = ®expObjType; + objPtr->internalRep.ptrIntValue.int1 = flags; + objPtr->internalRep.ptrIntValue.ptr = compre; + + return compre; +} + +int Jim_RegexpCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int opt_indices = 0; + int opt_all = 0; + int opt_inline = 0; + regex_t *regex; + int match, i, j; + int offset = 0; + regmatch_t *pmatch = NULL; + int source_len; + int result = JIM_OK; + const char *pattern; + const char *source_str; + int num_matches = 0; + int num_vars; + Jim_Obj *resultListObj = NULL; + int regcomp_flags = 0; + int eflags = 0; + int option; + enum { + OPT_INDICES, OPT_NOCASE, OPT_LINE, OPT_ALL, OPT_INLINE, OPT_START, OPT_END + }; + static const char * const options[] = { + "-indices", "-nocase", "-line", "-all", "-inline", "-start", "--", NULL + }; + + if (argc < 3) { + wrongNumArgs: + Jim_WrongNumArgs(interp, 1, argv, + "?-switch ...? exp string ?matchVar? 
?subMatchVar ...?"); + return JIM_ERR; + } + + for (i = 1; i < argc; i++) { + const char *opt = Jim_String(argv[i]); + + if (*opt != '-') { + break; + } + if (Jim_GetEnum(interp, argv[i], options, &option, "switch", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + if (option == OPT_END) { + i++; + break; + } + switch (option) { + case OPT_INDICES: + opt_indices = 1; + break; + + case OPT_NOCASE: + regcomp_flags |= REG_ICASE; + break; + + case OPT_LINE: + regcomp_flags |= REG_NEWLINE; + break; + + case OPT_ALL: + opt_all = 1; + break; + + case OPT_INLINE: + opt_inline = 1; + break; + + case OPT_START: + if (++i == argc) { + goto wrongNumArgs; + } + if (Jim_GetIndex(interp, argv[i], &offset) != JIM_OK) { + return JIM_ERR; + } + break; + } + } + if (argc - i < 2) { + goto wrongNumArgs; + } + + regex = SetRegexpFromAny(interp, argv[i], regcomp_flags); + if (!regex) { + return JIM_ERR; + } + + pattern = Jim_String(argv[i]); + source_str = Jim_GetString(argv[i + 1], &source_len); + + num_vars = argc - i - 2; + + if (opt_inline) { + if (num_vars) { + Jim_SetResultString(interp, "regexp match variables not allowed when using -inline", + -1); + result = JIM_ERR; + goto done; + } + num_vars = regex->re_nsub + 1; + } + + pmatch = Jim_Alloc((num_vars + 1) * sizeof(*pmatch)); + + if (offset) { + if (offset < 0) { + offset += source_len + 1; + } + if (offset > source_len) { + source_str += source_len; + } + else if (offset > 0) { + source_str += utf8_index(source_str, offset); + } + eflags |= REG_NOTBOL; + } + + if (opt_inline) { + resultListObj = Jim_NewListObj(interp, NULL, 0); + } + + next_match: + match = jim_regexec(regex, source_str, num_vars + 1, pmatch, eflags); + if (match >= REG_BADPAT) { + char buf[100]; + + jim_regerror(match, regex, buf, sizeof(buf)); + Jim_SetResultFormatted(interp, "error while matching pattern: %s", buf); + result = JIM_ERR; + goto done; + } + + if (match == REG_NOMATCH) { + goto done; + } + + num_matches++; + + if (opt_all && !opt_inline) { + + goto try_next_match; + } + + + j = 0; + for (i += 2; opt_inline ? 
j < num_vars : i < argc; i++, j++) { + Jim_Obj *resultObj; + + if (opt_indices) { + resultObj = Jim_NewListObj(interp, NULL, 0); + } + else { + resultObj = Jim_NewStringObj(interp, "", 0); + } + + if (pmatch[j].rm_so == -1) { + if (opt_indices) { + Jim_ListAppendElement(interp, resultObj, Jim_NewIntObj(interp, -1)); + Jim_ListAppendElement(interp, resultObj, Jim_NewIntObj(interp, -1)); + } + } + else { + if (opt_indices) { + + int so = utf8_strlen(source_str, pmatch[j].rm_so); + int eo = utf8_strlen(source_str, pmatch[j].rm_eo); + Jim_ListAppendElement(interp, resultObj, Jim_NewIntObj(interp, offset + so)); + Jim_ListAppendElement(interp, resultObj, Jim_NewIntObj(interp, offset + eo - 1)); + } + else { + Jim_AppendString(interp, resultObj, source_str + pmatch[j].rm_so, pmatch[j].rm_eo - pmatch[j].rm_so); + } + } + + if (opt_inline) { + Jim_ListAppendElement(interp, resultListObj, resultObj); + } + else { + + result = Jim_SetVariable(interp, argv[i], resultObj); + + if (result != JIM_OK) { + Jim_FreeObj(interp, resultObj); + break; + } + } + } + + try_next_match: + if (opt_all && (pattern[0] != '^' || (regcomp_flags & REG_NEWLINE)) && *source_str) { + if (pmatch[0].rm_eo) { + offset += utf8_strlen(source_str, pmatch[0].rm_eo); + source_str += pmatch[0].rm_eo; + } + else { + source_str++; + offset++; + } + if (*source_str) { + eflags = REG_NOTBOL; + goto next_match; + } + } + + done: + if (result == JIM_OK) { + if (opt_inline) { + Jim_SetResult(interp, resultListObj); + } + else { + Jim_SetResultInt(interp, num_matches); + } + } + + Jim_Free(pmatch); + return result; +} + +#define MAX_SUB_MATCHES 50 + +int Jim_RegsubCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int regcomp_flags = 0; + int regexec_flags = 0; + int opt_all = 0; + int opt_command = 0; + int offset = 0; + regex_t *regex; + const char *p; + int result = JIM_OK; + regmatch_t pmatch[MAX_SUB_MATCHES + 1]; + int num_matches = 0; + + int i, j, n; + Jim_Obj *varname; + Jim_Obj *resultObj; + Jim_Obj *cmd_prefix = NULL; + Jim_Obj *regcomp_obj = NULL; + const char *source_str; + int source_len; + const char *replace_str = NULL; + int replace_len; + const char *pattern; + int option; + enum { + OPT_NOCASE, OPT_LINE, OPT_ALL, OPT_START, OPT_COMMAND, OPT_END + }; + static const char * const options[] = { + "-nocase", "-line", "-all", "-start", "-command", "--", NULL + }; + + if (argc < 4) { + wrongNumArgs: + Jim_WrongNumArgs(interp, 1, argv, + "?-switch ...? 
exp string subSpec ?varName?"); + return JIM_ERR; + } + + for (i = 1; i < argc; i++) { + const char *opt = Jim_String(argv[i]); + + if (*opt != '-') { + break; + } + if (Jim_GetEnum(interp, argv[i], options, &option, "switch", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + if (option == OPT_END) { + i++; + break; + } + switch (option) { + case OPT_NOCASE: + regcomp_flags |= REG_ICASE; + break; + + case OPT_LINE: + regcomp_flags |= REG_NEWLINE; + break; + + case OPT_ALL: + opt_all = 1; + break; + + case OPT_START: + if (++i == argc) { + goto wrongNumArgs; + } + if (Jim_GetIndex(interp, argv[i], &offset) != JIM_OK) { + return JIM_ERR; + } + break; + + case OPT_COMMAND: + opt_command = 1; + break; + } + } + if (argc - i != 3 && argc - i != 4) { + goto wrongNumArgs; + } + + + regcomp_obj = Jim_DuplicateObj(interp, argv[i]); + Jim_IncrRefCount(regcomp_obj); + regex = SetRegexpFromAny(interp, regcomp_obj, regcomp_flags); + if (!regex) { + Jim_DecrRefCount(interp, regcomp_obj); + return JIM_ERR; + } + pattern = Jim_String(argv[i]); + + source_str = Jim_GetString(argv[i + 1], &source_len); + if (opt_command) { + cmd_prefix = argv[i + 2]; + if (Jim_ListLength(interp, cmd_prefix) == 0) { + Jim_SetResultString(interp, "command prefix must be a list of at least one element", -1); + Jim_DecrRefCount(interp, regcomp_obj); + return JIM_ERR; + } + Jim_IncrRefCount(cmd_prefix); + } + else { + replace_str = Jim_GetString(argv[i + 2], &replace_len); + } + varname = argv[i + 3]; + + + resultObj = Jim_NewStringObj(interp, "", 0); + + if (offset) { + if (offset < 0) { + offset += source_len + 1; + } + if (offset > source_len) { + offset = source_len; + } + else if (offset < 0) { + offset = 0; + } + } + + offset = utf8_index(source_str, offset); + + + Jim_AppendString(interp, resultObj, source_str, offset); + + + n = source_len - offset; + p = source_str + offset; + do { + int match = jim_regexec(regex, p, MAX_SUB_MATCHES, pmatch, regexec_flags); + + if (match >= REG_BADPAT) { + char buf[100]; + + jim_regerror(match, regex, buf, sizeof(buf)); + Jim_SetResultFormatted(interp, "error while matching pattern: %s", buf); + return JIM_ERR; + } + if (match == REG_NOMATCH) { + break; + } + + num_matches++; + + Jim_AppendString(interp, resultObj, p, pmatch[0].rm_so); + + if (opt_command) { + + Jim_Obj *cmdListObj = Jim_DuplicateObj(interp, cmd_prefix); + for (j = 0; j < MAX_SUB_MATCHES; j++) { + if (pmatch[j].rm_so == -1) { + break; + } + else { + Jim_Obj *srcObj = Jim_NewStringObj(interp, p + pmatch[j].rm_so, pmatch[j].rm_eo - pmatch[j].rm_so); + Jim_ListAppendElement(interp, cmdListObj, srcObj); + } + } + Jim_IncrRefCount(cmdListObj); + + result = Jim_EvalObj(interp, cmdListObj); + Jim_DecrRefCount(interp, cmdListObj); + if (result != JIM_OK) { + goto cmd_error; + } + Jim_AppendString(interp, resultObj, Jim_String(Jim_GetResult(interp)), -1); + } + else { + + for (j = 0; j < replace_len; j++) { + int idx; + int c = replace_str[j]; + + if (c == '&') { + idx = 0; + } + else if (c == '\\' && j < replace_len) { + c = replace_str[++j]; + if ((c >= '0') && (c <= '9')) { + idx = c - '0'; + } + else if ((c == '\\') || (c == '&')) { + Jim_AppendString(interp, resultObj, replace_str + j, 1); + continue; + } + else { + Jim_AppendString(interp, resultObj, replace_str + j - 1, (j == replace_len) ? 
1 : 2); + continue; + } + } + else { + Jim_AppendString(interp, resultObj, replace_str + j, 1); + continue; + } + if ((idx < MAX_SUB_MATCHES) && pmatch[idx].rm_so != -1 && pmatch[idx].rm_eo != -1) { + Jim_AppendString(interp, resultObj, p + pmatch[idx].rm_so, + pmatch[idx].rm_eo - pmatch[idx].rm_so); + } + } + } + + p += pmatch[0].rm_eo; + n -= pmatch[0].rm_eo; + + + if (!opt_all || n == 0) { + break; + } + + + if ((regcomp_flags & REG_NEWLINE) == 0 && pattern[0] == '^') { + break; + } + + + if (pattern[0] == '\0' && n) { + + Jim_AppendString(interp, resultObj, p, 1); + p++; + n--; + } + + if (pmatch[0].rm_eo == pmatch[0].rm_so) { + + regexec_flags = REG_NOTBOL; + } + else { + regexec_flags = 0; + } + + } while (n); + + Jim_AppendString(interp, resultObj, p, -1); + +cmd_error: + if (result == JIM_OK) { + + if (argc - i == 4) { + result = Jim_SetVariable(interp, varname, resultObj); + + if (result == JIM_OK) { + Jim_SetResultInt(interp, num_matches); + } + else { + Jim_FreeObj(interp, resultObj); + } + } + else { + Jim_SetResult(interp, resultObj); + result = JIM_OK; + } + } + else { + Jim_FreeObj(interp, resultObj); + } + + if (opt_command) { + Jim_DecrRefCount(interp, cmd_prefix); + } + + Jim_DecrRefCount(interp, regcomp_obj); + + return result; +} + +int Jim_regexpInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "regexp"); + Jim_CreateCommand(interp, "regexp", Jim_RegexpCmd, NULL, NULL); + Jim_CreateCommand(interp, "regsub", Jim_RegsubCmd, NULL, NULL); + return JIM_OK; +} + +#include +#include +#include +#include +#include + + +#ifdef HAVE_UTIMES +#include +#endif +#ifdef HAVE_UNISTD_H +#include +#elif defined(_MSC_VER) +#include +#define F_OK 0 +#define W_OK 2 +#define R_OK 4 +#define S_ISREG(m) (((m) & S_IFMT) == S_IFREG) +#define S_ISDIR(m) (((m) & S_IFMT) == S_IFDIR) +#endif + +#if defined(__MINGW32__) || defined(__MSYS__) || defined(_MSC_VER) +#define ISWINDOWS 1 + +#undef HAVE_SYMLINK +#else +#define ISWINDOWS 0 +#endif + + +#if defined(HAVE_STRUCT_STAT_ST_MTIMESPEC) + #define STAT_MTIME_US(STAT) ((STAT).st_mtimespec.tv_sec * 1000000ll + (STAT).st_mtimespec.tv_nsec / 1000) +#elif defined(HAVE_STRUCT_STAT_ST_MTIM) + #define STAT_MTIME_US(STAT) ((STAT).st_mtim.tv_sec * 1000000ll + (STAT).st_mtim.tv_nsec / 1000) +#endif + + +static void JimFixPath(char *path) +{ + if (ISWINDOWS) { + + char *p = path; + while ((p = strchr(p, '\\')) != NULL) { + *p++ = '/'; + } + } +} + + +static const char *JimGetFileType(int mode) +{ + if (S_ISREG(mode)) { + return "file"; + } + else if (S_ISDIR(mode)) { + return "directory"; + } +#ifdef S_ISCHR + else if (S_ISCHR(mode)) { + return "characterSpecial"; + } +#endif +#ifdef S_ISBLK + else if (S_ISBLK(mode)) { + return "blockSpecial"; + } +#endif +#ifdef S_ISFIFO + else if (S_ISFIFO(mode)) { + return "fifo"; + } +#endif +#ifdef S_ISLNK + else if (S_ISLNK(mode)) { + return "link"; + } +#endif +#ifdef S_ISSOCK + else if (S_ISSOCK(mode)) { + return "socket"; + } +#endif + return "unknown"; +} + +static void AppendStatElement(Jim_Interp *interp, Jim_Obj *listObj, const char *key, jim_wide value) +{ + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, key, -1)); + Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, value)); +} + +int Jim_FileStoreStatData(Jim_Interp *interp, Jim_Obj *varName, const jim_stat_t *sb) +{ + + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + + AppendStatElement(interp, listObj, "dev", sb->st_dev); + AppendStatElement(interp, listObj, "ino", sb->st_ino); + AppendStatElement(interp, listObj, 
"mode", sb->st_mode); + AppendStatElement(interp, listObj, "nlink", sb->st_nlink); + AppendStatElement(interp, listObj, "uid", sb->st_uid); + AppendStatElement(interp, listObj, "gid", sb->st_gid); + AppendStatElement(interp, listObj, "size", sb->st_size); + AppendStatElement(interp, listObj, "atime", sb->st_atime); + AppendStatElement(interp, listObj, "mtime", sb->st_mtime); + AppendStatElement(interp, listObj, "ctime", sb->st_ctime); +#ifdef STAT_MTIME_US + AppendStatElement(interp, listObj, "mtimeus", STAT_MTIME_US(*sb)); +#endif + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "type", -1)); + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, JimGetFileType((int)sb->st_mode), -1)); + + + if (varName) { + Jim_Obj *objPtr; + objPtr = Jim_GetVariable(interp, varName, JIM_NONE); + + if (objPtr) { + Jim_Obj *objv[2]; + + objv[0] = objPtr; + objv[1] = listObj; + + objPtr = Jim_DictMerge(interp, 2, objv); + if (objPtr == NULL) { + + Jim_SetResultFormatted(interp, "can't set \"%#s(dev)\": variable isn't array", varName); + Jim_FreeNewObj(interp, listObj); + return JIM_ERR; + } + + Jim_InvalidateStringRep(objPtr); + + Jim_FreeNewObj(interp, listObj); + listObj = objPtr; + } + Jim_SetVariable(interp, varName, listObj); + } + + + Jim_SetResult(interp, listObj); + + return JIM_OK; +} + +static int JimPathLenNoTrailingSlashes(const char *path, int len) +{ + int i; + for (i = len; i > 1 && path[i - 1] == '/'; i--) { + + if (ISWINDOWS && path[i - 2] == ':') { + + break; + } + } + return i; +} + +static Jim_Obj *JimStripTrailingSlashes(Jim_Interp *interp, Jim_Obj *objPtr) +{ + int len = Jim_Length(objPtr); + const char *path = Jim_String(objPtr); + int i = JimPathLenNoTrailingSlashes(path, len); + if (i != len) { + objPtr = Jim_NewStringObj(interp, path, i); + } + Jim_IncrRefCount(objPtr); + return objPtr; +} + +static int file_cmd_dirname(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = JimStripTrailingSlashes(interp, argv[0]); + const char *path = Jim_String(objPtr); + const char *p = strrchr(path, '/'); + + if (!p) { + Jim_SetResultString(interp, ".", -1); + } + else if (p[1] == 0) { + + Jim_SetResult(interp, objPtr); + } + else if (p == path) { + Jim_SetResultString(interp, "/", -1); + } + else if (ISWINDOWS && p[-1] == ':') { + + Jim_SetResultString(interp, path, p - path + 1); + } + else { + + int len = JimPathLenNoTrailingSlashes(path, p - path); + Jim_SetResultString(interp, path, len); + } + Jim_DecrRefCount(interp, objPtr); + return JIM_OK; +} + +static int file_cmd_split(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + const char *path = Jim_String(argv[0]); + + if (*path == '/') { + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "/", 1)); + } + + while (1) { + + while (*path == '/') { + path++; + } + if (*path) { + const char *pt = strchr(path, '/'); + if (pt) { + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, path, pt - path)); + path = pt; + continue; + } + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, path, -1)); + } + break; + } + Jim_SetResult(interp, listObj); + return JIM_OK; +} + +static int file_cmd_rootname(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *path = Jim_String(argv[0]); + const char *lastSlash = strrchr(path, '/'); + const char *p = strrchr(path, '.'); + + if (p == NULL || (lastSlash != NULL && lastSlash > p)) { + Jim_SetResult(interp, argv[0]); + } + else { + 
Jim_SetResultString(interp, path, p - path); + } + return JIM_OK; +} + +static int file_cmd_extension(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = JimStripTrailingSlashes(interp, argv[0]); + const char *path = Jim_String(objPtr); + const char *lastSlash = strrchr(path, '/'); + const char *p = strrchr(path, '.'); + + if (p == NULL || (lastSlash != NULL && lastSlash >= p)) { + p = ""; + } + Jim_SetResultString(interp, p, -1); + Jim_DecrRefCount(interp, objPtr); + return JIM_OK; +} + +static int file_cmd_tail(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = JimStripTrailingSlashes(interp, argv[0]); + const char *path = Jim_String(objPtr); + const char *lastSlash = strrchr(path, '/'); + + if (lastSlash) { + Jim_SetResultString(interp, lastSlash + 1, -1); + } + else { + Jim_SetResult(interp, objPtr); + } + Jim_DecrRefCount(interp, objPtr); + return JIM_OK; +} + +#ifndef HAVE_RESTRICT +#define restrict +#endif + +static char *JimRealPath(const char *restrict path, char *restrict resolved_path, size_t len) +{ +#if defined(HAVE__FULLPATH) + return _fullpath(resolved_path, path, len); +#elif defined(HAVE_REALPATH) + return realpath(path, resolved_path); +#else + return NULL; +#endif +} + +static int file_cmd_normalize(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *path = Jim_String(argv[0]); + char *newname = Jim_Alloc(MAXPATHLEN); + + if (JimRealPath(path, newname, MAXPATHLEN)) { + JimFixPath(newname); + Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, -1)); + return JIM_OK; + } + Jim_Free(newname); + Jim_SetResultFormatted(interp, "can't normalize \"%#s\": %s", argv[0], strerror(errno)); + return JIM_ERR; +} + +static int file_cmd_join(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + char *newname = Jim_Alloc(MAXPATHLEN + 1); + char *last = newname; + + *newname = 0; + + + for (i = 0; i < argc; i++) { + int len; + const char *part = Jim_GetString(argv[i], &len); + + if (*part == '/') { + + last = newname; + } + else if (ISWINDOWS && strchr(part, ':')) { + + last = newname; + } + else if (part[0] == '.') { + if (part[1] == '/') { + part += 2; + len -= 2; + } + else if (part[1] == 0 && last != newname) { + + continue; + } + } + + + if (last != newname && last[-1] != '/') { + *last++ = '/'; + } + + if (len) { + if (last + len - newname >= MAXPATHLEN) { + Jim_Free(newname); + Jim_SetResultString(interp, "Path too long", -1); + return JIM_ERR; + } + memcpy(last, part, len); + last += len; + } + + + if (last > newname + 1 && last[-1] == '/') { + + if (!ISWINDOWS || !(last > newname + 2 && last[-2] == ':')) { + *--last = 0; + } + } + } + + *last = 0; + + + + Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, newname, last - newname)); + + return JIM_OK; +} + +static int file_access(Jim_Interp *interp, Jim_Obj *filename, int mode) +{ + Jim_SetResultBool(interp, access(Jim_String(filename), mode) != -1); + + return JIM_OK; +} + +static int file_cmd_readable(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return file_access(interp, argv[0], R_OK); +} + +static int file_cmd_writable(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return file_access(interp, argv[0], W_OK); +} + +static int file_cmd_executable(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ +#ifdef X_OK + return file_access(interp, argv[0], X_OK); +#else + + Jim_SetResultBool(interp, 1); + return JIM_OK; +#endif +} + +static int file_cmd_exists(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return 
file_access(interp, argv[0], F_OK); +} + +static int file_cmd_delete(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int force = Jim_CompareStringImmediate(interp, argv[0], "-force"); + + if (force || Jim_CompareStringImmediate(interp, argv[0], "--")) { + argc--; + argv++; + } + + while (argc--) { + const char *path = Jim_String(argv[0]); + + if (unlink(path) == -1 && errno != ENOENT) { + if (rmdir(path) == -1) { + + if (!force || Jim_EvalPrefix(interp, "file delete force", 1, argv) != JIM_OK) { + Jim_SetResultFormatted(interp, "couldn't delete file \"%s\": %s", path, + strerror(errno)); + return JIM_ERR; + } + } + } + argv++; + } + return JIM_OK; +} + +#ifdef HAVE_MKDIR_ONE_ARG +#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME) +#else +#define MKDIR_DEFAULT(PATHNAME) mkdir(PATHNAME, 0755) +#endif + +static int mkdir_all(char *path) +{ + int ok = 1; + + + goto first; + + while (ok--) { + + { + char *slash = strrchr(path, '/'); + + if (slash && slash != path) { + *slash = 0; + if (mkdir_all(path) != 0) { + return -1; + } + *slash = '/'; + } + } + first: + if (MKDIR_DEFAULT(path) == 0) { + return 0; + } + if (errno == ENOENT) { + + continue; + } + + if (errno == EEXIST) { + jim_stat_t sb; + + if (Jim_Stat(path, &sb) == 0 && S_ISDIR(sb.st_mode)) { + return 0; + } + + errno = EEXIST; + } + + break; + } + return -1; +} + +static int file_cmd_mkdir(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + while (argc--) { + char *path = Jim_StrDup(Jim_String(argv[0])); + int rc = mkdir_all(path); + + Jim_Free(path); + if (rc != 0) { + Jim_SetResultFormatted(interp, "can't create directory \"%#s\": %s", argv[0], + strerror(errno)); + return JIM_ERR; + } + argv++; + } + return JIM_OK; +} + +static int file_cmd_tempfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int fd = Jim_MakeTempFile(interp, (argc >= 1) ? 
Jim_String(argv[0]) : NULL, 0); + + if (fd < 0) { + return JIM_ERR; + } + close(fd); + + return JIM_OK; +} + +static int file_cmd_rename(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *source; + const char *dest; + int force = 0; + + if (argc == 3) { + if (!Jim_CompareStringImmediate(interp, argv[0], "-force")) { + return -1; + } + force++; + argv++; + argc--; + } + + source = Jim_String(argv[0]); + dest = Jim_String(argv[1]); + + if (!force && access(dest, F_OK) == 0) { + Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": target exists", argv[0], + argv[1]); + return JIM_ERR; + } +#if ISWINDOWS + if (access(dest, F_OK) == 0) { + + remove(dest); + } +#endif + if (rename(source, dest) != 0) { + Jim_SetResultFormatted(interp, "error renaming \"%#s\" to \"%#s\": %s", argv[0], argv[1], + strerror(errno)); + return JIM_ERR; + } + + return JIM_OK; +} + +#if defined(HAVE_LINK) && defined(HAVE_SYMLINK) +static int file_cmd_link(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int ret; + const char *source; + const char *dest; + static const char * const options[] = { "-hard", "-symbolic", NULL }; + enum { OPT_HARD, OPT_SYMBOLIC, }; + int option = OPT_HARD; + + if (argc == 3) { + if (Jim_GetEnum(interp, argv[0], options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + argv++; + argc--; + } + + dest = Jim_String(argv[0]); + source = Jim_String(argv[1]); + + if (option == OPT_HARD) { + ret = link(source, dest); + } + else { + ret = symlink(source, dest); + } + + if (ret != 0) { + Jim_SetResultFormatted(interp, "error linking \"%#s\" to \"%#s\": %s", argv[0], argv[1], + strerror(errno)); + return JIM_ERR; + } + + return JIM_OK; +} +#endif + +static int file_stat(Jim_Interp *interp, Jim_Obj *filename, jim_stat_t *sb) +{ + const char *path = Jim_String(filename); + + if (Jim_Stat(path, sb) == -1) { + Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno)); + return JIM_ERR; + } + return JIM_OK; +} + +#ifdef Jim_LinkStat +static int file_lstat(Jim_Interp *interp, Jim_Obj *filename, jim_stat_t *sb) +{ + const char *path = Jim_String(filename); + + if (Jim_LinkStat(path, sb) == -1) { + Jim_SetResultFormatted(interp, "could not read \"%#s\": %s", filename, strerror(errno)); + return JIM_ERR; + } + return JIM_OK; +} +#else +#define file_lstat file_stat +#endif + +static int file_cmd_atime(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (file_stat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResultInt(interp, sb.st_atime); + return JIM_OK; +} + +static int JimSetFileTimes(Jim_Interp *interp, const char *filename, jim_wide us) +{ +#ifdef HAVE_UTIMES + struct timeval times[2]; + + times[1].tv_sec = times[0].tv_sec = us / 1000000; + times[1].tv_usec = times[0].tv_usec = us % 1000000; + + if (utimes(filename, times) != 0) { + Jim_SetResultFormatted(interp, "can't set time on \"%s\": %s", filename, strerror(errno)); + return JIM_ERR; + } + return JIM_OK; +#else + Jim_SetResultString(interp, "Not implemented", -1); + return JIM_ERR; +#endif +} + +static int file_cmd_mtime(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (argc == 2) { + jim_wide secs; + if (Jim_GetWide(interp, argv[1], &secs) != JIM_OK) { + return JIM_ERR; + } + return JimSetFileTimes(interp, Jim_String(argv[0]), secs * 1000000); + } + if (file_stat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResultInt(interp, sb.st_mtime); + return 
JIM_OK; +} + +#ifdef STAT_MTIME_US +static int file_cmd_mtimeus(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (argc == 2) { + jim_wide us; + if (Jim_GetWide(interp, argv[1], &us) != JIM_OK) { + return JIM_ERR; + } + return JimSetFileTimes(interp, Jim_String(argv[0]), us); + } + if (file_stat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResultInt(interp, STAT_MTIME_US(sb)); + return JIM_OK; +} +#endif + +static int file_cmd_copy(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return Jim_EvalPrefix(interp, "file copy", argc, argv); +} + +static int file_cmd_size(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (file_stat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResultInt(interp, sb.st_size); + return JIM_OK; +} + +static int file_cmd_isdirectory(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + int ret = 0; + + if (file_stat(interp, argv[0], &sb) == JIM_OK) { + ret = S_ISDIR(sb.st_mode); + } + Jim_SetResultInt(interp, ret); + return JIM_OK; +} + +static int file_cmd_isfile(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + int ret = 0; + + if (file_stat(interp, argv[0], &sb) == JIM_OK) { + ret = S_ISREG(sb.st_mode); + } + Jim_SetResultInt(interp, ret); + return JIM_OK; +} + +#ifdef HAVE_GETEUID +static int file_cmd_owned(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + int ret = 0; + + if (file_stat(interp, argv[0], &sb) == JIM_OK) { + ret = (geteuid() == sb.st_uid); + } + Jim_SetResultInt(interp, ret); + return JIM_OK; +} +#endif + +#if defined(HAVE_READLINK) +static int file_cmd_readlink(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *path = Jim_String(argv[0]); + char *linkValue = Jim_Alloc(MAXPATHLEN + 1); + + int linkLength = readlink(path, linkValue, MAXPATHLEN); + + if (linkLength == -1) { + Jim_Free(linkValue); + Jim_SetResultFormatted(interp, "could not read link \"%#s\": %s", argv[0], strerror(errno)); + return JIM_ERR; + } + linkValue[linkLength] = 0; + Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, linkValue, linkLength)); + return JIM_OK; +} +#endif + +static int file_cmd_type(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (file_lstat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResultString(interp, JimGetFileType((int)sb.st_mode), -1); + return JIM_OK; +} + +#ifdef Jim_LinkStat +static int file_cmd_lstat(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (file_lstat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + return Jim_FileStoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb); +} +#else +#define file_cmd_lstat file_cmd_stat +#endif + +static int file_cmd_stat(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_stat_t sb; + + if (file_stat(interp, argv[0], &sb) != JIM_OK) { + return JIM_ERR; + } + return Jim_FileStoreStatData(interp, argc == 2 ? argv[1] : NULL, &sb); +} + +static const jim_subcmd_type file_command_table[] = { + { "atime", + "name", + file_cmd_atime, + 1, + 1, + + }, + { "mtime", + "name ?time?", + file_cmd_mtime, + 1, + 2, + + }, +#ifdef STAT_MTIME_US + { "mtimeus", + "name ?time?", + file_cmd_mtimeus, + 1, + 2, + + }, +#endif + { "copy", + "?-force? 
source dest", + file_cmd_copy, + 2, + 3, + + }, + { "dirname", + "name", + file_cmd_dirname, + 1, + 1, + + }, + { "rootname", + "name", + file_cmd_rootname, + 1, + 1, + + }, + { "extension", + "name", + file_cmd_extension, + 1, + 1, + + }, + { "tail", + "name", + file_cmd_tail, + 1, + 1, + + }, + { "split", + "name", + file_cmd_split, + 1, + 1, + + }, + { "normalize", + "name", + file_cmd_normalize, + 1, + 1, + + }, + { "join", + "name ?name ...?", + file_cmd_join, + 1, + -1, + + }, + { "readable", + "name", + file_cmd_readable, + 1, + 1, + + }, + { "writable", + "name", + file_cmd_writable, + 1, + 1, + + }, + { "executable", + "name", + file_cmd_executable, + 1, + 1, + + }, + { "exists", + "name", + file_cmd_exists, + 1, + 1, + + }, + { "delete", + "?-force|--? name ...", + file_cmd_delete, + 1, + -1, + + }, + { "mkdir", + "dir ...", + file_cmd_mkdir, + 1, + -1, + + }, + { "tempfile", + "?template?", + file_cmd_tempfile, + 0, + 1, + + }, + { "rename", + "?-force? source dest", + file_cmd_rename, + 2, + 3, + + }, +#if defined(HAVE_LINK) && defined(HAVE_SYMLINK) + { "link", + "?-symbolic|-hard? newname target", + file_cmd_link, + 2, + 3, + + }, +#endif +#if defined(HAVE_READLINK) + { "readlink", + "name", + file_cmd_readlink, + 1, + 1, + + }, +#endif + { "size", + "name", + file_cmd_size, + 1, + 1, + + }, + { "stat", + "name ?var?", + file_cmd_stat, + 1, + 2, + + }, + { "lstat", + "name ?var?", + file_cmd_lstat, + 1, + 2, + + }, + { "type", + "name", + file_cmd_type, + 1, + 1, + + }, +#ifdef HAVE_GETEUID + { "owned", + "name", + file_cmd_owned, + 1, + 1, + + }, +#endif + { "isdirectory", + "name", + file_cmd_isdirectory, + 1, + 1, + + }, + { "isfile", + "name", + file_cmd_isfile, + 1, + 1, + + }, + { + NULL + } +}; + +static int Jim_CdCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *path; + + if (argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "dirname"); + return JIM_ERR; + } + + path = Jim_String(argv[1]); + + if (chdir(path) != 0) { + Jim_SetResultFormatted(interp, "couldn't change working directory to \"%s\": %s", path, + strerror(errno)); + return JIM_ERR; + } + return JIM_OK; +} + +static int Jim_PwdCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + char *cwd = Jim_Alloc(MAXPATHLEN); + + if (getcwd(cwd, MAXPATHLEN) == NULL) { + Jim_SetResultString(interp, "Failed to get pwd", -1); + Jim_Free(cwd); + return JIM_ERR; + } + JimFixPath(cwd); + Jim_SetResultString(interp, cwd, -1); + + Jim_Free(cwd); + return JIM_OK; +} + +int Jim_fileInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "file"); + Jim_CreateCommand(interp, "file", Jim_SubCmdProc, (void *)file_command_table, NULL); + Jim_CreateCommand(interp, "pwd", Jim_PwdCmd, NULL, NULL); + Jim_CreateCommand(interp, "cd", Jim_CdCmd, NULL, NULL); + return JIM_OK; +} + +#include +#include + + +#if (!(defined(HAVE_VFORK) || defined(HAVE_FORK)) || !defined(HAVE_WAITPID)) && !defined(__MINGW32__) +static int Jim_ExecCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *cmdlineObj = Jim_NewEmptyStringObj(interp); + int i, j; + int rc; + + + for (i = 1; i < argc; i++) { + int len; + const char *arg = Jim_GetString(argv[i], &len); + + if (i > 1) { + Jim_AppendString(interp, cmdlineObj, " ", 1); + } + if (strpbrk(arg, "\\\" ") == NULL) { + + Jim_AppendString(interp, cmdlineObj, arg, len); + continue; + } + + Jim_AppendString(interp, cmdlineObj, "\"", 1); + for (j = 0; j < len; j++) { + if (arg[j] == '\\' || arg[j] == '"') { + Jim_AppendString(interp, cmdlineObj, "\\", 1); + } + 
Jim_AppendString(interp, cmdlineObj, &arg[j], 1); + } + Jim_AppendString(interp, cmdlineObj, "\"", 1); + } + rc = system(Jim_String(cmdlineObj)); + + Jim_FreeNewObj(interp, cmdlineObj); + + if (rc) { + Jim_Obj *errorCode = Jim_NewListObj(interp, NULL, 0); + Jim_ListAppendElement(interp, errorCode, Jim_NewStringObj(interp, "CHILDSTATUS", -1)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, 0)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, rc)); + Jim_SetGlobalVariableStr(interp, "errorCode", errorCode); + return JIM_ERR; + } + + return JIM_OK; +} + +int Jim_execInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "exec"); + Jim_CreateCommand(interp, "exec", Jim_ExecCmd, NULL, NULL); + return JIM_OK; +} +#else + + +#include +#include +#include + +struct WaitInfoTable; + +static char **JimOriginalEnviron(void); +static char **JimSaveEnv(char **env); +static void JimRestoreEnv(char **env); +static int JimCreatePipeline(Jim_Interp *interp, int argc, Jim_Obj *const *argv, + phandle_t **pidArrayPtr, int *inPipePtr, int *outPipePtr, int *errFilePtr); +static void JimDetachPids(struct WaitInfoTable *table, int numPids, const phandle_t *pidPtr); +static int JimCleanupChildren(Jim_Interp *interp, int numPids, phandle_t *pidPtr, Jim_Obj *errStrObj); +static int Jim_WaitCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv); + +#if defined(__MINGW32__) +static phandle_t JimStartWinProcess(Jim_Interp *interp, char **argv, char **env, int inputId, int outputId, int errorId); +#endif + +static void Jim_RemoveTrailingNewline(Jim_Obj *objPtr) +{ + int len; + const char *s = Jim_GetString(objPtr, &len); + + if (len > 0 && s[len - 1] == '\n') { + objPtr->length--; + objPtr->bytes[objPtr->length] = '\0'; + } +} + +static int JimAppendStreamToString(Jim_Interp *interp, int fd, Jim_Obj *strObj) +{ + char buf[256]; + int ret = 0; + + while (1) { + int retval = read(fd, buf, sizeof(buf)); + if (retval > 0) { + ret = 1; + Jim_AppendString(interp, strObj, buf, retval); + } + if (retval <= 0) { + break; + } + } + close(fd); + return ret; +} + +static char **JimBuildEnv(Jim_Interp *interp) +{ + int i; + int size; + int num; + int n; + char **envptr; + char *envdata; + + Jim_Obj *objPtr = Jim_GetGlobalVariableStr(interp, "env", JIM_NONE); + + if (!objPtr) { + return JimOriginalEnviron(); + } + + + + num = Jim_ListLength(interp, objPtr); + if (num % 2) { + + num--; + } + size = Jim_Length(objPtr) + 2; + + envptr = Jim_Alloc(sizeof(*envptr) * (num / 2 + 1) + size); + envdata = (char *)&envptr[num / 2 + 1]; + + n = 0; + for (i = 0; i < num; i += 2) { + const char *s1, *s2; + Jim_Obj *elemObj; + + Jim_ListIndex(interp, objPtr, i, &elemObj, JIM_NONE); + s1 = Jim_String(elemObj); + Jim_ListIndex(interp, objPtr, i + 1, &elemObj, JIM_NONE); + s2 = Jim_String(elemObj); + + envptr[n] = envdata; + envdata += sprintf(envdata, "%s=%s", s1, s2); + envdata++; + n++; + } + envptr[n] = NULL; + *envdata = 0; + + return envptr; +} + +static void JimFreeEnv(char **env, char **original_environ) +{ + if (env != original_environ) { + Jim_Free(env); + } +} + +static Jim_Obj *JimMakeErrorCode(Jim_Interp *interp, long pid, int waitStatus, Jim_Obj *errStrObj) +{ + Jim_Obj *errorCode = Jim_NewListObj(interp, NULL, 0); + + if (pid <= 0) { + Jim_ListAppendElement(interp, errorCode, Jim_NewStringObj(interp, "NONE", -1)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, pid)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, -1)); + } + else if (WIFEXITED(waitStatus)) 
{ + Jim_ListAppendElement(interp, errorCode, Jim_NewStringObj(interp, "CHILDSTATUS", -1)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, pid)); + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, WEXITSTATUS(waitStatus))); + } + else { + const char *type; + const char *action; + const char *signame; + + if (WIFSIGNALED(waitStatus)) { + type = "CHILDKILLED"; + action = "killed"; + signame = Jim_SignalId(WTERMSIG(waitStatus)); + } + else { + type = "CHILDSUSP"; + action = "suspended"; + signame = "none"; + } + + Jim_ListAppendElement(interp, errorCode, Jim_NewStringObj(interp, type, -1)); + + if (errStrObj) { + Jim_AppendStrings(interp, errStrObj, "child ", action, " by signal ", Jim_SignalId(WTERMSIG(waitStatus)), "\n", NULL); + } + + Jim_ListAppendElement(interp, errorCode, Jim_NewIntObj(interp, pid)); + Jim_ListAppendElement(interp, errorCode, Jim_NewStringObj(interp, signame, -1)); + } + return errorCode; +} + +static int JimCheckWaitStatus(Jim_Interp *interp, long pid, int waitStatus, Jim_Obj *errStrObj) +{ + if (WIFEXITED(waitStatus) && WEXITSTATUS(waitStatus) == 0) { + return JIM_OK; + } + Jim_SetGlobalVariableStr(interp, "errorCode", JimMakeErrorCode(interp, pid, waitStatus, errStrObj)); + + return JIM_ERR; +} + + +struct WaitInfo +{ + phandle_t phandle; + int status; + int flags; +}; + + +struct WaitInfoTable { + struct WaitInfo *info; + int size; + int used; + int refcount; +}; + + +#define WI_DETACHED 2 + +#define WAIT_TABLE_GROW_BY 4 + +static void JimFreeWaitInfoTable(struct Jim_Interp *interp, void *privData) +{ + struct WaitInfoTable *table = privData; + + if (--table->refcount == 0) { + Jim_Free(table->info); + Jim_Free(table); + } +} + +static struct WaitInfoTable *JimAllocWaitInfoTable(void) +{ + struct WaitInfoTable *table = Jim_Alloc(sizeof(*table)); + table->info = NULL; + table->size = table->used = 0; + table->refcount = 1; + + return table; +} + +static int JimWaitRemove(struct WaitInfoTable *table, phandle_t phandle) +{ + int i; + + + for (i = 0; i < table->used; i++) { + if (phandle == table->info[i].phandle) { + if (i != table->used - 1) { + table->info[i] = table->info[table->used - 1]; + } + table->used--; + return 0; + } + } + return -1; +} + +static int Jim_ExecCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int outputId; + int errorId; + phandle_t *pidPtr; + int numPids, result; + int child_siginfo = 1; + Jim_Obj *childErrObj; + Jim_Obj *errStrObj; + struct WaitInfoTable *table = Jim_CmdPrivData(interp); + + if (argc > 1 && Jim_CompareStringImmediate(interp, argv[argc - 1], "&")) { + Jim_Obj *listObj; + int i; + + argc--; + numPids = JimCreatePipeline(interp, argc - 1, argv + 1, &pidPtr, NULL, NULL, NULL); + if (numPids < 0) { + return JIM_ERR; + } + + listObj = Jim_NewListObj(interp, NULL, 0); + for (i = 0; i < numPids; i++) { + Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, JimProcessPid(pidPtr[i]))); + } + Jim_SetResult(interp, listObj); + JimDetachPids(table, numPids, pidPtr); + Jim_Free(pidPtr); + return JIM_OK; + } + + numPids = + JimCreatePipeline(interp, argc - 1, argv + 1, &pidPtr, NULL, &outputId, &errorId); + + if (numPids < 0) { + return JIM_ERR; + } + + result = JIM_OK; + + errStrObj = Jim_NewStringObj(interp, "", 0); + + + if (outputId != -1) { + if (JimAppendStreamToString(interp, outputId, errStrObj) < 0) { + result = JIM_ERR; + Jim_SetResultErrno(interp, "error reading from output pipe"); + } + } + + + childErrObj = Jim_NewStringObj(interp, "", 0); + Jim_IncrRefCount(childErrObj); + + 
if (JimCleanupChildren(interp, numPids, pidPtr, childErrObj) != JIM_OK) { + result = JIM_ERR; + } + + if (errorId != -1) { + int ret; + Jim_Lseek(errorId, 0, SEEK_SET); + ret = JimAppendStreamToString(interp, errorId, errStrObj); + if (ret < 0) { + Jim_SetResultErrno(interp, "error reading from error pipe"); + result = JIM_ERR; + } + else if (ret > 0) { + + child_siginfo = 0; + } + } + + if (child_siginfo) { + + Jim_AppendObj(interp, errStrObj, childErrObj); + } + Jim_DecrRefCount(interp, childErrObj); + + + Jim_RemoveTrailingNewline(errStrObj); + + + Jim_SetResult(interp, errStrObj); + + return result; +} + +static long JimWaitForProcess(struct WaitInfoTable *table, phandle_t phandle, int *statusPtr) +{ + if (JimWaitRemove(table, phandle) == 0) { + + return waitpid(phandle, statusPtr, 0); + } + + + return -1; +} + +static void JimDetachPids(struct WaitInfoTable *table, int numPids, const phandle_t *pidPtr) +{ + int j; + + for (j = 0; j < numPids; j++) { + + int i; + for (i = 0; i < table->used; i++) { + if (pidPtr[j] == table->info[i].phandle) { + table->info[i].flags |= WI_DETACHED; + break; + } + } + } +} + +static int JimGetChannelFd(Jim_Interp *interp, const char *name) +{ + Jim_Obj *objv[2]; + + objv[0] = Jim_NewStringObj(interp, name, -1); + objv[1] = Jim_NewStringObj(interp, "getfd", -1); + + if (Jim_EvalObjVector(interp, 2, objv) == JIM_OK) { + jim_wide fd; + if (Jim_GetWide(interp, Jim_GetResult(interp), &fd) == JIM_OK) { + return fd; + } + } + return -1; +} + +static void JimReapDetachedPids(struct WaitInfoTable *table) +{ + struct WaitInfo *waitPtr; + int count; + int dest; + + if (!table) { + return; + } + + waitPtr = table->info; + dest = 0; + for (count = table->used; count > 0; waitPtr++, count--) { + if (waitPtr->flags & WI_DETACHED) { + int status; + long pid = waitpid(waitPtr->phandle, &status, WNOHANG); + if (pid > 0) { + + table->used--; + continue; + } + } + if (waitPtr != &table->info[dest]) { + table->info[dest] = *waitPtr; + } + dest++; + } +} + +static int Jim_WaitCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + struct WaitInfoTable *table = Jim_CmdPrivData(interp); + int nohang = 0; + long pid; + phandle_t phandle; + int status; + Jim_Obj *errCodeObj; + + + if (argc == 1) { + JimReapDetachedPids(table); + return JIM_OK; + } + + if (argc > 1 && Jim_CompareStringImmediate(interp, argv[1], "-nohang")) { + nohang = 1; + } + if (argc != nohang + 2) { + Jim_WrongNumArgs(interp, 1, argv, "?-nohang? ?pid?"); + return JIM_ERR; + } + if (Jim_GetLong(interp, argv[nohang + 1], &pid) != JIM_OK) { + return JIM_ERR; + } + + + phandle = JimWaitPid(pid, &status, nohang ? WNOHANG : 0); + if (phandle == JIM_BAD_PHANDLE) { + pid = -1; + } +#ifndef __MINGW32__ + else if (pid < 0) { + pid = phandle; + } +#endif + + errCodeObj = JimMakeErrorCode(interp, pid, status, NULL); + + if (phandle != JIM_BAD_PHANDLE && (WIFEXITED(status) || WIFSIGNALED(status))) { + + JimWaitRemove(table, phandle); + } + Jim_SetResult(interp, errCodeObj); + return JIM_OK; +} + +static int Jim_PidCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 1) { + Jim_WrongNumArgs(interp, 1, argv, ""); + return JIM_ERR; + } + + Jim_SetResultInt(interp, (jim_wide)getpid()); + return JIM_OK; +} + +static int +JimCreatePipeline(Jim_Interp *interp, int argc, Jim_Obj *const *argv, phandle_t **pidArrayPtr, + int *inPipePtr, int *outPipePtr, int *errFilePtr) +{ + phandle_t *pidPtr = NULL; /* Points to alloc-ed array holding all + * the pids of child processes. 
*/ + int numPids = 0; /* Actual number of processes that exist + * at *pidPtr right now. */ + int cmdCount; /* Count of number of distinct commands + * found in argc/argv. */ + const char *input = NULL; /* Describes input for pipeline, depending + * on "inputFile". NULL means take input + * from stdin/pipe. */ + int input_len = 0; + +#define FILE_NAME 0 +#define FILE_APPEND 1 +#define FILE_HANDLE 2 +#define FILE_TEXT 3 + + int inputFile = FILE_NAME; /* 1 means input is name of input file. + * 2 means input is filehandle name. + * 0 means input holds actual + * text to be input to command. */ + + int outputFile = FILE_NAME; /* 0 means output is the name of output file. + * 1 means output is the name of output file, and append. + * 2 means output is filehandle name. + * All this is ignored if output is NULL + */ + int errorFile = FILE_NAME; /* 0 means error is the name of error file. + * 1 means error is the name of error file, and append. + * 2 means error is filehandle name. + * All this is ignored if error is NULL + */ + const char *output = NULL; /* Holds name of output file to pipe to, + * or NULL if output goes to stdout/pipe. */ + const char *error = NULL; /* Holds name of stderr file to pipe to, + * or NULL if stderr goes to stderr/pipe. */ + int inputId = -1; + int outputId = -1; + int errorId = -1; + int lastOutputId = -1; + int pipeIds[2]; + int firstArg, lastArg; /* Indexes of first and last arguments in + * current command. */ + int lastBar; + int i; + phandle_t phandle; + char **save_environ; +#if defined(HAVE_EXECVPE) && !defined(__MINGW32__) + char **child_environ; +#endif + struct WaitInfoTable *table = Jim_CmdPrivData(interp); + + + char **arg_array = Jim_Alloc(sizeof(*arg_array) * (argc + 1)); + int arg_count = 0; + + if (inPipePtr != NULL) { + *inPipePtr = -1; + } + if (outPipePtr != NULL) { + *outPipePtr = -1; + } + if (errFilePtr != NULL) { + *errFilePtr = -1; + } + pipeIds[0] = pipeIds[1] = -1; + + cmdCount = 1; + lastBar = -1; + for (i = 0; i < argc; i++) { + const char *arg = Jim_String(argv[i]); + + if (arg[0] == '<') { + inputFile = FILE_NAME; + input = arg + 1; + if (*input == '<') { + inputFile = FILE_TEXT; + input_len = Jim_Length(argv[i]) - 2; + input++; + } + else if (*input == '@') { + inputFile = FILE_HANDLE; + input++; + } + + if (!*input && ++i < argc) { + input = Jim_GetString(argv[i], &input_len); + } + } + else if (arg[0] == '>') { + int dup_error = 0; + + outputFile = FILE_NAME; + + output = arg + 1; + if (*output == '>') { + outputFile = FILE_APPEND; + output++; + } + if (*output == '&') { + + output++; + dup_error = 1; + } + if (*output == '@') { + outputFile = FILE_HANDLE; + output++; + } + if (!*output && ++i < argc) { + output = Jim_String(argv[i]); + } + if (dup_error) { + errorFile = outputFile; + error = output; + } + } + else if (arg[0] == '2' && arg[1] == '>') { + error = arg + 2; + errorFile = FILE_NAME; + + if (*error == '@') { + errorFile = FILE_HANDLE; + error++; + } + else if (*error == '>') { + errorFile = FILE_APPEND; + error++; + } + if (!*error && ++i < argc) { + error = Jim_String(argv[i]); + } + } + else { + if (strcmp(arg, "|") == 0 || strcmp(arg, "|&") == 0) { + if (i == lastBar + 1 || i == argc - 1) { + Jim_SetResultString(interp, "illegal use of | or |& in command", -1); + goto badargs; + } + lastBar = i; + cmdCount++; + } + + arg_array[arg_count++] = (char *)arg; + continue; + } + + if (i >= argc) { + Jim_SetResultFormatted(interp, "can't specify \"%s\" as last word in command", arg); + goto badargs; + } + } + + if (arg_count 
== 0) { + Jim_SetResultString(interp, "didn't specify command to execute", -1); +badargs: + Jim_Free(arg_array); + return -1; + } + + + save_environ = JimSaveEnv(JimBuildEnv(interp)); + + if (input != NULL) { + if (inputFile == FILE_TEXT) { + inputId = Jim_MakeTempFile(interp, NULL, 1); + if (inputId == -1) { + goto error; + } + if (write(inputId, input, input_len) != input_len) { + Jim_SetResultErrno(interp, "couldn't write temp file"); + close(inputId); + goto error; + } + Jim_Lseek(inputId, 0L, SEEK_SET); + } + else if (inputFile == FILE_HANDLE) { + int fd = JimGetChannelFd(interp, input); + + if (fd < 0) { + goto error; + } + inputId = dup(fd); + } + else { + inputId = Jim_OpenForRead(input); + if (inputId == -1) { + Jim_SetResultFormatted(interp, "couldn't read file \"%s\": %s", input, strerror(Jim_Errno())); + goto error; + } + } + } + else if (inPipePtr != NULL) { + if (pipe(pipeIds) != 0) { + Jim_SetResultErrno(interp, "couldn't create input pipe for command"); + goto error; + } + inputId = pipeIds[0]; + *inPipePtr = pipeIds[1]; + pipeIds[0] = pipeIds[1] = -1; + } + + if (output != NULL) { + if (outputFile == FILE_HANDLE) { + int fd = JimGetChannelFd(interp, output); + if (fd < 0) { + goto error; + } + lastOutputId = dup(fd); + } + else { + lastOutputId = Jim_OpenForWrite(output, outputFile == FILE_APPEND); + if (lastOutputId == -1) { + Jim_SetResultFormatted(interp, "couldn't write file \"%s\": %s", output, strerror(Jim_Errno())); + goto error; + } + } + } + else if (outPipePtr != NULL) { + if (pipe(pipeIds) != 0) { + Jim_SetResultErrno(interp, "couldn't create output pipe"); + goto error; + } + lastOutputId = pipeIds[1]; + *outPipePtr = pipeIds[0]; + pipeIds[0] = pipeIds[1] = -1; + } + + if (error != NULL) { + if (errorFile == FILE_HANDLE) { + if (strcmp(error, "1") == 0) { + + if (lastOutputId != -1) { + errorId = dup(lastOutputId); + } + else { + + error = "stdout"; + } + } + if (errorId == -1) { + int fd = JimGetChannelFd(interp, error); + if (fd < 0) { + goto error; + } + errorId = dup(fd); + } + } + else { + errorId = Jim_OpenForWrite(error, errorFile == FILE_APPEND); + if (errorId == -1) { + Jim_SetResultFormatted(interp, "couldn't write file \"%s\": %s", error, strerror(Jim_Errno())); + goto error; + } + } + } + else if (errFilePtr != NULL) { + errorId = Jim_MakeTempFile(interp, NULL, 1); + if (errorId == -1) { + goto error; + } + *errFilePtr = dup(errorId); + } + + + pidPtr = Jim_Alloc(cmdCount * sizeof(*pidPtr)); + for (firstArg = 0; firstArg < arg_count; numPids++, firstArg = lastArg + 1) { + int pipe_dup_err = 0; + int origErrorId = errorId; + + for (lastArg = firstArg; lastArg < arg_count; lastArg++) { + if (strcmp(arg_array[lastArg], "|") == 0) { + break; + } + if (strcmp(arg_array[lastArg], "|&") == 0) { + pipe_dup_err = 1; + break; + } + } + + if (lastArg == firstArg) { + Jim_SetResultString(interp, "missing command to exec", -1); + goto error; + } + + + arg_array[lastArg] = NULL; + if (lastArg == arg_count) { + outputId = lastOutputId; + lastOutputId = -1; + } + else { + if (pipe(pipeIds) != 0) { + Jim_SetResultErrno(interp, "couldn't create pipe"); + goto error; + } + outputId = pipeIds[1]; + } + + + if (pipe_dup_err) { + errorId = outputId; + } + + + +#ifdef __MINGW32__ + phandle = JimStartWinProcess(interp, &arg_array[firstArg], save_environ, inputId, outputId, errorId); + if (phandle == JIM_BAD_PHANDLE) { + Jim_SetResultFormatted(interp, "couldn't exec \"%s\"", arg_array[firstArg]); + goto error; + } +#else + i = strlen(arg_array[firstArg]); + +#ifdef 
HAVE_EXECVPE + child_environ = Jim_GetEnviron(); +#endif +#ifdef HAVE_VFORK + phandle = vfork(); +#else + phandle = fork(); +#endif + if (phandle < 0) { + Jim_SetResultErrno(interp, "couldn't fork child process"); + goto error; + } + if (phandle == 0) { + + + if (inputId != -1 && inputId != fileno(stdin)) { + dup2(inputId, fileno(stdin)); + close(inputId); + } + if (outputId != -1 && outputId != fileno(stdout)) { + dup2(outputId, fileno(stdout)); + if (outputId != errorId) { + close(outputId); + } + } + if (errorId != -1 && errorId != fileno(stderr)) { + dup2(errorId, fileno(stderr)); + close(errorId); + } + + if (outPipePtr && *outPipePtr != -1) { + close(*outPipePtr); + } + if (errFilePtr && *errFilePtr != -1) { + close(*errFilePtr); + } + if (pipeIds[0] != -1) { + close(pipeIds[0]); + } + if (lastOutputId != -1) { + close(lastOutputId); + } + + execvpe(arg_array[firstArg], &arg_array[firstArg], child_environ); + + if (write(fileno(stderr), "couldn't exec \"", 15) && + write(fileno(stderr), arg_array[firstArg], i) && + write(fileno(stderr), "\"\n", 2)) { + + } +#ifdef JIM_MAINTAINER + { + + static char *const false_argv[2] = {"false", NULL}; + execvp(false_argv[0],false_argv); + } +#endif + _exit(127); + } +#endif + + + + if (table->used == table->size) { + table->size += WAIT_TABLE_GROW_BY; + table->info = Jim_Realloc(table->info, table->size * sizeof(*table->info)); + } + + table->info[table->used].phandle = phandle; + table->info[table->used].flags = 0; + table->used++; + + pidPtr[numPids] = phandle; + + + errorId = origErrorId; + + + if (inputId != -1) { + close(inputId); + } + if (outputId != -1) { + close(outputId); + } + inputId = pipeIds[0]; + pipeIds[0] = pipeIds[1] = -1; + } + *pidArrayPtr = pidPtr; + + + cleanup: + if (inputId != -1) { + close(inputId); + } + if (lastOutputId != -1) { + close(lastOutputId); + } + if (errorId != -1) { + close(errorId); + } + Jim_Free(arg_array); + + JimRestoreEnv(save_environ); + + return numPids; + + + error: + if ((inPipePtr != NULL) && (*inPipePtr != -1)) { + close(*inPipePtr); + *inPipePtr = -1; + } + if ((outPipePtr != NULL) && (*outPipePtr != -1)) { + close(*outPipePtr); + *outPipePtr = -1; + } + if ((errFilePtr != NULL) && (*errFilePtr != -1)) { + close(*errFilePtr); + *errFilePtr = -1; + } + if (pipeIds[0] != -1) { + close(pipeIds[0]); + } + if (pipeIds[1] != -1) { + close(pipeIds[1]); + } + if (pidPtr != NULL) { + for (i = 0; i < numPids; i++) { + if (pidPtr[i] != JIM_BAD_PHANDLE) { + JimDetachPids(table, 1, &pidPtr[i]); + } + } + Jim_Free(pidPtr); + } + numPids = -1; + goto cleanup; +} + + +static int JimCleanupChildren(Jim_Interp *interp, int numPids, phandle_t *pidPtr, Jim_Obj *errStrObj) +{ + struct WaitInfoTable *table = Jim_CmdPrivData(interp); + int result = JIM_OK; + int i; + + + for (i = 0; i < numPids; i++) { + int waitStatus = 0; + long pid = JimWaitForProcess(table, pidPtr[i], &waitStatus); + if (pid > 0) { + if (JimCheckWaitStatus(interp, pid, waitStatus, errStrObj) != JIM_OK) { + result = JIM_ERR; + } + } + } + Jim_Free(pidPtr); + + return result; +} + +int Jim_execInit(Jim_Interp *interp) +{ + struct WaitInfoTable *waitinfo; + + Jim_PackageProvideCheck(interp, "exec"); + + waitinfo = JimAllocWaitInfoTable(); + Jim_CreateCommand(interp, "exec", Jim_ExecCmd, waitinfo, JimFreeWaitInfoTable); + waitinfo->refcount++; + Jim_CreateCommand(interp, "wait", Jim_WaitCommand, waitinfo, JimFreeWaitInfoTable); + Jim_CreateCommand(interp, "pid", Jim_PidCommand, 0, 0); + + return JIM_OK; +} + +#if defined(__MINGW32__) + + +static int 
+JimWinFindExecutable(const char *originalName, char fullPath[MAX_PATH]) +{ + int i; + static char extensions[][5] = {".exe", "", ".bat"}; + + for (i = 0; i < (int) (sizeof(extensions) / sizeof(extensions[0])); i++) { + snprintf(fullPath, MAX_PATH, "%s%s", originalName, extensions[i]); + + if (SearchPath(NULL, fullPath, NULL, MAX_PATH, fullPath, NULL) == 0) { + continue; + } + if (GetFileAttributes(fullPath) & FILE_ATTRIBUTE_DIRECTORY) { + continue; + } + return 0; + } + + return -1; +} + +static char **JimSaveEnv(char **env) +{ + return env; +} + +static void JimRestoreEnv(char **env) +{ + JimFreeEnv(env, Jim_GetEnviron()); +} + +static char **JimOriginalEnviron(void) +{ + return NULL; +} + +static Jim_Obj * +JimWinBuildCommandLine(Jim_Interp *interp, char **argv) +{ + char *start, *special; + int quote, i; + + Jim_Obj *strObj = Jim_NewStringObj(interp, "", 0); + + for (i = 0; argv[i]; i++) { + if (i > 0) { + Jim_AppendString(interp, strObj, " ", 1); + } + + if (argv[i][0] == '\0') { + quote = 1; + } + else { + quote = 0; + for (start = argv[i]; *start != '\0'; start++) { + if (isspace(UCHAR(*start))) { + quote = 1; + break; + } + } + } + if (quote) { + Jim_AppendString(interp, strObj, "\"" , 1); + } + + start = argv[i]; + for (special = argv[i]; ; ) { + if ((*special == '\\') && (special[1] == '\\' || + special[1] == '"' || (quote && special[1] == '\0'))) { + Jim_AppendString(interp, strObj, start, special - start); + start = special; + while (1) { + special++; + if (*special == '"' || (quote && *special == '\0')) { + + Jim_AppendString(interp, strObj, start, special - start); + break; + } + if (*special != '\\') { + break; + } + } + Jim_AppendString(interp, strObj, start, special - start); + start = special; + } + if (*special == '"') { + if (special == start) { + Jim_AppendString(interp, strObj, "\"", 1); + } + else { + Jim_AppendString(interp, strObj, start, special - start); + } + Jim_AppendString(interp, strObj, "\\\"", 2); + start = special + 1; + } + if (*special == '\0') { + break; + } + special++; + } + Jim_AppendString(interp, strObj, start, special - start); + if (quote) { + Jim_AppendString(interp, strObj, "\"", 1); + } + } + return strObj; +} + +static phandle_t +JimStartWinProcess(Jim_Interp *interp, char **argv, char **env, int inputId, int outputId, int errorId) +{ + STARTUPINFO startInfo; + PROCESS_INFORMATION procInfo; + HANDLE hProcess; + char execPath[MAX_PATH]; + phandle_t phandle = INVALID_HANDLE_VALUE; + Jim_Obj *cmdLineObj; + char *winenv; + + if (JimWinFindExecutable(argv[0], execPath) < 0) { + return phandle; + } + argv[0] = execPath; + + hProcess = GetCurrentProcess(); + cmdLineObj = JimWinBuildCommandLine(interp, argv); + + + ZeroMemory(&startInfo, sizeof(startInfo)); + startInfo.cb = sizeof(startInfo); + startInfo.dwFlags = STARTF_USESTDHANDLES; + startInfo.hStdInput = INVALID_HANDLE_VALUE; + startInfo.hStdOutput= INVALID_HANDLE_VALUE; + startInfo.hStdError = INVALID_HANDLE_VALUE; + + if (inputId == -1) { + inputId = _fileno(stdin); + } + DuplicateHandle(hProcess, (HANDLE)_get_osfhandle(inputId), hProcess, &startInfo.hStdInput, + 0, TRUE, DUPLICATE_SAME_ACCESS); + if (startInfo.hStdInput == INVALID_HANDLE_VALUE) { + goto end; + } + + if (outputId == -1) { + outputId = _fileno(stdout); + } + DuplicateHandle(hProcess, (HANDLE)_get_osfhandle(outputId), hProcess, &startInfo.hStdOutput, + 0, TRUE, DUPLICATE_SAME_ACCESS); + if (startInfo.hStdOutput == INVALID_HANDLE_VALUE) { + goto end; + } + + + if (errorId == -1) { + errorId = _fileno(stderr); + } + 
DuplicateHandle(hProcess, (HANDLE)_get_osfhandle(errorId), hProcess, &startInfo.hStdError, + 0, TRUE, DUPLICATE_SAME_ACCESS); + if (startInfo.hStdError == INVALID_HANDLE_VALUE) { + goto end; + } + + if (env == NULL) { + + winenv = NULL; + } + else if (env[0] == NULL) { + winenv = (char *)"\0"; + } + else { + winenv = env[0]; + } + + if (!CreateProcess(NULL, (char *)Jim_String(cmdLineObj), NULL, NULL, TRUE, + 0, winenv, NULL, &startInfo, &procInfo)) { + goto end; + } + + + WaitForInputIdle(procInfo.hProcess, 5000); + CloseHandle(procInfo.hThread); + + phandle = procInfo.hProcess; + + end: + Jim_FreeNewObj(interp, cmdLineObj); + if (startInfo.hStdInput != INVALID_HANDLE_VALUE) { + CloseHandle(startInfo.hStdInput); + } + if (startInfo.hStdOutput != INVALID_HANDLE_VALUE) { + CloseHandle(startInfo.hStdOutput); + } + if (startInfo.hStdError != INVALID_HANDLE_VALUE) { + CloseHandle(startInfo.hStdError); + } + return phandle; +} + +#else + +static char **JimOriginalEnviron(void) +{ + return Jim_GetEnviron(); +} + +static char **JimSaveEnv(char **env) +{ + char **saveenv = Jim_GetEnviron(); + Jim_SetEnviron(env); + return saveenv; +} + +static void JimRestoreEnv(char **env) +{ + JimFreeEnv(Jim_GetEnviron(), env); + Jim_SetEnviron(env); +} +#endif +#endif + + +#include +#include +#include +#include + + +#ifdef HAVE_SYS_TIME_H +#include +#endif + +struct clock_options { + int gmt; + const char *format; +}; + +static int parse_clock_options(Jim_Interp *interp, int argc, Jim_Obj *const *argv, struct clock_options *opts) +{ + static const char * const options[] = { "-gmt", "-format", NULL }; + enum { OPT_GMT, OPT_FORMAT, }; + int i; + + for (i = 0; i < argc; i += 2) { + int option; + if (Jim_GetEnum(interp, argv[i], options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + switch (option) { + case OPT_GMT: + if (Jim_GetBoolean(interp, argv[i + 1], &opts->gmt) != JIM_OK) { + return JIM_ERR; + } + break; + case OPT_FORMAT: + opts->format = Jim_String(argv[i + 1]); + break; + } + } + return JIM_OK; +} + +static int clock_cmd_format(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + + char buf[100]; + time_t t; + jim_wide seconds; + struct clock_options options = { 0, "%a %b %d %H:%M:%S %Z %Y" }; + struct tm *tm; + + if (Jim_GetWide(interp, argv[0], &seconds) != JIM_OK) { + return JIM_ERR; + } + if (argc % 2 == 0) { + return -1; + } + if (parse_clock_options(interp, argc - 1, argv + 1, &options) == JIM_ERR) { + return JIM_ERR; + } + + t = seconds; + tm = options.gmt ? gmtime(&t) : localtime(&t); + + if (tm == NULL || strftime(buf, sizeof(buf), options.format, tm) == 0) { + Jim_SetResultString(interp, "format string too long or invalid time", -1); + return JIM_ERR; + } + + Jim_SetResultString(interp, buf, -1); + + return JIM_OK; +} + +#ifdef HAVE_STRPTIME +static time_t jim_timegm(const struct tm *tm) +{ + int m = tm->tm_mon + 1; + int y = 1900 + tm->tm_year - (m <= 2); + int era = (y >= 0 ? y : y - 399) / 400; + unsigned yoe = (unsigned)(y - era * 400); + unsigned doy = (153 * (m + (m > 2 ? 
-3 : 9)) + 2) / 5 + tm->tm_mday - 1; + unsigned doe = yoe * 365 + yoe / 4 - yoe / 100 + doy; + long days = (era * 146097 + (int)doe - 719468); + int secs = tm->tm_hour * 3600 + tm->tm_min * 60 + tm->tm_sec; + + return days * 24 * 60 * 60 + secs; +} + +static int clock_cmd_scan(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + char *pt; + struct tm tm; + time_t now = time(NULL); + + struct clock_options options = { 0, NULL }; + + if (argc % 2 == 0) { + return -1; + } + + if (parse_clock_options(interp, argc - 1, argv + 1, &options) == JIM_ERR) { + return JIM_ERR; + } + if (options.format == NULL) { + return -1; + } + + localtime_r(&now, &tm); + + pt = strptime(Jim_String(argv[0]), options.format, &tm); + if (pt == 0 || *pt != 0) { + Jim_SetResultString(interp, "Failed to parse time according to format", -1); + return JIM_ERR; + } + + + tm.tm_isdst = options.gmt ? 0 : -1; + Jim_SetResultInt(interp, options.gmt ? jim_timegm(&tm) : mktime(&tm)); + + return JIM_OK; +} +#endif + +static int clock_cmd_seconds(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_SetResultInt(interp, Jim_GetTimeUsec(CLOCK_REALTIME) / 1000000); + return JIM_OK; +} + +static int clock_cmd_clicks(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_SetResultInt(interp, Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW)); + return JIM_OK; +} + +static int clock_cmd_micros(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_SetResultInt(interp, Jim_GetTimeUsec(CLOCK_REALTIME)); + return JIM_OK; +} + +static int clock_cmd_millis(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_SetResultInt(interp, Jim_GetTimeUsec(CLOCK_REALTIME) / 1000); + return JIM_OK; +} + +static const jim_subcmd_type clock_command_table[] = { + { "clicks", + NULL, + clock_cmd_clicks, + 0, + 0, + + }, + { "format", + "seconds ?-format string? ?-gmt boolean?", + clock_cmd_format, + 1, + 5, + + }, + { "microseconds", + NULL, + clock_cmd_micros, + 0, + 0, + + }, + { "milliseconds", + NULL, + clock_cmd_millis, + 0, + 0, + + }, +#ifdef HAVE_STRPTIME + { "scan", + "str -format format ?-gmt boolean?", + clock_cmd_scan, + 3, + 5, + + }, +#endif + { "seconds", + NULL, + clock_cmd_seconds, + 0, + 0, + + }, + { NULL } +}; + +int Jim_clockInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "clock"); + Jim_CreateCommand(interp, "clock", Jim_SubCmdProc, (void *)clock_command_table, NULL); + return JIM_OK; +} + +#include +#include +#include +#include +#include + + +static int array_cmd_exists(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + + Jim_Obj *dictObj = Jim_GetVariable(interp, argv[0], JIM_UNSHARED); + Jim_SetResultInt(interp, dictObj && Jim_DictSize(interp, dictObj) != -1); + return JIM_OK; +} + +static int array_cmd_get(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = Jim_GetVariable(interp, argv[0], JIM_NONE); + Jim_Obj *patternObj; + + if (!objPtr) { + return JIM_OK; + } + + patternObj = (argc == 1) ? 
NULL : argv[1]; + + + if (patternObj == NULL || Jim_CompareStringImmediate(interp, patternObj, "*")) { + if (Jim_IsList(objPtr) && Jim_ListLength(interp, objPtr) % 2 == 0) { + + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + } + + return Jim_DictMatchTypes(interp, objPtr, patternObj, JIM_DICTMATCH_KEYS, JIM_DICTMATCH_KEYS | JIM_DICTMATCH_VALUES); +} + +static int array_cmd_names(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = Jim_GetVariable(interp, argv[0], JIM_NONE); + + if (!objPtr) { + return JIM_OK; + } + + return Jim_DictMatchTypes(interp, objPtr, argc == 1 ? NULL : argv[1], JIM_DICTMATCH_KEYS, JIM_DICTMATCH_KEYS); +} + +static int array_cmd_unset(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + int len; + Jim_Obj *resultObj; + Jim_Obj *objPtr; + Jim_Obj **dictValuesObj; + + if (argc == 1 || Jim_CompareStringImmediate(interp, argv[1], "*")) { + + Jim_UnsetVariable(interp, argv[0], JIM_NONE); + return JIM_OK; + } + + objPtr = Jim_GetVariable(interp, argv[0], JIM_NONE); + + if (objPtr == NULL) { + + return JIM_OK; + } + + dictValuesObj = Jim_DictPairs(interp, objPtr, &len); + if (dictValuesObj == NULL) { + + Jim_SetResultString(interp, "", -1); + return JIM_OK; + } + + + resultObj = Jim_NewDictObj(interp, NULL, 0); + + for (i = 0; i < len; i += 2) { + if (!Jim_StringMatchObj(interp, argv[1], dictValuesObj[i], 0)) { + Jim_DictAddElement(interp, resultObj, dictValuesObj[i], dictValuesObj[i + 1]); + } + } + + Jim_SetVariable(interp, argv[0], resultObj); + return JIM_OK; +} + +static int array_cmd_size(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + int len = 0; + + + objPtr = Jim_GetVariable(interp, argv[0], JIM_NONE); + if (objPtr) { + len = Jim_DictSize(interp, objPtr); + if (len < 0) { + + Jim_SetResultInt(interp, 0); + return JIM_OK; + } + } + + Jim_SetResultInt(interp, len); + + return JIM_OK; +} + +static int array_cmd_stat(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr = Jim_GetVariable(interp, argv[0], JIM_NONE); + if (objPtr) { + return Jim_DictInfo(interp, objPtr); + } + Jim_SetResultFormatted(interp, "\"%#s\" isn't an array", argv[0], NULL); + return JIM_ERR; +} + +static int array_cmd_set(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + int len; + Jim_Obj *listObj = argv[1]; + Jim_Obj *dictObj; + + len = Jim_ListLength(interp, listObj); + if (len % 2) { + Jim_SetResultString(interp, "list must have an even number of elements", -1); + return JIM_ERR; + } + + dictObj = Jim_GetVariable(interp, argv[0], JIM_UNSHARED); + if (!dictObj) { + + return Jim_SetVariable(interp, argv[0], listObj); + } + else if (Jim_DictSize(interp, dictObj) < 0) { + return JIM_ERR; + } + + if (Jim_IsShared(dictObj)) { + dictObj = Jim_DuplicateObj(interp, dictObj); + } + + for (i = 0; i < len; i += 2) { + Jim_Obj *nameObj; + Jim_Obj *valueObj; + + Jim_ListIndex(interp, listObj, i, &nameObj, JIM_NONE); + Jim_ListIndex(interp, listObj, i + 1, &valueObj, JIM_NONE); + + Jim_DictAddElement(interp, dictObj, nameObj, valueObj); + } + return Jim_SetVariable(interp, argv[0], dictObj); +} + +static const jim_subcmd_type array_command_table[] = { + { "exists", + "arrayName", + array_cmd_exists, + 1, + 1, + + }, + { "get", + "arrayName ?pattern?", + array_cmd_get, + 1, + 2, + + }, + { "names", + "arrayName ?pattern?", + array_cmd_names, + 1, + 2, + + }, + { "set", + "arrayName list", + array_cmd_set, + 2, + 2, + + }, + { "size", + "arrayName", + array_cmd_size, + 1, + 1, + + }, + { "stat", + 
"arrayName", + array_cmd_stat, + 1, + 1, + + }, + { "unset", + "arrayName ?pattern?", + array_cmd_unset, + 1, + 2, + + }, + { NULL + } +}; + +int Jim_arrayInit(Jim_Interp *interp) +{ + Jim_PackageProvideCheck(interp, "array"); + Jim_CreateCommand(interp, "array", Jim_SubCmdProc, (void *)array_command_table, NULL); + return JIM_OK; +} +int Jim_InitStaticExtensions(Jim_Interp *interp) +{ +extern int Jim_bootstrapInit(Jim_Interp *); +extern int Jim_aioInit(Jim_Interp *); +extern int Jim_readdirInit(Jim_Interp *); +extern int Jim_regexpInit(Jim_Interp *); +extern int Jim_fileInit(Jim_Interp *); +extern int Jim_globInit(Jim_Interp *); +extern int Jim_execInit(Jim_Interp *); +extern int Jim_clockInit(Jim_Interp *); +extern int Jim_arrayInit(Jim_Interp *); +extern int Jim_stdlibInit(Jim_Interp *); +extern int Jim_tclcompatInit(Jim_Interp *); +Jim_bootstrapInit(interp); +Jim_aioInit(interp); +Jim_readdirInit(interp); +Jim_regexpInit(interp); +Jim_fileInit(interp); +Jim_globInit(interp); +Jim_execInit(interp); +Jim_clockInit(interp); +Jim_arrayInit(interp); +Jim_stdlibInit(interp); +Jim_tclcompatInit(interp); +return JIM_OK; +} +#ifndef JIM_TINY +#define JIM_OPTIMIZATION +#endif + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + + +#ifdef HAVE_SYS_TIME_H +#include +#endif +#ifdef HAVE_EXECINFO_H +#include +#endif +#ifdef HAVE_CRT_EXTERNS_H +#include +#endif + + +#include + + + + + +#ifndef TCL_LIBRARY +#define TCL_LIBRARY "." +#endif +#ifndef TCL_PLATFORM_OS +#define TCL_PLATFORM_OS "unknown" +#endif +#ifndef TCL_PLATFORM_PLATFORM +#define TCL_PLATFORM_PLATFORM "unknown" +#endif +#ifndef TCL_PLATFORM_PATH_SEPARATOR +#define TCL_PLATFORM_PATH_SEPARATOR ":" +#endif + + + + + + + +#ifdef JIM_MAINTAINER +#define JIM_DEBUG_COMMAND +#define JIM_DEBUG_PANIC +#endif + + + +#define JIM_INTEGER_SPACE 24 + +#if defined(DEBUG_SHOW_SCRIPT) || defined(DEBUG_SHOW_SCRIPT_TOKENS) || defined(JIM_DEBUG_COMMAND) || defined(DEBUG_SHOW_SUBST) +static const char *jim_tt_name(int type); +#endif + +#ifdef JIM_DEBUG_PANIC +static void JimPanicDump(int fail_condition, const char *fmt, ...); +#define JimPanic(X) JimPanicDump X +#else +#define JimPanic(X) +#endif + +#ifdef JIM_OPTIMIZATION +static int JimIsWide(Jim_Obj *objPtr); +#define JIM_IF_OPTIM(X) X +#else +#define JIM_IF_OPTIM(X) +#endif + + +static char JimEmptyStringRep[] = ""; + +static void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action); +static int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int listindex, Jim_Obj *newObjPtr, + int flags); +static int Jim_ListIndices(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *const *indexv, int indexc, + Jim_Obj **resultObj, int flags); +static int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands); +static Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr); +static void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr); +static void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype, + const char *prefix, const char *const *tablePtr, const char *name); +static int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv); +static int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr); +static int JimSign(jim_wide w); +static void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen); +static void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len); +static int JimSetNewVariable(Jim_HashTable *ht, Jim_Obj 
*nameObjPtr, Jim_VarVal *vv); +static Jim_VarVal *JimFindVariable(Jim_HashTable *ht, Jim_Obj *nameObjPtr); +static int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + +#define JIM_DICT_SUGAR 100 + + + + +#define JimWideValue(objPtr) (objPtr)->internalRep.wideValue + +#define JimObjTypeName(O) ((O)->typePtr ? (O)->typePtr->name : "none") + +static int utf8_tounicode_case(const char *s, int *uc, int upper) +{ + int l = utf8_tounicode(s, uc); + if (upper) { + *uc = utf8_upper(*uc); + } + return l; +} + +static Jim_Obj *JimPushInterpObjImpl(Jim_Obj **iop, Jim_Obj *no) +{ + Jim_Obj *io = *iop; + Jim_IncrRefCount(no); + *iop = no; + return io; +} + +#define JimPushInterpObj(IO, NO) JimPushInterpObjImpl(&(IO), NO) +#define JimPopInterpObj(I, IO, SO) do { Jim_DecrRefCount(I, IO); IO = SO; } while (0) + + +#define JIM_CHARSET_SCAN 2 +#define JIM_CHARSET_GLOB 0 + +static const char *JimCharsetMatch(const char *pattern, int plen, int c, int flags) +{ + int not = 0; + int pchar; + int match = 0; + int nocase = 0; + int n; + + if (flags & JIM_NOCASE) { + nocase++; + c = utf8_upper(c); + } + + if (flags & JIM_CHARSET_SCAN) { + if (*pattern == '^') { + not++; + pattern++; + plen--; + } + + + if (*pattern == ']') { + goto first; + } + } + + while (plen && *pattern != ']') { + + if (pattern[0] == '\\') { +first: + n = utf8_tounicode_case(pattern, &pchar, nocase); + pattern += n; + plen -= n; + } + else { + + int start; + int end; + + n = utf8_tounicode_case(pattern, &start, nocase); + pattern += n; + plen -= n; + if (pattern[0] == '-' && plen > 1) { + + n = 1 + utf8_tounicode_case(pattern + 1, &end, nocase); + pattern += n; + plen -= n; + + + if ((c >= start && c <= end) || (c >= end && c <= start)) { + match = 1; + } + continue; + } + pchar = start; + } + + if (pchar == c) { + match = 1; + } + } + if (not) { + match = !match; + } + + return match ? pattern : NULL; +} + + + +static int JimGlobMatch(const char *pattern, int plen, const char *string, int slen, int nocase) +{ + int c; + int pchar; + int n; + const char *p; + while (plen) { + switch (pattern[0]) { + case '*': + while (pattern[1] == '*' && plen) { + pattern++; + plen--; + } + pattern++; + plen--; + if (!plen) { + return 1; + } + while (slen) { + + if (JimGlobMatch(pattern, plen, string, slen, nocase)) + return 1; + n = utf8_tounicode(string, &c); + string += n; + slen -= n; + } + return 0; + + case '?': + n = utf8_tounicode(string, &c); + string += n; + slen -= n; + break; + + case '[': { + n = utf8_tounicode(string, &c); + string += n; + slen -= n; + p = JimCharsetMatch(pattern + 1, plen - 1, c, nocase ? 
JIM_NOCASE : 0); + if (!p) { + return 0; + } + plen -= p - pattern; + pattern = p; + + if (!plen) { + + continue; + } + break; + } + case '\\': + if (pattern[1]) { + pattern++; + plen--; + } + + default: + n = utf8_tounicode_case(string, &c, nocase); + string += n; + slen -= n; + utf8_tounicode_case(pattern, &pchar, nocase); + if (pchar != c) { + return 0; + } + break; + } + n = utf8_tounicode_case(pattern, &pchar, nocase); + pattern += n; + plen -= n; + if (!slen) { + while (*pattern == '*' && plen) { + pattern++; + plen--; + } + break; + } + } + if (!plen && !slen) { + return 1; + } + return 0; +} + +static int JimStringCompareUtf8(const char *s1, int l1, const char *s2, int l2, int nocase) +{ + int minlen = l1; + if (l2 < l1) { + minlen = l2; + } + while (minlen) { + int c1, c2; + s1 += utf8_tounicode_case(s1, &c1, nocase); + s2 += utf8_tounicode_case(s2, &c2, nocase); + if (c1 != c2) { + return JimSign(c1 - c2); + } + minlen--; + } + + if (l1 < l2) { + return -1; + } + if (l1 > l2) { + return 1; + } + return 0; +} + +static int JimStringFirst(const char *s1, int l1, const char *s2, int l2, int idx) +{ + int i; + int l1bytelen; + + if (!l1 || !l2 || l1 > l2) { + return -1; + } + if (idx < 0) + idx = 0; + s2 += utf8_index(s2, idx); + + l1bytelen = utf8_index(s1, l1); + + for (i = idx; i <= l2 - l1; i++) { + int c; + if (memcmp(s2, s1, l1bytelen) == 0) { + return i; + } + s2 += utf8_tounicode(s2, &c); + } + return -1; +} + +static int JimStringLast(const char *s1, int l1, const char *s2, int l2) +{ + const char *p; + + if (!l1 || !l2 || l1 > l2) + return -1; + + + for (p = s2 + l2 - 1; p != s2 - 1; p--) { + if (*p == *s1 && memcmp(s1, p, l1) == 0) { + return p - s2; + } + } + return -1; +} + +#ifdef JIM_UTF8 +static int JimStringLastUtf8(const char *s1, int l1, const char *s2, int l2) +{ + int n = JimStringLast(s1, utf8_index(s1, l1), s2, utf8_index(s2, l2)); + if (n > 0) { + n = utf8_strlen(s2, n); + } + return n; +} +#endif + +static int JimCheckConversion(const char *str, const char *endptr) +{ + if (str[0] == '\0' || str == endptr) { + return JIM_ERR; + } + + if (endptr[0] != '\0') { + while (*endptr) { + if (!isspace(UCHAR(*endptr))) { + return JIM_ERR; + } + endptr++; + } + } + return JIM_OK; +} + +static int JimNumberBase(const char *str, int *base, int *sign) +{ + int i = 0; + + *base = 0; + + while (isspace(UCHAR(str[i]))) { + i++; + } + + if (str[i] == '-') { + *sign = -1; + i++; + } + else { + if (str[i] == '+') { + i++; + } + *sign = 1; + } + + if (str[i] != '0') { + + return 0; + } + + + switch (str[i + 1]) { + case 'x': case 'X': *base = 16; break; + case 'o': case 'O': *base = 8; break; + case 'b': case 'B': *base = 2; break; + case 'd': case 'D': *base = 10; break; + default: return 0; + } + i += 2; + + if (str[i] != '-' && str[i] != '+' && !isspace(UCHAR(str[i]))) { + + return i; + } + + *base = 0; + return 0; +} + +static long jim_strtol(const char *str, char **endptr) +{ + int sign; + int base; + int i = JimNumberBase(str, &base, &sign); + + if (base != 0) { + long value = strtol(str + i, endptr, base); + if (endptr == NULL || *endptr != str + i) { + return value * sign; + } + } + + + return strtol(str, endptr, 10); +} + + +static jim_wide jim_strtoull(const char *str, char **endptr) +{ +#ifdef HAVE_LONG_LONG + int sign; + int base; + int i = JimNumberBase(str, &base, &sign); + + if (base != 0) { + jim_wide value = strtoull(str + i, endptr, base); + if (endptr == NULL || *endptr != str + i) { + return value * sign; + } + } + + + return strtoull(str, endptr, 10); +#else 
+ return (unsigned long)jim_strtol(str, endptr); +#endif +} + +int Jim_StringToWide(const char *str, jim_wide * widePtr, int base) +{ + char *endptr; + + if (base) { + *widePtr = strtoull(str, &endptr, base); + } + else { + *widePtr = jim_strtoull(str, &endptr); + } + + return JimCheckConversion(str, endptr); +} + +int Jim_StringToDouble(const char *str, double *doublePtr) +{ + char *endptr; + + + errno = 0; + + *doublePtr = strtod(str, &endptr); + + return JimCheckConversion(str, endptr); +} + +static jim_wide JimPowWide(jim_wide b, jim_wide e) +{ + jim_wide res = 1; + + + if (b == 1) { + + return 1; + } + if (e < 0) { + if (b != -1) { + return 0; + } + e = -e; + } + while (e) + { + if (e & 1) { + res *= b; + } + e >>= 1; + b *= b; + } + return res; +} + +#ifdef JIM_DEBUG_PANIC +static void JimPanicDump(int condition, const char *fmt, ...) +{ + va_list ap; + + if (!condition) { + return; + } + + va_start(ap, fmt); + + fprintf(stderr, "\nJIM INTERPRETER PANIC: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n\n"); + va_end(ap); + +#if defined(HAVE_BACKTRACE) + { + void *array[40]; + int size, i; + char **strings; + + size = backtrace(array, 40); + strings = backtrace_symbols(array, size); + for (i = 0; i < size; i++) + fprintf(stderr, "[backtrace] %s\n", strings[i]); + fprintf(stderr, "[backtrace] Include the above lines and the output\n"); + fprintf(stderr, "[backtrace] of 'nm ' in the bug report.\n"); + } +#endif + + exit(1); +} +#endif + + +void *JimDefaultAllocator(void *ptr, size_t size) +{ + if (size == 0) { + free(ptr); + return NULL; + } + else if (ptr) { + return realloc(ptr, size); + } + else { + return malloc(size); + } +} + +void *(*Jim_Allocator)(void *ptr, size_t size) = JimDefaultAllocator; + +char *Jim_StrDup(const char *s) +{ + return Jim_StrDupLen(s, strlen(s)); +} + +char *Jim_StrDupLen(const char *s, int l) +{ + char *copy = Jim_Alloc(l + 1); + + memcpy(copy, s, l); + copy[l] = 0; + return copy; +} + + +jim_wide Jim_GetTimeUsec(unsigned type) +{ + long long now; + struct timeval tv; + +#if defined(HAVE_CLOCK_GETTIME) + struct timespec ts; + + if (clock_gettime(type, &ts) == 0) { + now = ts.tv_sec * 1000000LL + ts.tv_nsec / 1000; + } + else +#endif + { + gettimeofday(&tv, NULL); + + now = tv.tv_sec * 1000000LL + tv.tv_usec; + } + + return now; +} + + + + + +static void JimExpandHashTableIfNeeded(Jim_HashTable *ht); +static unsigned int JimHashTableNextPower(unsigned int size); +static Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace); + + + + +unsigned int Jim_IntHashFunction(unsigned int key) +{ + key += ~(key << 15); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); + key += ~(key << 11); + key ^= (key >> 16); + return key; +} + + +unsigned int Jim_GenHashFunction(const unsigned char *string, int length) +{ + unsigned result = 0; + string += length; + while (length--) { + result += (result << 3) + (unsigned char)(*--string); + } + return result; +} + + + +static void JimResetHashTable(Jim_HashTable *ht) +{ + ht->table = NULL; + ht->size = 0; + ht->sizemask = 0; + ht->used = 0; + ht->collisions = 0; +#ifdef JIM_RANDOMISE_HASH + ht->uniq = (rand() ^ time(NULL) ^ clock()); +#else + ht->uniq = 0; +#endif +} + +static void JimInitHashTableIterator(Jim_HashTable *ht, Jim_HashTableIterator *iter) +{ + iter->ht = ht; + iter->index = -1; + iter->entry = NULL; + iter->nextEntry = NULL; +} + + +int Jim_InitHashTable(Jim_HashTable *ht, const Jim_HashTableType *type, void *privDataPtr) +{ + JimResetHashTable(ht); + ht->type = 
type; + ht->privdata = privDataPtr; + return JIM_OK; +} + + +void Jim_ExpandHashTable(Jim_HashTable *ht, unsigned int size) +{ + Jim_HashTable n; + unsigned int realsize = JimHashTableNextPower(size), i; + + if (size <= ht->used) + return; + + Jim_InitHashTable(&n, ht->type, ht->privdata); + n.size = realsize; + n.sizemask = realsize - 1; + n.table = Jim_Alloc(realsize * sizeof(Jim_HashEntry *)); + + n.uniq = ht->uniq; + + + memset(n.table, 0, realsize * sizeof(Jim_HashEntry *)); + + n.used = ht->used; + for (i = 0; ht->used > 0; i++) { + Jim_HashEntry *he, *nextHe; + + if (ht->table[i] == NULL) + continue; + + + he = ht->table[i]; + while (he) { + unsigned int h; + + nextHe = he->next; + + h = Jim_HashKey(ht, he->key) & n.sizemask; + he->next = n.table[h]; + n.table[h] = he; + ht->used--; + + he = nextHe; + } + } + assert(ht->used == 0); + Jim_Free(ht->table); + + + *ht = n; +} + +int Jim_AddHashEntry(Jim_HashTable *ht, const void *key, void *val) +{ + Jim_HashEntry *entry = JimInsertHashEntry(ht, key, 0);; + if (entry == NULL) + return JIM_ERR; + + + Jim_SetHashKey(ht, entry, key); + Jim_SetHashVal(ht, entry, val); + return JIM_OK; +} + + +int Jim_ReplaceHashEntry(Jim_HashTable *ht, const void *key, void *val) +{ + int existed; + Jim_HashEntry *entry; + + entry = JimInsertHashEntry(ht, key, 1); + if (entry->key) { + if (ht->type->valDestructor && ht->type->valDup) { + void *newval = ht->type->valDup(ht->privdata, val); + ht->type->valDestructor(ht->privdata, entry->u.val); + entry->u.val = newval; + } + else { + Jim_FreeEntryVal(ht, entry); + Jim_SetHashVal(ht, entry, val); + } + existed = 1; + } + else { + + Jim_SetHashKey(ht, entry, key); + Jim_SetHashVal(ht, entry, val); + existed = 0; + } + + return existed; +} + +int Jim_DeleteHashEntry(Jim_HashTable *ht, const void *key) +{ + if (ht->used) { + unsigned int h = Jim_HashKey(ht, key) & ht->sizemask; + Jim_HashEntry *prevHe = NULL; + Jim_HashEntry *he = ht->table[h]; + + while (he) { + if (Jim_CompareHashKeys(ht, key, he->key)) { + + if (prevHe) + prevHe->next = he->next; + else + ht->table[h] = he->next; + ht->used--; + Jim_FreeEntryKey(ht, he); + Jim_FreeEntryVal(ht, he); + Jim_Free(he); + return JIM_OK; + } + prevHe = he; + he = he->next; + } + } + + return JIM_ERR; +} + +void Jim_ClearHashTable(Jim_HashTable *ht) +{ + unsigned int i; + + + for (i = 0; ht->used > 0; i++) { + Jim_HashEntry *he, *nextHe; + + he = ht->table[i]; + while (he) { + nextHe = he->next; + Jim_FreeEntryKey(ht, he); + Jim_FreeEntryVal(ht, he); + Jim_Free(he); + ht->used--; + he = nextHe; + } + ht->table[i] = NULL; + } +} + +int Jim_FreeHashTable(Jim_HashTable *ht) +{ + Jim_ClearHashTable(ht); + + Jim_Free(ht->table); + + JimResetHashTable(ht); + return JIM_OK; +} + +Jim_HashEntry *Jim_FindHashEntry(Jim_HashTable *ht, const void *key) +{ + Jim_HashEntry *he; + unsigned int h; + + if (ht->used == 0) + return NULL; + h = Jim_HashKey(ht, key) & ht->sizemask; + he = ht->table[h]; + while (he) { + if (Jim_CompareHashKeys(ht, key, he->key)) + return he; + he = he->next; + } + return NULL; +} + +Jim_HashTableIterator *Jim_GetHashTableIterator(Jim_HashTable *ht) +{ + Jim_HashTableIterator *iter = Jim_Alloc(sizeof(*iter)); + JimInitHashTableIterator(ht, iter); + return iter; +} + +Jim_HashEntry *Jim_NextHashEntry(Jim_HashTableIterator *iter) +{ + while (1) { + if (iter->entry == NULL) { + iter->index++; + if (iter->index >= (signed)iter->ht->size) + break; + iter->entry = iter->ht->table[iter->index]; + } + else { + iter->entry = iter->nextEntry; + } + if (iter->entry) { 
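+ /* Record the successor now so the returned entry can safely be deleted by the caller */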
+ iter->nextEntry = iter->entry->next; + return iter->entry; + } + } + return NULL; +} + + + + +static void JimExpandHashTableIfNeeded(Jim_HashTable *ht) +{ + if (ht->size == 0) + Jim_ExpandHashTable(ht, JIM_HT_INITIAL_SIZE); + if (ht->size == ht->used) + Jim_ExpandHashTable(ht, ht->size * 2); +} + + +static unsigned int JimHashTableNextPower(unsigned int size) +{ + unsigned int i = JIM_HT_INITIAL_SIZE; + + if (size >= 2147483648U) + return 2147483648U; + while (1) { + if (i >= size) + return i; + i *= 2; + } +} + +static Jim_HashEntry *JimInsertHashEntry(Jim_HashTable *ht, const void *key, int replace) +{ + unsigned int h; + Jim_HashEntry *he; + + + JimExpandHashTableIfNeeded(ht); + + + h = Jim_HashKey(ht, key) & ht->sizemask; + + he = ht->table[h]; + while (he) { + if (Jim_CompareHashKeys(ht, key, he->key)) + return replace ? he : NULL; + he = he->next; + } + + + he = Jim_Alloc(sizeof(*he)); + he->next = ht->table[h]; + ht->table[h] = he; + ht->used++; + he->key = NULL; + + return he; +} + + + +static unsigned int JimStringCopyHTHashFunction(const void *key) +{ + return Jim_GenHashFunction(key, strlen(key)); +} + +static void *JimStringCopyHTDup(void *privdata, const void *key) +{ + return Jim_StrDup(key); +} + +static int JimStringCopyHTKeyCompare(void *privdata, const void *key1, const void *key2) +{ + return strcmp(key1, key2) == 0; +} + +static void JimStringCopyHTKeyDestructor(void *privdata, void *key) +{ + Jim_Free(key); +} + +static const Jim_HashTableType JimPackageHashTableType = { + JimStringCopyHTHashFunction, + JimStringCopyHTDup, + NULL, + JimStringCopyHTKeyCompare, + JimStringCopyHTKeyDestructor, + NULL +}; + +typedef struct AssocDataValue +{ + Jim_InterpDeleteProc *delProc; + void *data; +} AssocDataValue; + +static void JimAssocDataHashTableValueDestructor(void *privdata, void *data) +{ + AssocDataValue *assocPtr = (AssocDataValue *) data; + + if (assocPtr->delProc != NULL) + assocPtr->delProc((Jim_Interp *)privdata, assocPtr->data); + Jim_Free(data); +} + +static const Jim_HashTableType JimAssocDataHashTableType = { + JimStringCopyHTHashFunction, + JimStringCopyHTDup, + NULL, + JimStringCopyHTKeyCompare, + JimStringCopyHTKeyDestructor, + JimAssocDataHashTableValueDestructor +}; + +void Jim_InitStack(Jim_Stack *stack) +{ + stack->len = 0; + stack->maxlen = 0; + stack->vector = NULL; +} + +void Jim_FreeStack(Jim_Stack *stack) +{ + Jim_Free(stack->vector); +} + +int Jim_StackLen(Jim_Stack *stack) +{ + return stack->len; +} + +void Jim_StackPush(Jim_Stack *stack, void *element) +{ + int neededLen = stack->len + 1; + + if (neededLen > stack->maxlen) { + stack->maxlen = neededLen < 20 ? 
20 : neededLen * 2; + stack->vector = Jim_Realloc(stack->vector, sizeof(void *) * stack->maxlen); + } + stack->vector[stack->len] = element; + stack->len++; +} + +void *Jim_StackPop(Jim_Stack *stack) +{ + if (stack->len == 0) + return NULL; + stack->len--; + return stack->vector[stack->len]; +} + +void *Jim_StackPeek(Jim_Stack *stack) +{ + if (stack->len == 0) + return NULL; + return stack->vector[stack->len - 1]; +} + +void Jim_FreeStackElements(Jim_Stack *stack, void (*freeFunc) (void *ptr)) +{ + int i; + + for (i = 0; i < stack->len; i++) + freeFunc(stack->vector[i]); +} + + + +#define JIM_TT_NONE 0 +#define JIM_TT_STR 1 +#define JIM_TT_ESC 2 +#define JIM_TT_VAR 3 +#define JIM_TT_DICTSUGAR 4 +#define JIM_TT_CMD 5 + +#define JIM_TT_SEP 6 +#define JIM_TT_EOL 7 +#define JIM_TT_EOF 8 + +#define JIM_TT_LINE 9 +#define JIM_TT_WORD 10 + + +#define JIM_TT_SUBEXPR_START 11 +#define JIM_TT_SUBEXPR_END 12 +#define JIM_TT_SUBEXPR_COMMA 13 +#define JIM_TT_EXPR_INT 14 +#define JIM_TT_EXPR_DOUBLE 15 +#define JIM_TT_EXPR_BOOLEAN 16 + +#define JIM_TT_EXPRSUGAR 17 + + +#define JIM_TT_EXPR_OP 20 + +#define TOKEN_IS_SEP(type) (type >= JIM_TT_SEP && type <= JIM_TT_EOF) + +#define TOKEN_IS_EXPR_START(type) (type == JIM_TT_NONE || type == JIM_TT_SUBEXPR_START || type == JIM_TT_SUBEXPR_COMMA) + +#define TOKEN_IS_EXPR_OP(type) (type >= JIM_TT_EXPR_OP) + +struct JimParseMissing { + int ch; + int line; +}; + +struct JimParserCtx +{ + const char *p; + int len; + int linenr; + const char *tstart; + const char *tend; + int tline; + int tt; + int eof; + int inquote; + int comment; + struct JimParseMissing missing; + const char *errmsg; +}; + +static int JimParseScript(struct JimParserCtx *pc); +static int JimParseSep(struct JimParserCtx *pc); +static int JimParseEol(struct JimParserCtx *pc); +static int JimParseCmd(struct JimParserCtx *pc); +static int JimParseQuote(struct JimParserCtx *pc); +static int JimParseVar(struct JimParserCtx *pc); +static int JimParseBrace(struct JimParserCtx *pc); +static int JimParseStr(struct JimParserCtx *pc); +static int JimParseComment(struct JimParserCtx *pc); +static void JimParseSubCmd(struct JimParserCtx *pc); +static int JimParseSubQuote(struct JimParserCtx *pc); +static Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc); + +static void JimParserInit(struct JimParserCtx *pc, const char *prg, int len, int linenr) +{ + pc->p = prg; + pc->len = len; + pc->tstart = NULL; + pc->tend = NULL; + pc->tline = 0; + pc->tt = JIM_TT_NONE; + pc->eof = 0; + pc->inquote = 0; + pc->linenr = linenr; + pc->comment = 1; + pc->missing.ch = ' '; + pc->missing.line = linenr; +} + +static int JimParseScript(struct JimParserCtx *pc) +{ + while (1) { + if (!pc->len) { + pc->tstart = pc->p; + pc->tend = pc->p - 1; + pc->tline = pc->linenr; + pc->tt = JIM_TT_EOL; + if (pc->inquote) { + pc->missing.ch = '"'; + } + pc->eof = 1; + return JIM_OK; + } + switch (*(pc->p)) { + case '\\': + if (*(pc->p + 1) == '\n' && !pc->inquote) { + return JimParseSep(pc); + } + pc->comment = 0; + return JimParseStr(pc); + case ' ': + case '\t': + case '\r': + case '\f': + if (!pc->inquote) + return JimParseSep(pc); + pc->comment = 0; + return JimParseStr(pc); + case '\n': + case ';': + pc->comment = 1; + if (!pc->inquote) + return JimParseEol(pc); + return JimParseStr(pc); + case '[': + pc->comment = 0; + return JimParseCmd(pc); + case '$': + pc->comment = 0; + if (JimParseVar(pc) == JIM_ERR) { + + pc->tstart = pc->tend = pc->p++; + pc->len--; + pc->tt = JIM_TT_ESC; + } + return JIM_OK; + case '#': + if 
(pc->comment) { + JimParseComment(pc); + continue; + } + return JimParseStr(pc); + default: + pc->comment = 0; + return JimParseStr(pc); + } + return JIM_OK; + } +} + +static int JimParseSep(struct JimParserCtx *pc) +{ + pc->tstart = pc->p; + pc->tline = pc->linenr; + while (isspace(UCHAR(*pc->p)) || (*pc->p == '\\' && *(pc->p + 1) == '\n')) { + if (*pc->p == '\n') { + break; + } + if (*pc->p == '\\') { + pc->p++; + pc->len--; + pc->linenr++; + } + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + pc->tt = JIM_TT_SEP; + return JIM_OK; +} + +static int JimParseEol(struct JimParserCtx *pc) +{ + pc->tstart = pc->p; + pc->tline = pc->linenr; + while (isspace(UCHAR(*pc->p)) || *pc->p == ';') { + if (*pc->p == '\n') + pc->linenr++; + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + pc->tt = JIM_TT_EOL; + return JIM_OK; +} + + +static void JimParseSubBrace(struct JimParserCtx *pc) +{ + int level = 1; + + + pc->p++; + pc->len--; + while (pc->len) { + switch (*pc->p) { + case '\\': + if (pc->len > 1) { + if (*++pc->p == '\n') { + pc->linenr++; + } + pc->len--; + } + break; + + case '{': + level++; + break; + + case '}': + if (--level == 0) { + pc->tend = pc->p - 1; + pc->p++; + pc->len--; + return; + } + break; + + case '\n': + pc->linenr++; + break; + } + pc->p++; + pc->len--; + } + pc->missing.ch = '{'; + pc->missing.line = pc->tline; + pc->tend = pc->p - 1; +} + +static int JimParseSubQuote(struct JimParserCtx *pc) +{ + int tt = JIM_TT_STR; + int line = pc->tline; + + + pc->p++; + pc->len--; + while (pc->len) { + switch (*pc->p) { + case '\\': + if (pc->len > 1) { + if (*++pc->p == '\n') { + pc->linenr++; + } + pc->len--; + tt = JIM_TT_ESC; + } + break; + + case '"': + pc->tend = pc->p - 1; + pc->p++; + pc->len--; + return tt; + + case '[': + JimParseSubCmd(pc); + tt = JIM_TT_ESC; + continue; + + case '\n': + pc->linenr++; + break; + + case '$': + tt = JIM_TT_ESC; + break; + } + pc->p++; + pc->len--; + } + pc->missing.ch = '"'; + pc->missing.line = line; + pc->tend = pc->p - 1; + return tt; +} + +static void JimParseSubCmd(struct JimParserCtx *pc) +{ + int level = 1; + int startofword = 1; + int line = pc->tline; + + + pc->p++; + pc->len--; + while (pc->len) { + switch (*pc->p) { + case '\\': + if (pc->len > 1) { + if (*++pc->p == '\n') { + pc->linenr++; + } + pc->len--; + } + break; + + case '[': + level++; + break; + + case ']': + if (--level == 0) { + pc->tend = pc->p - 1; + pc->p++; + pc->len--; + return; + } + break; + + case '"': + if (startofword) { + JimParseSubQuote(pc); + if (pc->missing.ch == '"') { + return; + } + continue; + } + break; + + case '{': + JimParseSubBrace(pc); + startofword = 0; + continue; + + case '\n': + pc->linenr++; + break; + } + startofword = isspace(UCHAR(*pc->p)); + pc->p++; + pc->len--; + } + pc->missing.ch = '['; + pc->missing.line = line; + pc->tend = pc->p - 1; +} + +static int JimParseBrace(struct JimParserCtx *pc) +{ + pc->tstart = pc->p + 1; + pc->tline = pc->linenr; + pc->tt = JIM_TT_STR; + JimParseSubBrace(pc); + return JIM_OK; +} + +static int JimParseCmd(struct JimParserCtx *pc) +{ + pc->tstart = pc->p + 1; + pc->tline = pc->linenr; + pc->tt = JIM_TT_CMD; + JimParseSubCmd(pc); + return JIM_OK; +} + +static int JimParseQuote(struct JimParserCtx *pc) +{ + pc->tstart = pc->p + 1; + pc->tline = pc->linenr; + pc->tt = JimParseSubQuote(pc); + return JIM_OK; +} + +static int JimParseVar(struct JimParserCtx *pc) +{ + + pc->p++; + pc->len--; + +#ifdef EXPRSUGAR_BRACKET + if (*pc->p == '[') { + + JimParseCmd(pc); + pc->tt = JIM_TT_EXPRSUGAR; + return 
JIM_OK; + } +#endif + + pc->tstart = pc->p; + pc->tt = JIM_TT_VAR; + pc->tline = pc->linenr; + + if (*pc->p == '{') { + pc->tstart = ++pc->p; + pc->len--; + + while (pc->len && *pc->p != '}') { + if (*pc->p == '\n') { + pc->linenr++; + } + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + if (pc->len) { + pc->p++; + pc->len--; + } + } + else { + while (1) { + + if (pc->p[0] == ':' && pc->p[1] == ':') { + while (*pc->p == ':') { + pc->p++; + pc->len--; + } + continue; + } + if (isalnum(UCHAR(*pc->p)) || *pc->p == '_' || UCHAR(*pc->p) >= 0x80) { + pc->p++; + pc->len--; + continue; + } + break; + } + + if (*pc->p == '(') { + int count = 1; + const char *paren = NULL; + + pc->tt = JIM_TT_DICTSUGAR; + + while (count && pc->len) { + pc->p++; + pc->len--; + if (*pc->p == '\\' && pc->len >= 1) { + pc->p++; + pc->len--; + } + else if (*pc->p == '(') { + count++; + } + else if (*pc->p == ')') { + paren = pc->p; + count--; + } + } + if (count == 0) { + pc->p++; + pc->len--; + } + else if (paren) { + + paren++; + pc->len += (pc->p - paren); + pc->p = paren; + } +#ifndef EXPRSUGAR_BRACKET + if (*pc->tstart == '(') { + pc->tt = JIM_TT_EXPRSUGAR; + } +#endif + } + pc->tend = pc->p - 1; + } + if (pc->tstart == pc->p) { + pc->p--; + pc->len++; + return JIM_ERR; + } + return JIM_OK; +} + +static int JimParseStr(struct JimParserCtx *pc) +{ + if (pc->tt == JIM_TT_SEP || pc->tt == JIM_TT_EOL || + pc->tt == JIM_TT_NONE || pc->tt == JIM_TT_STR) { + + if (*pc->p == '{') { + return JimParseBrace(pc); + } + if (*pc->p == '"') { + pc->inquote = 1; + pc->p++; + pc->len--; + + pc->missing.line = pc->tline; + } + } + pc->tstart = pc->p; + pc->tline = pc->linenr; + while (1) { + if (pc->len == 0) { + if (pc->inquote) { + pc->missing.ch = '"'; + } + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + return JIM_OK; + } + switch (*pc->p) { + case '\\': + if (!pc->inquote && *(pc->p + 1) == '\n') { + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + return JIM_OK; + } + if (pc->len >= 2) { + if (*(pc->p + 1) == '\n') { + pc->linenr++; + } + pc->p++; + pc->len--; + } + else if (pc->len == 1) { + + pc->missing.ch = '\\'; + } + break; + case '(': + + if (pc->len > 1 && pc->p[1] != '$') { + break; + } + + case ')': + + if (*pc->p == '(' || pc->tt == JIM_TT_VAR) { + if (pc->p == pc->tstart) { + + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + return JIM_OK; + } + break; + + case '$': + case '[': + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + return JIM_OK; + case ' ': + case '\t': + case '\n': + case '\r': + case '\f': + case ';': + if (!pc->inquote) { + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + return JIM_OK; + } + else if (*pc->p == '\n') { + pc->linenr++; + } + break; + case '"': + if (pc->inquote) { + pc->tend = pc->p - 1; + pc->tt = JIM_TT_ESC; + pc->p++; + pc->len--; + pc->inquote = 0; + return JIM_OK; + } + break; + } + pc->p++; + pc->len--; + } + return JIM_OK; +} + +static int JimParseComment(struct JimParserCtx *pc) +{ + while (*pc->p) { + if (*pc->p == '\\') { + pc->p++; + pc->len--; + if (pc->len == 0) { + pc->missing.ch = '\\'; + return JIM_OK; + } + if (*pc->p == '\n') { + pc->linenr++; + } + } + else if (*pc->p == '\n') { + pc->p++; + pc->len--; + pc->linenr++; + break; + } + pc->p++; + pc->len--; + } + return JIM_OK; +} + + +static int xdigitval(int c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + return -1; +} + +static int odigitval(int c) +{ + if (c >= '0' && c <= '7') + 
return c - '0'; + return -1; +} + +static int JimEscape(char *dest, const char *s, int slen) +{ + char *p = dest; + int i, len; + + for (i = 0; i < slen; i++) { + switch (s[i]) { + case '\\': + switch (s[i + 1]) { + case 'a': + *p++ = 0x7; + i++; + break; + case 'b': + *p++ = 0x8; + i++; + break; + case 'f': + *p++ = 0xc; + i++; + break; + case 'n': + *p++ = 0xa; + i++; + break; + case 'r': + *p++ = 0xd; + i++; + break; + case 't': + *p++ = 0x9; + i++; + break; + case 'u': + case 'U': + case 'x': + { + unsigned val = 0; + int k; + int maxchars = 2; + + i++; + + if (s[i] == 'U') { + maxchars = 8; + } + else if (s[i] == 'u') { + if (s[i + 1] == '{') { + maxchars = 6; + i++; + } + else { + maxchars = 4; + } + } + + for (k = 0; k < maxchars; k++) { + int c = xdigitval(s[i + k + 1]); + if (c == -1) { + break; + } + val = (val << 4) | c; + } + + if (s[i] == '{') { + if (k == 0 || val > 0x1fffff || s[i + k + 1] != '}') { + + i--; + k = 0; + } + else { + + k++; + } + } + if (k) { + + if (s[i] == 'x') { + *p++ = val; + } + else { + p += utf8_fromunicode(p, val); + } + i += k; + break; + } + + *p++ = s[i]; + } + break; + case 'v': + *p++ = 0xb; + i++; + break; + case '\0': + *p++ = '\\'; + i++; + break; + case '\n': + + *p++ = ' '; + do { + i++; + } while (s[i + 1] == ' ' || s[i + 1] == '\t'); + break; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + + { + int val = 0; + int c = odigitval(s[i + 1]); + + val = c; + c = odigitval(s[i + 2]); + if (c == -1) { + *p++ = val; + i++; + break; + } + val = (val * 8) + c; + c = odigitval(s[i + 3]); + if (c == -1) { + *p++ = val; + i += 2; + break; + } + val = (val * 8) + c; + *p++ = val; + i += 3; + } + break; + default: + *p++ = s[i + 1]; + i++; + break; + } + break; + default: + *p++ = s[i]; + break; + } + } + len = p - dest; + *p = '\0'; + return len; +} + +static Jim_Obj *JimParserGetTokenObj(Jim_Interp *interp, struct JimParserCtx *pc) +{ + const char *start, *end; + char *token; + int len; + + start = pc->tstart; + end = pc->tend; + len = (end - start) + 1; + if (len < 0) { + len = 0; + } + token = Jim_Alloc(len + 1); + if (pc->tt != JIM_TT_ESC) { + + memcpy(token, start, len); + token[len] = '\0'; + } + else { + + len = JimEscape(token, start, len); + } + + return Jim_NewStringObjNoAlloc(interp, token, len); +} + +static int JimParseListSep(struct JimParserCtx *pc); +static int JimParseListStr(struct JimParserCtx *pc); +static int JimParseListQuote(struct JimParserCtx *pc); + +static int JimParseList(struct JimParserCtx *pc) +{ + if (isspace(UCHAR(*pc->p))) { + return JimParseListSep(pc); + } + switch (*pc->p) { + case '"': + return JimParseListQuote(pc); + + case '{': + return JimParseBrace(pc); + + default: + if (pc->len) { + return JimParseListStr(pc); + } + break; + } + + pc->tstart = pc->tend = pc->p; + pc->tline = pc->linenr; + pc->tt = JIM_TT_EOL; + pc->eof = 1; + return JIM_OK; +} + +static int JimParseListSep(struct JimParserCtx *pc) +{ + pc->tstart = pc->p; + pc->tline = pc->linenr; + while (isspace(UCHAR(*pc->p))) { + if (*pc->p == '\n') { + pc->linenr++; + } + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + pc->tt = JIM_TT_SEP; + return JIM_OK; +} + +static int JimParseListQuote(struct JimParserCtx *pc) +{ + pc->p++; + pc->len--; + + pc->tstart = pc->p; + pc->tline = pc->linenr; + pc->tt = JIM_TT_STR; + + while (pc->len) { + switch (*pc->p) { + case '\\': + pc->tt = JIM_TT_ESC; + if (--pc->len == 0) { + + pc->tend = pc->p; + return JIM_OK; + } + pc->p++; + break; + case '\n': + 
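+ /* Keep the line counter accurate across newlines inside the quoted element */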
pc->linenr++; + break; + case '"': + pc->tend = pc->p - 1; + pc->p++; + pc->len--; + return JIM_OK; + } + pc->p++; + pc->len--; + } + + pc->tend = pc->p - 1; + return JIM_OK; +} + +static int JimParseListStr(struct JimParserCtx *pc) +{ + pc->tstart = pc->p; + pc->tline = pc->linenr; + pc->tt = JIM_TT_STR; + + while (pc->len) { + if (isspace(UCHAR(*pc->p))) { + pc->tend = pc->p - 1; + return JIM_OK; + } + if (*pc->p == '\\') { + if (--pc->len == 0) { + + pc->tend = pc->p; + return JIM_OK; + } + pc->tt = JIM_TT_ESC; + pc->p++; + } + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + return JIM_OK; +} + + + +Jim_Obj *Jim_NewObj(Jim_Interp *interp) +{ + Jim_Obj *objPtr; + + + if (interp->freeList != NULL) { + + objPtr = interp->freeList; + interp->freeList = objPtr->nextObjPtr; + } + else { + + objPtr = Jim_Alloc(sizeof(*objPtr)); + } + + objPtr->refCount = 0; + + + objPtr->prevObjPtr = NULL; + objPtr->nextObjPtr = interp->liveList; + if (interp->liveList) + interp->liveList->prevObjPtr = objPtr; + interp->liveList = objPtr; + + return objPtr; +} + +void Jim_FreeObj(Jim_Interp *interp, Jim_Obj *objPtr) +{ + + JimPanic((objPtr->refCount != 0, "!!!Object %p freed with bad refcount %d, type=%s", objPtr, + objPtr->refCount, objPtr->typePtr ? objPtr->typePtr->name : "")); + + + Jim_FreeIntRep(interp, objPtr); + + if (objPtr->bytes != NULL) { + if (objPtr->bytes != JimEmptyStringRep) + Jim_Free(objPtr->bytes); + } + + if (objPtr->prevObjPtr) + objPtr->prevObjPtr->nextObjPtr = objPtr->nextObjPtr; + if (objPtr->nextObjPtr) + objPtr->nextObjPtr->prevObjPtr = objPtr->prevObjPtr; + if (interp->liveList == objPtr) + interp->liveList = objPtr->nextObjPtr; +#ifdef JIM_DISABLE_OBJECT_POOL + Jim_Free(objPtr); +#else + + objPtr->prevObjPtr = NULL; + objPtr->nextObjPtr = interp->freeList; + if (interp->freeList) + interp->freeList->prevObjPtr = objPtr; + interp->freeList = objPtr; + objPtr->refCount = -1; +#endif +} + + +void Jim_InvalidateStringRep(Jim_Obj *objPtr) +{ + if (objPtr->bytes != NULL) { + if (objPtr->bytes != JimEmptyStringRep) + Jim_Free(objPtr->bytes); + } + objPtr->bytes = NULL; +} + + +Jim_Obj *Jim_DuplicateObj(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_Obj *dupPtr; + + dupPtr = Jim_NewObj(interp); + if (objPtr->bytes == NULL) { + + dupPtr->bytes = NULL; + } + else if (objPtr->length == 0) { + dupPtr->bytes = JimEmptyStringRep; + dupPtr->length = 0; + dupPtr->typePtr = NULL; + return dupPtr; + } + else { + dupPtr->bytes = Jim_Alloc(objPtr->length + 1); + dupPtr->length = objPtr->length; + + memcpy(dupPtr->bytes, objPtr->bytes, objPtr->length + 1); + } + + + dupPtr->typePtr = objPtr->typePtr; + if (objPtr->typePtr != NULL) { + if (objPtr->typePtr->dupIntRepProc == NULL) { + dupPtr->internalRep = objPtr->internalRep; + } + else { + + objPtr->typePtr->dupIntRepProc(interp, objPtr, dupPtr); + } + } + return dupPtr; +} + +const char *Jim_GetString(Jim_Obj *objPtr, int *lenPtr) +{ + if (objPtr->bytes == NULL) { + + JimPanic((objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name)); + objPtr->typePtr->updateStringProc(objPtr); + } + if (lenPtr) + *lenPtr = objPtr->length; + return objPtr->bytes; +} + + +int Jim_Length(Jim_Obj *objPtr) +{ + if (objPtr->bytes == NULL) { + + Jim_GetString(objPtr, NULL); + } + return objPtr->length; +} + + +const char *Jim_String(Jim_Obj *objPtr) +{ + if (objPtr->bytes == NULL) { + + Jim_GetString(objPtr, NULL); + } + return objPtr->bytes; +} + +static void JimSetStringBytes(Jim_Obj *objPtr, const char *str) +{ + 
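+ /* Install a freshly allocated copy of str as the object's string representation */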
objPtr->bytes = Jim_StrDup(str); + objPtr->length = strlen(str); +} + +static void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); + +static const Jim_ObjType dictSubstObjType = { + "dict-substitution", + FreeDictSubstInternalRep, + DupDictSubstInternalRep, + NULL, + JIM_TYPE_NONE, +}; + +static void FreeInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); + +static const Jim_ObjType interpolatedObjType = { + "interpolated", + FreeInterpolatedInternalRep, + DupInterpolatedInternalRep, + NULL, + JIM_TYPE_NONE, +}; + +static void FreeInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); +} + +static void DupInterpolatedInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + + dupPtr->internalRep = srcPtr->internalRep; + + Jim_IncrRefCount(dupPtr->internalRep.dictSubstValue.indexObjPtr); +} + +static void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); +static int SetStringFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + +static const Jim_ObjType stringObjType = { + "string", + NULL, + DupStringInternalRep, + NULL, + JIM_TYPE_REFERENCES, +}; + +static void DupStringInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + JIM_NOTUSED(interp); + + dupPtr->internalRep.strValue.maxLength = srcPtr->length; + dupPtr->internalRep.strValue.charLength = srcPtr->internalRep.strValue.charLength; +} + +static int SetStringFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + if (objPtr->typePtr != &stringObjType) { + + if (objPtr->bytes == NULL) { + + JimPanic((objPtr->typePtr->updateStringProc == NULL, "UpdateStringProc called against '%s' type.", objPtr->typePtr->name)); + objPtr->typePtr->updateStringProc(objPtr); + } + + Jim_FreeIntRep(interp, objPtr); + + objPtr->typePtr = &stringObjType; + objPtr->internalRep.strValue.maxLength = objPtr->length; + + objPtr->internalRep.strValue.charLength = -1; + } + return JIM_OK; +} + +int Jim_Utf8Length(Jim_Interp *interp, Jim_Obj *objPtr) +{ +#ifdef JIM_UTF8 + SetStringFromAny(interp, objPtr); + + if (objPtr->internalRep.strValue.charLength < 0) { + objPtr->internalRep.strValue.charLength = utf8_strlen(objPtr->bytes, objPtr->length); + } + return objPtr->internalRep.strValue.charLength; +#else + return Jim_Length(objPtr); +#endif +} + + +Jim_Obj *Jim_NewStringObj(Jim_Interp *interp, const char *s, int len) +{ + Jim_Obj *objPtr = Jim_NewObj(interp); + + + if (len == -1) + len = strlen(s); + + if (len == 0) { + objPtr->bytes = JimEmptyStringRep; + } + else { + objPtr->bytes = Jim_StrDupLen(s, len); + } + objPtr->length = len; + + + objPtr->typePtr = NULL; + return objPtr; +} + + +Jim_Obj *Jim_NewStringObjUtf8(Jim_Interp *interp, const char *s, int charlen) +{ +#ifdef JIM_UTF8 + + int bytelen = utf8_index(s, charlen); + + Jim_Obj *objPtr = Jim_NewStringObj(interp, s, bytelen); + + + objPtr->typePtr = &stringObjType; + objPtr->internalRep.strValue.maxLength = bytelen; + objPtr->internalRep.strValue.charLength = charlen; + + return objPtr; +#else + return Jim_NewStringObj(interp, s, charlen); +#endif +} + +Jim_Obj *Jim_NewStringObjNoAlloc(Jim_Interp *interp, char *s, int len) +{ + Jim_Obj *objPtr = Jim_NewObj(interp); + + objPtr->bytes = s; + objPtr->length = (len == -1) ? 
strlen(s) : len; + objPtr->typePtr = NULL; + return objPtr; +} + +static void StringAppendString(Jim_Obj *objPtr, const char *str, int len) +{ + int needlen; + + if (len == -1) + len = strlen(str); + needlen = objPtr->length + len; + if (objPtr->internalRep.strValue.maxLength < needlen || + objPtr->internalRep.strValue.maxLength == 0) { + needlen *= 2; + + if (needlen < 7) { + needlen = 7; + } + if (objPtr->bytes == JimEmptyStringRep) { + objPtr->bytes = Jim_Alloc(needlen + 1); + } + else { + objPtr->bytes = Jim_Realloc(objPtr->bytes, needlen + 1); + } + objPtr->internalRep.strValue.maxLength = needlen; + } + memcpy(objPtr->bytes + objPtr->length, str, len); + objPtr->bytes[objPtr->length + len] = '\0'; + + if (objPtr->internalRep.strValue.charLength >= 0) { + + objPtr->internalRep.strValue.charLength += utf8_strlen(objPtr->bytes + objPtr->length, len); + } + objPtr->length += len; +} + +void Jim_AppendString(Jim_Interp *interp, Jim_Obj *objPtr, const char *str, int len) +{ + JimPanic((Jim_IsShared(objPtr), "Jim_AppendString called with shared object")); + SetStringFromAny(interp, objPtr); + StringAppendString(objPtr, str, len); +} + +void Jim_AppendObj(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *appendObjPtr) +{ + int len; + const char *str = Jim_GetString(appendObjPtr, &len); + Jim_AppendString(interp, objPtr, str, len); +} + +void Jim_AppendStrings(Jim_Interp *interp, Jim_Obj *objPtr, ...) +{ + va_list ap; + + SetStringFromAny(interp, objPtr); + va_start(ap, objPtr); + while (1) { + const char *s = va_arg(ap, const char *); + + if (s == NULL) + break; + Jim_AppendString(interp, objPtr, s, -1); + } + va_end(ap); +} + +int Jim_StringEqObj(Jim_Obj *aObjPtr, Jim_Obj *bObjPtr) +{ + if (aObjPtr == bObjPtr) { + return 1; + } + else { + int Alen, Blen; + const char *sA = Jim_GetString(aObjPtr, &Alen); + const char *sB = Jim_GetString(bObjPtr, &Blen); + + return Alen == Blen && memcmp(sA, sB, Alen) == 0; + } +} + +int Jim_StringMatchObj(Jim_Interp *interp, Jim_Obj *patternObjPtr, Jim_Obj *objPtr, int nocase) +{ + int plen, slen; + const char *pattern = Jim_GetString(patternObjPtr, &plen); + const char *string = Jim_GetString(objPtr, &slen); + return JimGlobMatch(pattern, plen, string, slen, nocase); +} + +int Jim_StringCompareObj(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *secondObjPtr, int nocase) +{ + const char *s1 = Jim_String(firstObjPtr); + int l1 = Jim_Utf8Length(interp, firstObjPtr); + const char *s2 = Jim_String(secondObjPtr); + int l2 = Jim_Utf8Length(interp, secondObjPtr); + return JimStringCompareUtf8(s1, l1, s2, l2, nocase); +} + +static int JimRelToAbsIndex(int len, int idx) +{ + if (idx < 0 && idx > -INT_MAX) + return len + idx; + return idx; +} + +static void JimRelToAbsRange(int len, int *firstPtr, int *lastPtr, int *rangeLenPtr) +{ + int rangeLen; + + if (*firstPtr > *lastPtr) { + rangeLen = 0; + } + else { + rangeLen = *lastPtr - *firstPtr + 1; + if (rangeLen) { + if (*firstPtr < 0) { + rangeLen += *firstPtr; + *firstPtr = 0; + } + if (*lastPtr >= len) { + rangeLen -= (*lastPtr - (len - 1)); + *lastPtr = len - 1; + } + } + } + if (rangeLen < 0) + rangeLen = 0; + + *rangeLenPtr = rangeLen; +} + +static int JimStringGetRange(Jim_Interp *interp, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, + int len, int *first, int *last, int *range) +{ + if (Jim_GetIndex(interp, firstObjPtr, first) != JIM_OK) { + return JIM_ERR; + } + if (Jim_GetIndex(interp, lastObjPtr, last) != JIM_OK) { + return JIM_ERR; + } + *first = JimRelToAbsIndex(len, *first); + *last = JimRelToAbsIndex(len, 
*last); + JimRelToAbsRange(len, first, last, range); + return JIM_OK; +} + +Jim_Obj *Jim_StringByteRangeObj(Jim_Interp *interp, + Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) +{ + int first, last; + const char *str; + int rangeLen; + int bytelen; + + str = Jim_GetString(strObjPtr, &bytelen); + + if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, bytelen, &first, &last, &rangeLen) != JIM_OK) { + return NULL; + } + + if (first == 0 && rangeLen == bytelen) { + return strObjPtr; + } + return Jim_NewStringObj(interp, str + first, rangeLen); +} + +Jim_Obj *Jim_StringRangeObj(Jim_Interp *interp, + Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr) +{ +#ifdef JIM_UTF8 + int first, last; + const char *str; + int len, rangeLen; + int bytelen; + + str = Jim_GetString(strObjPtr, &bytelen); + len = Jim_Utf8Length(interp, strObjPtr); + + if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) { + return NULL; + } + + if (first == 0 && rangeLen == len) { + return strObjPtr; + } + if (len == bytelen) { + + return Jim_NewStringObj(interp, str + first, rangeLen); + } + return Jim_NewStringObjUtf8(interp, str + utf8_index(str, first), rangeLen); +#else + return Jim_StringByteRangeObj(interp, strObjPtr, firstObjPtr, lastObjPtr); +#endif +} + +Jim_Obj *JimStringReplaceObj(Jim_Interp *interp, + Jim_Obj *strObjPtr, Jim_Obj *firstObjPtr, Jim_Obj *lastObjPtr, Jim_Obj *newStrObj) +{ + int first, last; + const char *str; + int len, rangeLen; + Jim_Obj *objPtr; + + len = Jim_Utf8Length(interp, strObjPtr); + + if (JimStringGetRange(interp, firstObjPtr, lastObjPtr, len, &first, &last, &rangeLen) != JIM_OK) { + return NULL; + } + + if (last < first) { + return strObjPtr; + } + + str = Jim_String(strObjPtr); + + + objPtr = Jim_NewStringObjUtf8(interp, str, first); + + + if (newStrObj) { + Jim_AppendObj(interp, objPtr, newStrObj); + } + + + Jim_AppendString(interp, objPtr, str + utf8_index(str, last + 1), len - last - 1); + + return objPtr; +} + +static void JimStrCopyUpperLower(char *dest, const char *str, int uc) +{ + while (*str) { + int c; + str += utf8_tounicode(str, &c); + dest += utf8_getchars(dest, uc ? 
utf8_upper(c) : utf8_lower(c)); + } + *dest = 0; +} + +static Jim_Obj *JimStringToLower(Jim_Interp *interp, Jim_Obj *strObjPtr) +{ + char *buf; + int len; + const char *str; + + str = Jim_GetString(strObjPtr, &len); + +#ifdef JIM_UTF8 + len *= 2; +#endif + buf = Jim_Alloc(len + 1); + JimStrCopyUpperLower(buf, str, 0); + return Jim_NewStringObjNoAlloc(interp, buf, -1); +} + +static Jim_Obj *JimStringToUpper(Jim_Interp *interp, Jim_Obj *strObjPtr) +{ + char *buf; + const char *str; + int len; + + str = Jim_GetString(strObjPtr, &len); + +#ifdef JIM_UTF8 + len *= 2; +#endif + buf = Jim_Alloc(len + 1); + JimStrCopyUpperLower(buf, str, 1); + return Jim_NewStringObjNoAlloc(interp, buf, -1); +} + +static Jim_Obj *JimStringToTitle(Jim_Interp *interp, Jim_Obj *strObjPtr) +{ + char *buf, *p; + int len; + int c; + const char *str; + + str = Jim_GetString(strObjPtr, &len); + +#ifdef JIM_UTF8 + len *= 2; +#endif + buf = p = Jim_Alloc(len + 1); + + str += utf8_tounicode(str, &c); + p += utf8_getchars(p, utf8_title(c)); + + JimStrCopyUpperLower(p, str, 0); + + return Jim_NewStringObjNoAlloc(interp, buf, -1); +} + +static const char *utf8_memchr(const char *str, int len, int c) +{ +#ifdef JIM_UTF8 + while (len) { + int sc; + int n = utf8_tounicode(str, &sc); + if (sc == c) { + return str; + } + str += n; + len -= n; + } + return NULL; +#else + return memchr(str, c, len); +#endif +} + +static const char *JimFindTrimLeft(const char *str, int len, const char *trimchars, int trimlen) +{ + while (len) { + int c; + int n = utf8_tounicode(str, &c); + + if (utf8_memchr(trimchars, trimlen, c) == NULL) { + + break; + } + str += n; + len -= n; + } + return str; +} + +static const char *JimFindTrimRight(const char *str, int len, const char *trimchars, int trimlen) +{ + str += len; + + while (len) { + int c; + int n = utf8_prev_len(str, len); + + len -= n; + str -= n; + + n = utf8_tounicode(str, &c); + + if (utf8_memchr(trimchars, trimlen, c) == NULL) { + return str + n; + } + } + + return NULL; +} + +static const char default_trim_chars[] = " \t\n\r"; + +static int default_trim_chars_len = sizeof(default_trim_chars); + +static Jim_Obj *JimStringTrimLeft(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) +{ + int len; + const char *str = Jim_GetString(strObjPtr, &len); + const char *trimchars = default_trim_chars; + int trimcharslen = default_trim_chars_len; + const char *newstr; + + if (trimcharsObjPtr) { + trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); + } + + newstr = JimFindTrimLeft(str, len, trimchars, trimcharslen); + if (newstr == str) { + return strObjPtr; + } + + return Jim_NewStringObj(interp, newstr, len - (newstr - str)); +} + +static Jim_Obj *JimStringTrimRight(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) +{ + int len; + const char *trimchars = default_trim_chars; + int trimcharslen = default_trim_chars_len; + const char *nontrim; + + if (trimcharsObjPtr) { + trimchars = Jim_GetString(trimcharsObjPtr, &trimcharslen); + } + + SetStringFromAny(interp, strObjPtr); + + len = Jim_Length(strObjPtr); + nontrim = JimFindTrimRight(strObjPtr->bytes, len, trimchars, trimcharslen); + + if (nontrim == NULL) { + + return Jim_NewEmptyStringObj(interp); + } + if (nontrim == strObjPtr->bytes + len) { + + return strObjPtr; + } + + if (Jim_IsShared(strObjPtr)) { + strObjPtr = Jim_NewStringObj(interp, strObjPtr->bytes, (nontrim - strObjPtr->bytes)); + } + else { + + strObjPtr->bytes[nontrim - strObjPtr->bytes] = 0; + strObjPtr->length = (nontrim - strObjPtr->bytes); + } + + 
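+ /* Either a new trimmed copy (shared object) or the original truncated in place */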
return strObjPtr; +} + +static Jim_Obj *JimStringTrim(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *trimcharsObjPtr) +{ + + Jim_Obj *objPtr = JimStringTrimLeft(interp, strObjPtr, trimcharsObjPtr); + + + strObjPtr = JimStringTrimRight(interp, objPtr, trimcharsObjPtr); + + + if (objPtr != strObjPtr && objPtr->refCount == 0) { + + Jim_FreeNewObj(interp, objPtr); + } + + return strObjPtr; +} + + +#ifdef HAVE_ISASCII +#define jim_isascii isascii +#else +static int jim_isascii(int c) +{ + return !(c & ~0x7f); +} +#endif + +static int JimStringIs(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *strClass, int strict) +{ + static const char * const strclassnames[] = { + "integer", "alpha", "alnum", "ascii", "digit", + "double", "lower", "upper", "space", "xdigit", + "control", "print", "graph", "punct", "boolean", + NULL + }; + enum { + STR_IS_INTEGER, STR_IS_ALPHA, STR_IS_ALNUM, STR_IS_ASCII, STR_IS_DIGIT, + STR_IS_DOUBLE, STR_IS_LOWER, STR_IS_UPPER, STR_IS_SPACE, STR_IS_XDIGIT, + STR_IS_CONTROL, STR_IS_PRINT, STR_IS_GRAPH, STR_IS_PUNCT, STR_IS_BOOLEAN, + }; + int strclass; + int len; + int i; + const char *str; + int (*isclassfunc)(int c) = NULL; + + if (Jim_GetEnum(interp, strClass, strclassnames, &strclass, "class", JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + + str = Jim_GetString(strObjPtr, &len); + if (len == 0) { + Jim_SetResultBool(interp, !strict); + return JIM_OK; + } + + switch (strclass) { + case STR_IS_INTEGER: + { + jim_wide w; + Jim_SetResultBool(interp, JimGetWideNoErr(interp, strObjPtr, &w) == JIM_OK); + return JIM_OK; + } + + case STR_IS_DOUBLE: + { + double d; + Jim_SetResultBool(interp, Jim_GetDouble(interp, strObjPtr, &d) == JIM_OK && errno != ERANGE); + return JIM_OK; + } + + case STR_IS_BOOLEAN: + { + int b; + Jim_SetResultBool(interp, Jim_GetBoolean(interp, strObjPtr, &b) == JIM_OK); + return JIM_OK; + } + + case STR_IS_ALPHA: isclassfunc = isalpha; break; + case STR_IS_ALNUM: isclassfunc = isalnum; break; + case STR_IS_ASCII: isclassfunc = jim_isascii; break; + case STR_IS_DIGIT: isclassfunc = isdigit; break; + case STR_IS_LOWER: isclassfunc = islower; break; + case STR_IS_UPPER: isclassfunc = isupper; break; + case STR_IS_SPACE: isclassfunc = isspace; break; + case STR_IS_XDIGIT: isclassfunc = isxdigit; break; + case STR_IS_CONTROL: isclassfunc = iscntrl; break; + case STR_IS_PRINT: isclassfunc = isprint; break; + case STR_IS_GRAPH: isclassfunc = isgraph; break; + case STR_IS_PUNCT: isclassfunc = ispunct; break; + default: + return JIM_ERR; + } + + for (i = 0; i < len; i++) { + if (!isclassfunc(UCHAR(str[i]))) { + Jim_SetResultBool(interp, 0); + return JIM_OK; + } + } + Jim_SetResultBool(interp, 1); + return JIM_OK; +} + + + +static const Jim_ObjType comparedStringObjType = { + "compared-string", + NULL, + NULL, + NULL, + JIM_TYPE_REFERENCES, +}; + +int Jim_CompareStringImmediate(Jim_Interp *interp, Jim_Obj *objPtr, const char *str) +{ + if (objPtr->typePtr == &comparedStringObjType && objPtr->internalRep.ptr == str) { + return 1; + } + else { + if (strcmp(str, Jim_String(objPtr)) != 0) + return 0; + + if (objPtr->typePtr != &comparedStringObjType) { + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &comparedStringObjType; + } + objPtr->internalRep.ptr = (char *)str; + return 1; + } +} + +static int qsortCompareStringPointers(const void *a, const void *b) +{ + char *const *sa = (char *const *)a; + char *const *sb = (char *const *)b; + + return strcmp(*sa, *sb); +} + + + +static void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); 
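+ /* "source" objects carry the file name and line number a value originated from */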
+static void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); + +static const Jim_ObjType sourceObjType = { + "source", + FreeSourceInternalRep, + DupSourceInternalRep, + NULL, + JIM_TYPE_REFERENCES, +}; + +void FreeSourceInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_DecrRefCount(interp, objPtr->internalRep.sourceValue.fileNameObj); +} + +void DupSourceInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + dupPtr->internalRep.sourceValue = srcPtr->internalRep.sourceValue; + Jim_IncrRefCount(dupPtr->internalRep.sourceValue.fileNameObj); +} + +static const Jim_ObjType scriptLineObjType = { + "scriptline", + NULL, + NULL, + NULL, + JIM_NONE, +}; + +static Jim_Obj *JimNewScriptLineObj(Jim_Interp *interp, int argc, int line) +{ + Jim_Obj *objPtr; + +#ifdef DEBUG_SHOW_SCRIPT + char buf[100]; + snprintf(buf, sizeof(buf), "line=%d, argc=%d", line, argc); + objPtr = Jim_NewStringObj(interp, buf, -1); +#else + objPtr = Jim_NewEmptyStringObj(interp); +#endif + objPtr->typePtr = &scriptLineObjType; + objPtr->internalRep.scriptLineValue.argc = argc; + objPtr->internalRep.scriptLineValue.line = line; + + return objPtr; +} + +static void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); + +static const Jim_ObjType scriptObjType = { + "script", + FreeScriptInternalRep, + DupScriptInternalRep, + NULL, + JIM_TYPE_NONE, +}; + +typedef struct ScriptToken +{ + Jim_Obj *objPtr; + int type; +} ScriptToken; + +typedef struct ScriptObj +{ + ScriptToken *token; + Jim_Obj *fileNameObj; + int len; + int substFlags; + int inUse; /* Used to share a ScriptObj. Currently + only used by Jim_EvalObj() as protection against + shimmering of the currently evaluated object. 
*/ + int firstline; + int linenr; + int missing; +} ScriptObj; + +static void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); +static int JimParseCheckMissing(Jim_Interp *interp, int ch); +static ScriptObj *JimGetScript(Jim_Interp *interp, Jim_Obj *objPtr); +static void JimSetErrorStack(Jim_Interp *interp, ScriptObj *script); + +void FreeScriptInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + int i; + struct ScriptObj *script = (void *)objPtr->internalRep.ptr; + + if (--script->inUse != 0) + return; + for (i = 0; i < script->len; i++) { + Jim_DecrRefCount(interp, script->token[i].objPtr); + } + Jim_Free(script->token); + Jim_DecrRefCount(interp, script->fileNameObj); + Jim_Free(script); +} + +void DupScriptInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + JIM_NOTUSED(interp); + JIM_NOTUSED(srcPtr); + + dupPtr->typePtr = NULL; +} + +typedef struct +{ + const char *token; + int len; + int type; + int line; +} ParseToken; + +typedef struct +{ + + ParseToken *list; + int size; + int count; + ParseToken static_list[20]; +} ParseTokenList; + +static void ScriptTokenListInit(ParseTokenList *tokenlist) +{ + tokenlist->list = tokenlist->static_list; + tokenlist->size = sizeof(tokenlist->static_list) / sizeof(ParseToken); + tokenlist->count = 0; +} + +static void ScriptTokenListFree(ParseTokenList *tokenlist) +{ + if (tokenlist->list != tokenlist->static_list) { + Jim_Free(tokenlist->list); + } +} + +static void ScriptAddToken(ParseTokenList *tokenlist, const char *token, int len, int type, + int line) +{ + ParseToken *t; + + if (tokenlist->count == tokenlist->size) { + + tokenlist->size *= 2; + if (tokenlist->list != tokenlist->static_list) { + tokenlist->list = + Jim_Realloc(tokenlist->list, tokenlist->size * sizeof(*tokenlist->list)); + } + else { + + tokenlist->list = Jim_Alloc(tokenlist->size * sizeof(*tokenlist->list)); + memcpy(tokenlist->list, tokenlist->static_list, + tokenlist->count * sizeof(*tokenlist->list)); + } + } + t = &tokenlist->list[tokenlist->count++]; + t->token = token; + t->len = len; + t->type = type; + t->line = line; +} + +static int JimCountWordTokens(struct ScriptObj *script, ParseToken *t) +{ + int expand = 1; + int count = 0; + + + if (t->type == JIM_TT_STR && !TOKEN_IS_SEP(t[1].type)) { + if ((t->len == 1 && *t->token == '*') || (t->len == 6 && strncmp(t->token, "expand", 6) == 0)) { + + expand = -1; + t++; + } + else { + if (script->missing == ' ') { + + script->missing = '}'; + script->linenr = t[1].line; + } + } + } + + + while (!TOKEN_IS_SEP(t->type)) { + t++; + count++; + } + + return count * expand; +} + +static Jim_Obj *JimMakeScriptObj(Jim_Interp *interp, const ParseToken *t) +{ + Jim_Obj *objPtr; + + if (t->type == JIM_TT_ESC && memchr(t->token, '\\', t->len) != NULL) { + + int len = t->len; + char *str = Jim_Alloc(len + 1); + len = JimEscape(str, t->token, len); + objPtr = Jim_NewStringObjNoAlloc(interp, str, len); + } + else { + objPtr = Jim_NewStringObj(interp, t->token, t->len); + } + return objPtr; +} + +static void ScriptObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, + ParseTokenList *tokenlist) +{ + int i; + struct ScriptToken *token; + + int lineargs = 0; + + ScriptToken *linefirst; + int count; + int linenr; + +#ifdef DEBUG_SHOW_SCRIPT_TOKENS + printf("==== Tokens ====\n"); + for (i = 0; i < tokenlist->count; i++) { + printf("[%2d]@%d %s '%.*s'\n", i, tokenlist->list[i].line, jim_tt_name(tokenlist->list[i].type), + tokenlist->list[i].len, tokenlist->list[i].token); + } +#endif + + + count = 
tokenlist->count; + for (i = 0; i < tokenlist->count; i++) { + if (tokenlist->list[i].type == JIM_TT_EOL) { + count++; + } + } + linenr = script->firstline = tokenlist->list[0].line; + + token = script->token = Jim_Alloc(sizeof(ScriptToken) * count); + + + linefirst = token++; + + for (i = 0; i < tokenlist->count; ) { + + int wordtokens; + + + while (tokenlist->list[i].type == JIM_TT_SEP) { + i++; + } + + wordtokens = JimCountWordTokens(script, tokenlist->list + i); + + if (wordtokens == 0) { + + if (lineargs) { + linefirst->type = JIM_TT_LINE; + linefirst->objPtr = JimNewScriptLineObj(interp, lineargs, linenr); + Jim_IncrRefCount(linefirst->objPtr); + + + lineargs = 0; + linefirst = token++; + } + i++; + continue; + } + else if (wordtokens != 1) { + + token->type = JIM_TT_WORD; + token->objPtr = Jim_NewIntObj(interp, wordtokens); + Jim_IncrRefCount(token->objPtr); + token++; + if (wordtokens < 0) { + + i++; + wordtokens = -wordtokens - 1; + lineargs--; + } + } + + if (lineargs == 0) { + + linenr = tokenlist->list[i].line; + } + lineargs++; + + + while (wordtokens--) { + const ParseToken *t = &tokenlist->list[i++]; + + token->type = t->type; + token->objPtr = JimMakeScriptObj(interp, t); + Jim_IncrRefCount(token->objPtr); + + Jim_SetSourceInfo(interp, token->objPtr, script->fileNameObj, t->line); + token++; + } + } + + if (lineargs == 0) { + token--; + } + + script->len = token - script->token; + + JimPanic((script->len >= count, "allocated script array is too short")); + +#ifdef DEBUG_SHOW_SCRIPT + printf("==== Script (%s) ====\n", Jim_String(script->fileNameObj)); + for (i = 0; i < script->len; i++) { + const ScriptToken *t = &script->token[i]; + printf("[%2d] %s %s\n", i, jim_tt_name(t->type), Jim_String(t->objPtr)); + } +#endif + +} + +int Jim_ScriptIsComplete(Jim_Interp *interp, Jim_Obj *scriptObj, char *stateCharPtr) +{ + ScriptObj *script = JimGetScript(interp, scriptObj); + if (stateCharPtr) { + *stateCharPtr = script->missing; + } + return script->missing == ' ' || script->missing == '}'; +} + +static int JimParseCheckMissing(Jim_Interp *interp, int ch) +{ + const char *msg; + + switch (ch) { + case '\\': + case ' ': + return JIM_OK; + + case '[': + msg = "unmatched \"[\""; + break; + case '{': + msg = "missing close-brace"; + break; + case '}': + msg = "extra characters after close-brace"; + break; + case '"': + default: + msg = "missing quote"; + break; + } + + Jim_SetResultString(interp, msg, -1); + return JIM_ERR; +} + +Jim_Obj *Jim_GetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, int *lineptr) +{ + int line; + Jim_Obj *fileNameObj; + + if (objPtr->typePtr == &sourceObjType) { + fileNameObj = objPtr->internalRep.sourceValue.fileNameObj; + line = objPtr->internalRep.sourceValue.lineNumber; + } + else if (objPtr->typePtr == &scriptObjType) { + ScriptObj *script = JimGetScript(interp, objPtr); + fileNameObj = script->fileNameObj; + line = script->firstline; + } + else { + fileNameObj = interp->emptyObj; + line = 1; + } + *lineptr = line; + return fileNameObj; +} + +void Jim_SetSourceInfo(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *fileNameObj, int lineNumber) +{ + JimPanic((Jim_IsShared(objPtr), "Jim_SetSourceInfo called with shared object")); + Jim_FreeIntRep(interp, objPtr); + Jim_IncrRefCount(fileNameObj); + objPtr->internalRep.sourceValue.fileNameObj = fileNameObj; + objPtr->internalRep.sourceValue.lineNumber = lineNumber; + objPtr->typePtr = &sourceObjType; +} + +static void SubstObjAddTokens(Jim_Interp *interp, struct ScriptObj *script, + ParseTokenList *tokenlist) +{ 
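+ /* For subst objects every parse token is copied one-to-one; no line/word grouping is needed */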
+ int i; + struct ScriptToken *token; + + token = script->token = Jim_Alloc(sizeof(ScriptToken) * tokenlist->count); + + for (i = 0; i < tokenlist->count; i++) { + const ParseToken *t = &tokenlist->list[i]; + + + token->type = t->type; + token->objPtr = JimMakeScriptObj(interp, t); + Jim_IncrRefCount(token->objPtr); + token++; + } + + script->len = i; +} + +static void JimSetScriptFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) +{ + int scriptTextLen; + const char *scriptText = Jim_GetString(objPtr, &scriptTextLen); + struct JimParserCtx parser; + struct ScriptObj *script; + ParseTokenList tokenlist; + Jim_Obj *fileNameObj; + int line; + + + fileNameObj = Jim_GetSourceInfo(interp, objPtr, &line); + + + ScriptTokenListInit(&tokenlist); + + JimParserInit(&parser, scriptText, scriptTextLen, line); + while (!parser.eof) { + JimParseScript(&parser); + ScriptAddToken(&tokenlist, parser.tstart, parser.tend - parser.tstart + 1, parser.tt, + parser.tline); + } + + + ScriptAddToken(&tokenlist, scriptText + scriptTextLen, 0, JIM_TT_EOF, 0); + + + script = Jim_Alloc(sizeof(*script)); + memset(script, 0, sizeof(*script)); + script->inUse = 1; + script->fileNameObj = fileNameObj; + Jim_IncrRefCount(script->fileNameObj); + script->missing = parser.missing.ch; + script->linenr = parser.missing.line; + + ScriptObjAddTokens(interp, script, &tokenlist); + + + ScriptTokenListFree(&tokenlist); + + + Jim_FreeIntRep(interp, objPtr); + Jim_SetIntRepPtr(objPtr, script); + objPtr->typePtr = &scriptObjType; +} + +static ScriptObj *JimGetScript(Jim_Interp *interp, Jim_Obj *objPtr) +{ + if (objPtr == interp->emptyObj) { + + objPtr = interp->nullScriptObj; + } + + if (objPtr->typePtr != &scriptObjType || ((struct ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags) { + JimSetScriptFromAny(interp, objPtr); + } + + return (ScriptObj *)Jim_GetIntRepPtr(objPtr); +} + +void Jim_InterpIncrProcEpoch(Jim_Interp *interp) +{ + interp->procEpoch++; + + + while (interp->oldCmdCache) { + Jim_Cmd *next = interp->oldCmdCache->prevCmd; + Jim_Free(interp->oldCmdCache); + interp->oldCmdCache = next; + } + interp->oldCmdCacheSize = 0; +} + +static void JimIncrCmdRefCount(Jim_Cmd *cmdPtr) +{ + cmdPtr->inUse++; +} + +static void JimDecrCmdRefCount(Jim_Interp *interp, Jim_Cmd *cmdPtr) +{ + if (--cmdPtr->inUse == 0) { + if (cmdPtr->isproc) { + Jim_DecrRefCount(interp, cmdPtr->u.proc.argListObjPtr); + Jim_DecrRefCount(interp, cmdPtr->u.proc.bodyObjPtr); + Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj); + if (cmdPtr->u.proc.staticVars) { + Jim_FreeHashTable(cmdPtr->u.proc.staticVars); + Jim_Free(cmdPtr->u.proc.staticVars); + } + } + else { + + if (cmdPtr->u.native.delProc) { + cmdPtr->u.native.delProc(interp, cmdPtr->u.native.privData); + } + } + if (cmdPtr->prevCmd) { + + JimDecrCmdRefCount(interp, cmdPtr->prevCmd); + } + + cmdPtr->prevCmd = interp->oldCmdCache; + interp->oldCmdCache = cmdPtr; + if (!interp->quitting && ++interp->oldCmdCacheSize >= 1000) { + Jim_InterpIncrProcEpoch(interp); + } + } +} + +static void JimIncrVarRef(Jim_VarVal *vv) +{ + vv->refCount++; +} + +static void JimDecrVarRef(Jim_Interp *interp, Jim_VarVal *vv) +{ + assert(vv->refCount > 0); + if (--vv->refCount == 0) { + if (vv->objPtr) { + Jim_DecrRefCount(interp, vv->objPtr); + } + Jim_Free(vv); + } +} + +static void JimVariablesHTValDestructor(void *interp, void *val) +{ + JimDecrVarRef(interp, val); +} + +static unsigned int JimObjectHTHashFunction(const void *key) +{ + Jim_Obj *keyObj = (Jim_Obj *)key; + int length; + const char *string; + +#ifdef 
JIM_OPTIMIZATION + if (JimIsWide(keyObj) && keyObj->bytes == NULL) { + + jim_wide objValue = JimWideValue(keyObj); + if (objValue > INT_MIN && objValue < INT_MAX) { + unsigned result = 0; + unsigned value = (unsigned)objValue; + + if (objValue < 0) { + value = (unsigned)-objValue; + } + + + do { + result += (result << 3) + (value % 10 + '0'); + value /= 10; + } while (value); + + if (objValue < 0) { + result += (result << 3) + '-'; + } + return result; + } + } +#endif + string = Jim_GetString(keyObj, &length); + return Jim_GenHashFunction((const unsigned char *)string, length); +} + +static int JimObjectHTKeyCompare(void *privdata, const void *key1, const void *key2) +{ + return Jim_StringEqObj((Jim_Obj *)key1, (Jim_Obj *)key2); +} + +static void *JimObjectHTKeyValDup(void *privdata, const void *val) +{ + Jim_IncrRefCount((Jim_Obj *)val); + return (void *)val; +} + +static void JimObjectHTKeyValDestructor(void *interp, void *val) +{ + Jim_DecrRefCount(interp, (Jim_Obj *)val); +} + + +static void *JimVariablesHTValDup(void *privdata, const void *val) +{ + JimIncrVarRef((Jim_VarVal *)val); + return (void *)val; +} + +static const Jim_HashTableType JimVariablesHashTableType = { + JimObjectHTHashFunction, + JimObjectHTKeyValDup, + JimVariablesHTValDup, + JimObjectHTKeyCompare, + JimObjectHTKeyValDestructor, + JimVariablesHTValDestructor +}; + + +static const char *Jim_GetStringNoQualifier(Jim_Obj *objPtr, int *length) +{ + int len; + const char *str = Jim_GetString(objPtr, &len); + if (len >= 2 && str[0] == ':' && str[1] == ':') { + while (len && *str == ':') { + len--; + str++; + } + } + *length = len; + return str; +} + +static unsigned int JimCommandsHT_HashFunction(const void *key) +{ + int len; + const char *str = Jim_GetStringNoQualifier((Jim_Obj *)key, &len); + return Jim_GenHashFunction((const unsigned char *)str, len); +} + +static int JimCommandsHT_KeyCompare(void *privdata, const void *key1, const void *key2) +{ + int len1, len2; + const char *str1 = Jim_GetStringNoQualifier((Jim_Obj *)key1, &len1); + const char *str2 = Jim_GetStringNoQualifier((Jim_Obj *)key2, &len2); + return len1 == len2 && memcmp(str1, str2, len1) == 0; +} + +static void JimCommandsHT_ValDestructor(void *interp, void *val) +{ + JimDecrCmdRefCount(interp, val); +} + +static const Jim_HashTableType JimCommandsHashTableType = { + JimCommandsHT_HashFunction, + JimObjectHTKeyValDup, + NULL, + JimCommandsHT_KeyCompare, + JimObjectHTKeyValDestructor, + JimCommandsHT_ValDestructor +}; + + + +Jim_Obj *Jim_MakeGlobalNamespaceName(Jim_Interp *interp, Jim_Obj *nameObjPtr) +{ +#ifdef jim_ext_namespace + Jim_Obj *resultObj; + + const char *name = Jim_String(nameObjPtr); + if (name[0] == ':' && name[1] == ':') { + return nameObjPtr; + } + Jim_IncrRefCount(nameObjPtr); + resultObj = Jim_NewStringObj(interp, "::", -1); + Jim_AppendObj(interp, resultObj, nameObjPtr); + Jim_DecrRefCount(interp, nameObjPtr); + + return resultObj; +#else + return nameObjPtr; +#endif +} + +static Jim_Obj *JimQualifyName(Jim_Interp *interp, Jim_Obj *objPtr) +{ +#ifdef jim_ext_namespace + if (Jim_Length(interp->framePtr->nsObj)) { + int len; + const char *name = Jim_GetString(objPtr, &len); + if (len < 2 || name[0] != ':' || name[1] != ':') { + + objPtr = Jim_DuplicateObj(interp, interp->framePtr->nsObj); + Jim_AppendStrings(interp, objPtr, "::", name, NULL); + } + } +#endif + Jim_IncrRefCount(objPtr); + return objPtr; +} + +static void JimCreateCommand(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Cmd *cmd) +{ + JimPanic((nameObjPtr->refCount == 0, 
"JimCreateCommand called with zero ref count name")); + + if (interp->local) { + Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, nameObjPtr); + if (he) { + + cmd->prevCmd = Jim_GetHashEntryVal(he); + Jim_SetHashVal(&interp->commands, he, cmd); + + Jim_InterpIncrProcEpoch(interp); + return; + } + } + + + + Jim_ReplaceHashEntry(&interp->commands, nameObjPtr, cmd); +} + +int Jim_CreateCommandObj(Jim_Interp *interp, Jim_Obj *cmdNameObj, + Jim_CmdProc *cmdProc, void *privData, Jim_DelCmdProc *delProc) +{ + Jim_Cmd *cmdPtr = Jim_Alloc(sizeof(*cmdPtr)); + + + memset(cmdPtr, 0, sizeof(*cmdPtr)); + cmdPtr->inUse = 1; + cmdPtr->u.native.delProc = delProc; + cmdPtr->u.native.cmdProc = cmdProc; + cmdPtr->u.native.privData = privData; + + Jim_IncrRefCount(cmdNameObj); + JimCreateCommand(interp, cmdNameObj, cmdPtr); + Jim_DecrRefCount(interp, cmdNameObj); + + return JIM_OK; +} + + +int Jim_CreateCommand(Jim_Interp *interp, const char *cmdNameStr, + Jim_CmdProc *cmdProc, void *privData, Jim_DelCmdProc *delProc) +{ + return Jim_CreateCommandObj(interp, Jim_NewStringObj(interp, cmdNameStr, -1), cmdProc, privData, delProc); +} + +static int JimCreateProcedureStatics(Jim_Interp *interp, Jim_Cmd *cmdPtr, Jim_Obj *staticsListObjPtr) +{ + int len, i; + + len = Jim_ListLength(interp, staticsListObjPtr); + if (len == 0) { + return JIM_OK; + } + + cmdPtr->u.proc.staticVars = Jim_Alloc(sizeof(Jim_HashTable)); + Jim_InitHashTable(cmdPtr->u.proc.staticVars, &JimVariablesHashTableType, interp); + for (i = 0; i < len; i++) { + Jim_Obj *initObjPtr = NULL; + Jim_Obj *nameObjPtr; + Jim_VarVal *vv = NULL; + Jim_Obj *objPtr = Jim_ListGetIndex(interp, staticsListObjPtr, i); + int subLen = Jim_ListLength(interp, objPtr); + int byref = 0; + + + if (subLen != 1 && subLen != 2) { + Jim_SetResultFormatted(interp, "too many fields in static specifier \"%#s\"", + objPtr); + return JIM_ERR; + } + + nameObjPtr = Jim_ListGetIndex(interp, objPtr, 0); + + + if (subLen == 1) { + int len; + const char *pt = Jim_GetString(nameObjPtr, &len); + if (*pt == '&') { + + nameObjPtr = Jim_NewStringObj(interp, pt + 1, len - 1); + byref = 1; + } + } + Jim_IncrRefCount(nameObjPtr); + + if (subLen == 1) { + switch (SetVariableFromAny(interp, nameObjPtr)) { + case JIM_DICT_SUGAR: + + if (byref) { + Jim_SetResultFormatted(interp, "Can't link to array element \"%#s\"", nameObjPtr); + } + else { + Jim_SetResultFormatted(interp, "Can't initialise array element \"%#s\"", nameObjPtr); + } + Jim_DecrRefCount(interp, nameObjPtr); + return JIM_ERR; + + case JIM_OK: + if (byref) { + vv = nameObjPtr->internalRep.varValue.vv; + } + else { + initObjPtr = Jim_GetVariable(interp, nameObjPtr, JIM_NONE); + } + break; + + case JIM_ERR: + + Jim_SetResultFormatted(interp, + "variable for initialization of static \"%#s\" not found in the local context", + nameObjPtr); + Jim_DecrRefCount(interp, nameObjPtr); + return JIM_ERR; + } + } + else { + initObjPtr = Jim_ListGetIndex(interp, objPtr, 1); + } + + if (vv == NULL) { + vv = Jim_Alloc(sizeof(*vv)); + vv->objPtr = initObjPtr; + Jim_IncrRefCount(vv->objPtr); + vv->linkFramePtr = NULL; + vv->refCount = 0; + } + + if (JimSetNewVariable(cmdPtr->u.proc.staticVars, nameObjPtr, vv) != JIM_OK) { + Jim_SetResultFormatted(interp, + "static variable name \"%#s\" duplicated in statics list", nameObjPtr); + JimIncrVarRef(vv); + JimDecrVarRef(interp, vv); + Jim_DecrRefCount(interp, nameObjPtr); + return JIM_ERR; + } + + Jim_DecrRefCount(interp, nameObjPtr); + } + return JIM_OK; +} + + +#ifdef jim_ext_namespace +static const char 
*Jim_memrchr(const char *p, int c, int len) +{ + int i; + for (i = len; i > 0; i--) { + if (p[i] == c) { + return p + i; + } + } + return NULL; +} +#endif + +static void JimUpdateProcNamespace(Jim_Interp *interp, Jim_Cmd *cmdPtr, Jim_Obj *nameObjPtr) +{ +#ifdef jim_ext_namespace + if (cmdPtr->isproc) { + int len; + const char *cmdname = Jim_GetStringNoQualifier(nameObjPtr, &len); + + const char *pt = Jim_memrchr(cmdname, ':', len); + if (pt && pt != cmdname && pt[-1] == ':') { + pt++; + Jim_DecrRefCount(interp, cmdPtr->u.proc.nsObj); + cmdPtr->u.proc.nsObj = Jim_NewStringObj(interp, cmdname, pt - cmdname - 2); + Jim_IncrRefCount(cmdPtr->u.proc.nsObj); + + Jim_Obj *tempObj = Jim_NewStringObj(interp, pt, len - (pt - cmdname)); + if (Jim_FindHashEntry(&interp->commands, tempObj)) { + + Jim_InterpIncrProcEpoch(interp); + } + Jim_FreeNewObj(interp, tempObj); + } + } +#endif +} + +static Jim_Cmd *JimCreateProcedureCmd(Jim_Interp *interp, Jim_Obj *argListObjPtr, + Jim_Obj *staticsListObjPtr, Jim_Obj *bodyObjPtr, Jim_Obj *nsObj) +{ + Jim_Cmd *cmdPtr; + int argListLen; + int i; + + argListLen = Jim_ListLength(interp, argListObjPtr); + + + cmdPtr = Jim_Alloc(sizeof(*cmdPtr) + sizeof(struct Jim_ProcArg) * argListLen); + assert(cmdPtr); + memset(cmdPtr, 0, sizeof(*cmdPtr)); + cmdPtr->inUse = 1; + cmdPtr->isproc = 1; + cmdPtr->u.proc.argListObjPtr = argListObjPtr; + cmdPtr->u.proc.argListLen = argListLen; + cmdPtr->u.proc.bodyObjPtr = bodyObjPtr; + cmdPtr->u.proc.argsPos = -1; + cmdPtr->u.proc.arglist = (struct Jim_ProcArg *)(cmdPtr + 1); + cmdPtr->u.proc.nsObj = nsObj ? nsObj : interp->emptyObj; + Jim_IncrRefCount(argListObjPtr); + Jim_IncrRefCount(bodyObjPtr); + Jim_IncrRefCount(cmdPtr->u.proc.nsObj); + + + if (staticsListObjPtr && JimCreateProcedureStatics(interp, cmdPtr, staticsListObjPtr) != JIM_OK) { + goto err; + } + + + + for (i = 0; i < argListLen; i++) { + Jim_Obj *argPtr; + Jim_Obj *nameObjPtr; + Jim_Obj *defaultObjPtr; + int len; + + + argPtr = Jim_ListGetIndex(interp, argListObjPtr, i); + len = Jim_ListLength(interp, argPtr); + if (len == 0) { + Jim_SetResultString(interp, "argument with no name", -1); +err: + JimDecrCmdRefCount(interp, cmdPtr); + return NULL; + } + if (len > 2) { + Jim_SetResultFormatted(interp, "too many fields in argument specifier \"%#s\"", argPtr); + goto err; + } + + if (len == 2) { + + nameObjPtr = Jim_ListGetIndex(interp, argPtr, 0); + defaultObjPtr = Jim_ListGetIndex(interp, argPtr, 1); + } + else { + + nameObjPtr = argPtr; + defaultObjPtr = NULL; + } + + + if (Jim_CompareStringImmediate(interp, nameObjPtr, "args")) { + if (cmdPtr->u.proc.argsPos >= 0) { + Jim_SetResultString(interp, "'args' specified more than once", -1); + goto err; + } + cmdPtr->u.proc.argsPos = i; + } + else { + if (len == 2) { + cmdPtr->u.proc.optArity++; + } + else { + cmdPtr->u.proc.reqArity++; + } + } + + cmdPtr->u.proc.arglist[i].nameObjPtr = nameObjPtr; + cmdPtr->u.proc.arglist[i].defaultObjPtr = defaultObjPtr; + } + + return cmdPtr; +} + +int Jim_DeleteCommand(Jim_Interp *interp, Jim_Obj *nameObj) +{ + int ret = JIM_OK; + + nameObj = JimQualifyName(interp, nameObj); + + if (Jim_DeleteHashEntry(&interp->commands, nameObj) == JIM_ERR) { + Jim_SetResultFormatted(interp, "can't delete \"%#s\": command doesn't exist", nameObj); + ret = JIM_ERR; + } + Jim_DecrRefCount(interp, nameObj); + + return ret; +} + +int Jim_RenameCommand(Jim_Interp *interp, Jim_Obj *oldNameObj, Jim_Obj *newNameObj) +{ + int ret = JIM_ERR; + Jim_HashEntry *he; + Jim_Cmd *cmdPtr; + + if (Jim_Length(newNameObj) == 0) { + 
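+        /* Renaming to an empty name is equivalent to deleting the command. */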
return Jim_DeleteCommand(interp, oldNameObj); + } + + + + oldNameObj = JimQualifyName(interp, oldNameObj); + newNameObj = JimQualifyName(interp, newNameObj); + + + he = Jim_FindHashEntry(&interp->commands, oldNameObj); + if (he == NULL) { + Jim_SetResultFormatted(interp, "can't rename \"%#s\": command doesn't exist", oldNameObj); + } + else if (Jim_FindHashEntry(&interp->commands, newNameObj)) { + Jim_SetResultFormatted(interp, "can't rename to \"%#s\": command already exists", newNameObj); + } + else { + cmdPtr = Jim_GetHashEntryVal(he); + if (cmdPtr->prevCmd) { + Jim_SetResultFormatted(interp, "can't rename local command \"%#s\"", oldNameObj); + } + else { + + JimIncrCmdRefCount(cmdPtr); + JimUpdateProcNamespace(interp, cmdPtr, newNameObj); + Jim_AddHashEntry(&interp->commands, newNameObj, cmdPtr); + + + Jim_DeleteHashEntry(&interp->commands, oldNameObj); + + + Jim_InterpIncrProcEpoch(interp); + + ret = JIM_OK; + } + } + + Jim_DecrRefCount(interp, oldNameObj); + Jim_DecrRefCount(interp, newNameObj); + + return ret; +} + + +static void FreeCommandInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_DecrRefCount(interp, objPtr->internalRep.cmdValue.nsObj); +} + +static void DupCommandInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + dupPtr->internalRep.cmdValue = srcPtr->internalRep.cmdValue; + dupPtr->typePtr = srcPtr->typePtr; + Jim_IncrRefCount(dupPtr->internalRep.cmdValue.nsObj); +} + +static const Jim_ObjType commandObjType = { + "command", + FreeCommandInternalRep, + DupCommandInternalRep, + NULL, + JIM_TYPE_REFERENCES, +}; + +Jim_Cmd *Jim_GetCommand(Jim_Interp *interp, Jim_Obj *objPtr, int flags) +{ + Jim_Cmd *cmd; + + if (objPtr->typePtr == &commandObjType + && objPtr->internalRep.cmdValue.procEpoch == interp->procEpoch +#ifdef jim_ext_namespace + && Jim_StringEqObj(objPtr->internalRep.cmdValue.nsObj, interp->framePtr->nsObj) +#endif + && objPtr->internalRep.cmdValue.cmdPtr->inUse) { + + cmd = objPtr->internalRep.cmdValue.cmdPtr; + } + else { + Jim_Obj *qualifiedNameObj = JimQualifyName(interp, objPtr); + Jim_HashEntry *he = Jim_FindHashEntry(&interp->commands, qualifiedNameObj); +#ifdef jim_ext_namespace + if (he == NULL && Jim_Length(interp->framePtr->nsObj)) { + he = Jim_FindHashEntry(&interp->commands, objPtr); + } +#endif + if (he == NULL) { + if (flags & JIM_ERRMSG) { + Jim_SetResultFormatted(interp, "invalid command name \"%#s\"", objPtr); + } + Jim_DecrRefCount(interp, qualifiedNameObj); + return NULL; + } + cmd = Jim_GetHashEntryVal(he); + + cmd->cmdNameObj = Jim_GetHashEntryKey(he); + + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &commandObjType; + objPtr->internalRep.cmdValue.procEpoch = interp->procEpoch; + objPtr->internalRep.cmdValue.cmdPtr = cmd; + objPtr->internalRep.cmdValue.nsObj = interp->framePtr->nsObj; + Jim_IncrRefCount(interp->framePtr->nsObj); + Jim_DecrRefCount(interp, qualifiedNameObj); + } + while (cmd->u.proc.upcall) { + cmd = cmd->prevCmd; + } + return cmd; +} + + + +static const Jim_ObjType variableObjType = { + "variable", + NULL, + NULL, + NULL, + JIM_TYPE_REFERENCES, +}; + +static int SetVariableFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) +{ + const char *varName; + Jim_CallFrame *framePtr; + int global; + int len; + Jim_VarVal *vv; + + + if (objPtr->typePtr == &variableObjType) { + framePtr = objPtr->internalRep.varValue.global ? 
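+            /* Globals are cached against the top level frame, locals against the current frame; the cached lookup stays valid only while that frame's id is unchanged. */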
interp->topFramePtr : interp->framePtr; + if (objPtr->internalRep.varValue.callFrameId == framePtr->id) { + + return JIM_OK; + } + + } + else if (objPtr->typePtr == &dictSubstObjType) { + return JIM_DICT_SUGAR; + } + + varName = Jim_GetString(objPtr, &len); + + + if (len && varName[len - 1] == ')' && strchr(varName, '(') != NULL) { + return JIM_DICT_SUGAR; + } + + if (varName[0] == ':' && varName[1] == ':') { + while (*varName == ':') { + varName++; + len--; + } + global = 1; + framePtr = interp->topFramePtr; + + Jim_Obj *tempObj = Jim_NewStringObj(interp, varName, len); + vv = JimFindVariable(&framePtr->vars, tempObj); + Jim_FreeNewObj(interp, tempObj); + } + else { + global = 0; + framePtr = interp->framePtr; + + vv = JimFindVariable(&framePtr->vars, objPtr); + if (vv == NULL && framePtr->staticVars) { + + vv = JimFindVariable(framePtr->staticVars, objPtr); + } + } + + if (vv == NULL) { + return JIM_ERR; + } + + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &variableObjType; + objPtr->internalRep.varValue.callFrameId = framePtr->id; + objPtr->internalRep.varValue.vv = vv; + objPtr->internalRep.varValue.global = global; + return JIM_OK; +} + + +static int JimDictSugarSet(Jim_Interp *interp, Jim_Obj *ObjPtr, Jim_Obj *valObjPtr); +static Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *ObjPtr, int flags); + +static int JimSetNewVariable(Jim_HashTable *ht, Jim_Obj *nameObjPtr, Jim_VarVal *vv) +{ + return Jim_AddHashEntry(ht, nameObjPtr, vv); +} + +static Jim_VarVal *JimFindVariable(Jim_HashTable *ht, Jim_Obj *nameObjPtr) +{ + Jim_HashEntry *he = Jim_FindHashEntry(ht, nameObjPtr); + if (he) { + return (Jim_VarVal *)Jim_GetHashEntryVal(he); + } + return NULL; +} + +static int JimUnsetVariable(Jim_HashTable *ht, Jim_Obj *nameObjPtr) +{ + return Jim_DeleteHashEntry(ht, nameObjPtr); +} + +static Jim_VarVal *JimCreateVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr) +{ + const char *name; + Jim_CallFrame *framePtr; + int global; + int len; + + + Jim_VarVal *vv = Jim_Alloc(sizeof(*vv)); + + vv->objPtr = valObjPtr; + Jim_IncrRefCount(valObjPtr); + vv->linkFramePtr = NULL; + vv->refCount = 0; + + name = Jim_GetString(nameObjPtr, &len); + if (name[0] == ':' && name[1] == ':') { + while (*name == ':') { + name++; + len--; + } + framePtr = interp->topFramePtr; + global = 1; + JimSetNewVariable(&framePtr->vars, Jim_NewStringObj(interp, name, len), vv); + } + else { + framePtr = interp->framePtr; + global = 0; + JimSetNewVariable(&framePtr->vars, nameObjPtr, vv); + } + + + Jim_FreeIntRep(interp, nameObjPtr); + nameObjPtr->typePtr = &variableObjType; + nameObjPtr->internalRep.varValue.callFrameId = framePtr->id; + nameObjPtr->internalRep.varValue.vv = vv; + nameObjPtr->internalRep.varValue.global = global; + + return vv; +} + +int Jim_SetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, Jim_Obj *valObjPtr) +{ + int err; + Jim_VarVal *vv; + + switch (SetVariableFromAny(interp, nameObjPtr)) { + case JIM_DICT_SUGAR: + return JimDictSugarSet(interp, nameObjPtr, valObjPtr); + + case JIM_ERR: + JimCreateVariable(interp, nameObjPtr, valObjPtr); + break; + + case JIM_OK: + vv = nameObjPtr->internalRep.varValue.vv; + if (vv->linkFramePtr == NULL) { + Jim_IncrRefCount(valObjPtr); + Jim_DecrRefCount(interp, vv->objPtr); + vv->objPtr = valObjPtr; + } + else { + Jim_CallFrame *savedCallFrame; + + savedCallFrame = interp->framePtr; + interp->framePtr = vv->linkFramePtr; + err = Jim_SetVariable(interp, vv->objPtr, valObjPtr); + interp->framePtr = savedCallFrame; + if (err != JIM_OK) + 
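+                /* Propagate any failure from assigning the link target in its owning frame. */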
return err; + } + } + return JIM_OK; +} + +int Jim_SetVariableStr(Jim_Interp *interp, const char *name, Jim_Obj *objPtr) +{ + Jim_Obj *nameObjPtr; + int result; + + nameObjPtr = Jim_NewStringObj(interp, name, -1); + Jim_IncrRefCount(nameObjPtr); + result = Jim_SetVariable(interp, nameObjPtr, objPtr); + Jim_DecrRefCount(interp, nameObjPtr); + return result; +} + +int Jim_SetGlobalVariableStr(Jim_Interp *interp, const char *name, Jim_Obj *objPtr) +{ + Jim_CallFrame *savedFramePtr; + int result; + + savedFramePtr = interp->framePtr; + interp->framePtr = interp->topFramePtr; + result = Jim_SetVariableStr(interp, name, objPtr); + interp->framePtr = savedFramePtr; + return result; +} + +int Jim_SetVariableStrWithStr(Jim_Interp *interp, const char *name, const char *val) +{ + Jim_Obj *valObjPtr; + int result; + + valObjPtr = Jim_NewStringObj(interp, val, -1); + Jim_IncrRefCount(valObjPtr); + result = Jim_SetVariableStr(interp, name, valObjPtr); + Jim_DecrRefCount(interp, valObjPtr); + return result; +} + +int Jim_SetVariableLink(Jim_Interp *interp, Jim_Obj *nameObjPtr, + Jim_Obj *targetNameObjPtr, Jim_CallFrame *targetCallFrame) +{ + const char *varName; + const char *targetName; + Jim_CallFrame *framePtr; + Jim_VarVal *vv; + int len; + int varnamelen; + + + switch (SetVariableFromAny(interp, nameObjPtr)) { + case JIM_DICT_SUGAR: + + Jim_SetResultFormatted(interp, "bad variable name \"%#s\": upvar won't create a scalar variable that looks like an array element", nameObjPtr); + return JIM_ERR; + + case JIM_OK: + vv = nameObjPtr->internalRep.varValue.vv; + + if (vv->linkFramePtr == NULL) { + Jim_SetResultFormatted(interp, "variable \"%#s\" already exists", nameObjPtr); + return JIM_ERR; + } + + + vv->linkFramePtr = NULL; + break; + } + + + + varName = Jim_GetString(nameObjPtr, &varnamelen); + + if (varName[0] == ':' && varName[1] == ':') { + while (*varName == ':') { + varName++; + varnamelen--; + } + + framePtr = interp->topFramePtr; + } + else { + framePtr = interp->framePtr; + } + + targetName = Jim_GetString(targetNameObjPtr, &len); + if (targetName[0] == ':' && targetName[1] == ':') { + while (*targetName == ':') { + targetName++; + len--; + } + targetNameObjPtr = Jim_NewStringObj(interp, targetName, len); + targetCallFrame = interp->topFramePtr; + } + Jim_IncrRefCount(targetNameObjPtr); + + if (framePtr->level < targetCallFrame->level) { + Jim_SetResultFormatted(interp, + "bad variable name \"%#s\": upvar won't create namespace variable that refers to procedure variable", + nameObjPtr); + Jim_DecrRefCount(interp, targetNameObjPtr); + return JIM_ERR; + } + + + if (framePtr == targetCallFrame) { + Jim_Obj *objPtr = targetNameObjPtr; + + + while (1) { + if (Jim_Length(objPtr) == varnamelen && memcmp(Jim_String(objPtr), varName, varnamelen) == 0) { + Jim_SetResultString(interp, "can't upvar from variable to itself", -1); + Jim_DecrRefCount(interp, targetNameObjPtr); + return JIM_ERR; + } + if (SetVariableFromAny(interp, objPtr) != JIM_OK) + break; + vv = objPtr->internalRep.varValue.vv; + if (vv->linkFramePtr != targetCallFrame) + break; + objPtr = vv->objPtr; + } + } + + + Jim_SetVariable(interp, nameObjPtr, targetNameObjPtr); + + nameObjPtr->internalRep.varValue.vv->linkFramePtr = targetCallFrame; + Jim_DecrRefCount(interp, targetNameObjPtr); + return JIM_OK; +} + +Jim_Obj *Jim_GetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags) +{ + if (interp->safeexpr) { + return nameObjPtr; + } + switch (SetVariableFromAny(interp, nameObjPtr)) { + case JIM_OK:{ + Jim_VarVal *vv = 
nameObjPtr->internalRep.varValue.vv; + + if (vv->linkFramePtr == NULL) { + return vv->objPtr; + } + else { + Jim_Obj *objPtr; + + + Jim_CallFrame *savedCallFrame = interp->framePtr; + + interp->framePtr = vv->linkFramePtr; + objPtr = Jim_GetVariable(interp, vv->objPtr, flags); + interp->framePtr = savedCallFrame; + if (objPtr) { + return objPtr; + } + + } + } + break; + + case JIM_DICT_SUGAR: + + return JimDictSugarGet(interp, nameObjPtr, flags); + } + if (flags & JIM_ERRMSG) { + Jim_SetResultFormatted(interp, "can't read \"%#s\": no such variable", nameObjPtr); + } + return NULL; +} + +Jim_Obj *Jim_GetGlobalVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags) +{ + Jim_CallFrame *savedFramePtr; + Jim_Obj *objPtr; + + savedFramePtr = interp->framePtr; + interp->framePtr = interp->topFramePtr; + objPtr = Jim_GetVariable(interp, nameObjPtr, flags); + interp->framePtr = savedFramePtr; + + return objPtr; +} + +Jim_Obj *Jim_GetVariableStr(Jim_Interp *interp, const char *name, int flags) +{ + Jim_Obj *nameObjPtr, *varObjPtr; + + nameObjPtr = Jim_NewStringObj(interp, name, -1); + Jim_IncrRefCount(nameObjPtr); + varObjPtr = Jim_GetVariable(interp, nameObjPtr, flags); + Jim_DecrRefCount(interp, nameObjPtr); + return varObjPtr; +} + +Jim_Obj *Jim_GetGlobalVariableStr(Jim_Interp *interp, const char *name, int flags) +{ + Jim_CallFrame *savedFramePtr; + Jim_Obj *objPtr; + + savedFramePtr = interp->framePtr; + interp->framePtr = interp->topFramePtr; + objPtr = Jim_GetVariableStr(interp, name, flags); + interp->framePtr = savedFramePtr; + + return objPtr; +} + +int Jim_UnsetVariable(Jim_Interp *interp, Jim_Obj *nameObjPtr, int flags) +{ + Jim_VarVal *vv; + int retval; + Jim_CallFrame *framePtr; + + retval = SetVariableFromAny(interp, nameObjPtr); + if (retval == JIM_DICT_SUGAR) { + + return JimDictSugarSet(interp, nameObjPtr, NULL); + } + else if (retval == JIM_OK) { + vv = nameObjPtr->internalRep.varValue.vv; + + + if (vv->linkFramePtr) { + framePtr = interp->framePtr; + interp->framePtr = vv->linkFramePtr; + retval = Jim_UnsetVariable(interp, vv->objPtr, JIM_NONE); + interp->framePtr = framePtr; + } + else { + if (nameObjPtr->internalRep.varValue.global) { + int len; + const char *name = Jim_GetString(nameObjPtr, &len); + while (*name == ':') { + name++; + len--; + } + framePtr = interp->topFramePtr; + Jim_Obj *tempObj = Jim_NewStringObj(interp, name, len); + retval = JimUnsetVariable(&framePtr->vars, tempObj); + Jim_FreeNewObj(interp, tempObj); + } + else { + framePtr = interp->framePtr; + retval = JimUnsetVariable(&framePtr->vars, nameObjPtr); + } + + if (retval == JIM_OK) { + + framePtr->id = interp->callFrameEpoch++; + } + } + } + if (retval != JIM_OK && (flags & JIM_ERRMSG)) { + Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such variable", nameObjPtr); + } + return retval; +} + + + +static void JimDictSugarParseVarKey(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj **varPtrPtr, Jim_Obj **keyPtrPtr) +{ + const char *str, *p; + int len, keyLen; + Jim_Obj *varObjPtr, *keyObjPtr; + + str = Jim_GetString(objPtr, &len); + + p = strchr(str, '('); + JimPanic((p == NULL, "JimDictSugarParseVarKey() called for non-dict-sugar (%s)", str)); + + varObjPtr = Jim_NewStringObj(interp, str, p - str); + + p++; + keyLen = (str + len) - p; + if (str[len - 1] == ')') { + keyLen--; + } + + + keyObjPtr = Jim_NewStringObj(interp, p, keyLen); + + Jim_IncrRefCount(varObjPtr); + Jim_IncrRefCount(keyObjPtr); + *varPtrPtr = varObjPtr; + *keyPtrPtr = keyObjPtr; +} + +static int JimDictSugarSet(Jim_Interp 
*interp, Jim_Obj *objPtr, Jim_Obj *valObjPtr) +{ + int err; + + SetDictSubstFromAny(interp, objPtr); + + err = Jim_SetDictKeysVector(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, + &objPtr->internalRep.dictSubstValue.indexObjPtr, 1, valObjPtr, JIM_MUSTEXIST); + + if (err == JIM_OK) { + + Jim_SetEmptyResult(interp); + } + else { + if (!valObjPtr) { + + if (Jim_GetVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, JIM_NONE)) { + Jim_SetResultFormatted(interp, "can't unset \"%#s\": no such element in array", + objPtr); + return err; + } + } + + Jim_SetResultFormatted(interp, "can't %s \"%#s\": variable isn't array", + (valObjPtr ? "set" : "unset"), objPtr); + } + return err; +} + +static Jim_Obj *JimDictExpandArrayVariable(Jim_Interp *interp, Jim_Obj *varObjPtr, + Jim_Obj *keyObjPtr, int flags) +{ + Jim_Obj *dictObjPtr; + Jim_Obj *resObjPtr = NULL; + int ret; + + dictObjPtr = Jim_GetVariable(interp, varObjPtr, JIM_ERRMSG); + if (!dictObjPtr) { + return NULL; + } + + ret = Jim_DictKey(interp, dictObjPtr, keyObjPtr, &resObjPtr, JIM_NONE); + if (ret != JIM_OK) { + Jim_SetResultFormatted(interp, + "can't read \"%#s(%#s)\": %s array", varObjPtr, keyObjPtr, + ret < 0 ? "variable isn't" : "no such element in"); + } + else if ((flags & JIM_UNSHARED) && Jim_IsShared(dictObjPtr)) { + + Jim_SetVariable(interp, varObjPtr, Jim_DuplicateObj(interp, dictObjPtr)); + } + + return resObjPtr; +} + + +static Jim_Obj *JimDictSugarGet(Jim_Interp *interp, Jim_Obj *objPtr, int flags) +{ + SetDictSubstFromAny(interp, objPtr); + + return JimDictExpandArrayVariable(interp, + objPtr->internalRep.dictSubstValue.varNameObjPtr, + objPtr->internalRep.dictSubstValue.indexObjPtr, flags); +} + + + +void FreeDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr); + Jim_DecrRefCount(interp, objPtr->internalRep.dictSubstValue.indexObjPtr); +} + +static void DupDictSubstInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + + dupPtr->internalRep = srcPtr->internalRep; + + Jim_IncrRefCount(dupPtr->internalRep.dictSubstValue.varNameObjPtr); + Jim_IncrRefCount(dupPtr->internalRep.dictSubstValue.indexObjPtr); +} + + +static void SetDictSubstFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + if (objPtr->typePtr != &dictSubstObjType) { + Jim_Obj *varObjPtr, *keyObjPtr; + + if (objPtr->typePtr == &interpolatedObjType) { + + + varObjPtr = objPtr->internalRep.dictSubstValue.varNameObjPtr; + keyObjPtr = objPtr->internalRep.dictSubstValue.indexObjPtr; + + Jim_IncrRefCount(varObjPtr); + Jim_IncrRefCount(keyObjPtr); + } + else { + JimDictSugarParseVarKey(interp, objPtr, &varObjPtr, &keyObjPtr); + } + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &dictSubstObjType; + objPtr->internalRep.dictSubstValue.varNameObjPtr = varObjPtr; + objPtr->internalRep.dictSubstValue.indexObjPtr = keyObjPtr; + } +} + +static Jim_Obj *JimExpandDictSugar(Jim_Interp *interp, Jim_Obj *objPtr) +{ + Jim_Obj *resObjPtr = NULL; + Jim_Obj *substKeyObjPtr = NULL; + + if (interp->safeexpr) { + return objPtr; + } + + SetDictSubstFromAny(interp, objPtr); + + if (Jim_SubstObj(interp, objPtr->internalRep.dictSubstValue.indexObjPtr, + &substKeyObjPtr, JIM_NONE) + != JIM_OK) { + return NULL; + } + Jim_IncrRefCount(substKeyObjPtr); + resObjPtr = + JimDictExpandArrayVariable(interp, objPtr->internalRep.dictSubstValue.varNameObjPtr, + substKeyObjPtr, 0); + Jim_DecrRefCount(interp, substKeyObjPtr); + + return resObjPtr; +} + + +static Jim_CallFrame 
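+/* Call frames are recycled through interp->freeFramesList so a typical proc call can reuse a frame (and its variable table) instead of reallocating it. */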
*JimCreateCallFrame(Jim_Interp *interp, Jim_CallFrame *parent, Jim_Obj *nsObj) +{ + Jim_CallFrame *cf; + + if (interp->freeFramesList) { + cf = interp->freeFramesList; + interp->freeFramesList = cf->next; + + cf->argv = NULL; + cf->argc = 0; + cf->procArgsObjPtr = NULL; + cf->procBodyObjPtr = NULL; + cf->next = NULL; + cf->staticVars = NULL; + cf->localCommands = NULL; + cf->tailcallObj = NULL; + cf->tailcallCmd = NULL; + } + else { + cf = Jim_Alloc(sizeof(*cf)); + memset(cf, 0, sizeof(*cf)); + + Jim_InitHashTable(&cf->vars, &JimVariablesHashTableType, interp); + } + + cf->id = interp->callFrameEpoch++; + cf->parent = parent; + cf->level = parent ? parent->level + 1 : 0; + cf->nsObj = nsObj; + Jim_IncrRefCount(nsObj); + + return cf; +} + +static int JimDeleteLocalProcs(Jim_Interp *interp, Jim_Stack *localCommands) +{ + + if (localCommands) { + Jim_Obj *cmdNameObj; + + while ((cmdNameObj = Jim_StackPop(localCommands)) != NULL) { + Jim_HashTable *ht = &interp->commands; + Jim_HashEntry *he = Jim_FindHashEntry(ht, cmdNameObj); + if (he) { + Jim_Cmd *cmd = Jim_GetHashEntryVal(he); + if (cmd->prevCmd) { + Jim_Cmd *prevCmd = cmd->prevCmd; + cmd->prevCmd = NULL; + + + JimDecrCmdRefCount(interp, cmd); + + + Jim_SetHashVal(ht, he, prevCmd); + } + else { + Jim_DeleteHashEntry(ht, cmdNameObj); + } + } + Jim_DecrRefCount(interp, cmdNameObj); + } + Jim_FreeStack(localCommands); + Jim_Free(localCommands); + } + return JIM_OK; +} + +static int JimInvokeDefer(Jim_Interp *interp, int retcode) +{ + Jim_Obj *objPtr; + + + if (JimFindVariable(&interp->framePtr->vars, interp->defer) == NULL) { + return retcode; + } + objPtr = Jim_GetVariable(interp, interp->defer, JIM_NONE); + + if (objPtr) { + int ret = JIM_OK; + int i; + int listLen = Jim_ListLength(interp, objPtr); + Jim_Obj *resultObjPtr; + + Jim_IncrRefCount(objPtr); + + resultObjPtr = Jim_GetResult(interp); + Jim_IncrRefCount(resultObjPtr); + Jim_SetEmptyResult(interp); + + + for (i = listLen; i > 0; i--) { + + Jim_Obj *scriptObjPtr = Jim_ListGetIndex(interp, objPtr, i - 1); + ret = Jim_EvalObj(interp, scriptObjPtr); + if (ret != JIM_OK) { + break; + } + } + + if (ret == JIM_OK || retcode == JIM_ERR) { + + Jim_SetResult(interp, resultObjPtr); + } + else { + retcode = ret; + } + + Jim_DecrRefCount(interp, resultObjPtr); + Jim_DecrRefCount(interp, objPtr); + } + return retcode; +} + +#define JIM_FCF_FULL 0 +#define JIM_FCF_REUSE 1 +static void JimFreeCallFrame(Jim_Interp *interp, Jim_CallFrame *cf, int action) + { + JimDeleteLocalProcs(interp, cf->localCommands); + + if (cf->procArgsObjPtr) + Jim_DecrRefCount(interp, cf->procArgsObjPtr); + if (cf->procBodyObjPtr) + Jim_DecrRefCount(interp, cf->procBodyObjPtr); + Jim_DecrRefCount(interp, cf->nsObj); + if (action == JIM_FCF_FULL || cf->vars.size != JIM_HT_INITIAL_SIZE) + Jim_FreeHashTable(&cf->vars); + else { + Jim_ClearHashTable(&cf->vars); + } + cf->next = interp->freeFramesList; + interp->freeFramesList = cf; +} + + + +int Jim_IsBigEndian(void) +{ + union { + unsigned short s; + unsigned char c[2]; + } uval = {0x0102}; + + return uval.c[0] == 1; +} + + +Jim_Interp *Jim_CreateInterp(void) +{ + Jim_Interp *i = Jim_Alloc(sizeof(*i)); + + memset(i, 0, sizeof(*i)); + + i->maxCallFrameDepth = JIM_MAX_CALLFRAME_DEPTH; + i->maxEvalDepth = JIM_MAX_EVAL_DEPTH; + i->lastCollectTime = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW); + + Jim_InitHashTable(&i->commands, &JimCommandsHashTableType, i); +#ifdef JIM_REFERENCES + Jim_InitHashTable(&i->references, &JimReferencesHashTableType, i); +#endif + 
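+    /* Create the remaining interpreter-wide tables and the shared objects (empty, true, false, null script, ...) that the interpreter keeps referenced for its lifetime. */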
Jim_InitHashTable(&i->assocData, &JimAssocDataHashTableType, i); + Jim_InitHashTable(&i->packages, &JimPackageHashTableType, NULL); + i->emptyObj = Jim_NewEmptyStringObj(i); + i->trueObj = Jim_NewIntObj(i, 1); + i->falseObj = Jim_NewIntObj(i, 0); + i->framePtr = i->topFramePtr = JimCreateCallFrame(i, NULL, i->emptyObj); + i->result = i->emptyObj; + i->stackTrace = Jim_NewListObj(i, NULL, 0); + i->unknown = Jim_NewStringObj(i, "unknown", -1); + i->defer = Jim_NewStringObj(i, "jim::defer", -1); + i->errorProc = i->emptyObj; + i->nullScriptObj = Jim_NewEmptyStringObj(i); + i->evalFrame = &i->topEvalFrame; + i->currentFilenameObj = Jim_NewEmptyStringObj(i); + Jim_IncrRefCount(i->emptyObj); + Jim_IncrRefCount(i->result); + Jim_IncrRefCount(i->stackTrace); + Jim_IncrRefCount(i->unknown); + Jim_IncrRefCount(i->defer); + Jim_IncrRefCount(i->nullScriptObj); + Jim_IncrRefCount(i->errorProc); + Jim_IncrRefCount(i->trueObj); + Jim_IncrRefCount(i->falseObj); + Jim_IncrRefCount(i->currentFilenameObj); + + + Jim_SetVariableStrWithStr(i, JIM_LIBPATH, TCL_LIBRARY); + Jim_SetVariableStrWithStr(i, JIM_INTERACTIVE, "0"); + + Jim_SetVariableStrWithStr(i, "tcl_platform(engine)", "Jim"); + Jim_SetVariableStrWithStr(i, "tcl_platform(os)", TCL_PLATFORM_OS); + Jim_SetVariableStrWithStr(i, "tcl_platform(platform)", TCL_PLATFORM_PLATFORM); + Jim_SetVariableStrWithStr(i, "tcl_platform(pathSeparator)", TCL_PLATFORM_PATH_SEPARATOR); + Jim_SetVariableStrWithStr(i, "tcl_platform(byteOrder)", Jim_IsBigEndian() ? "bigEndian" : "littleEndian"); + Jim_SetVariableStrWithStr(i, "tcl_platform(threaded)", "0"); + Jim_SetVariableStrWithStr(i, "tcl_platform(bootstrap)", "0"); + Jim_SetVariableStr(i, "tcl_platform(pointerSize)", Jim_NewIntObj(i, sizeof(void *))); + Jim_SetVariableStr(i, "tcl_platform(wordSize)", Jim_NewIntObj(i, sizeof(jim_wide))); + Jim_SetVariableStr(i, "tcl_platform(stackFormat)", Jim_NewIntObj(i, 4)); + + return i; +} + +void Jim_FreeInterp(Jim_Interp *i) +{ + Jim_CallFrame *cf, *cfx; + + Jim_Obj *objPtr, *nextObjPtr; + + i->quitting = 1; + + + for (cf = i->framePtr; cf; cf = cfx) { + + JimInvokeDefer(i, JIM_OK); + cfx = cf->parent; + JimFreeCallFrame(i, cf, JIM_FCF_FULL); + } + + + Jim_FreeHashTable(&i->commands); + + Jim_DecrRefCount(i, i->emptyObj); + Jim_DecrRefCount(i, i->trueObj); + Jim_DecrRefCount(i, i->falseObj); + Jim_DecrRefCount(i, i->result); + Jim_DecrRefCount(i, i->stackTrace); + Jim_DecrRefCount(i, i->errorProc); + Jim_DecrRefCount(i, i->unknown); + Jim_DecrRefCount(i, i->defer); + Jim_DecrRefCount(i, i->nullScriptObj); + Jim_DecrRefCount(i, i->currentFilenameObj); + + + Jim_InterpIncrProcEpoch(i); + +#ifdef JIM_REFERENCES + Jim_FreeHashTable(&i->references); +#endif + Jim_FreeHashTable(&i->packages); + Jim_Free(i->prngState); + Jim_FreeHashTable(&i->assocData); + if (i->traceCmdObj) { + Jim_DecrRefCount(i, i->traceCmdObj); + } + +#ifdef JIM_MAINTAINER + if (i->liveList != NULL) { + objPtr = i->liveList; + + printf("\n-------------------------------------\n"); + printf("Objects still in the free list:\n"); + while (objPtr) { + const char *type = objPtr->typePtr ? objPtr->typePtr->name : "string"; + Jim_String(objPtr); + + if (objPtr->bytes && strlen(objPtr->bytes) > 20) { + printf("%p (%d) %-10s: '%.20s...'\n", + (void *)objPtr, objPtr->refCount, type, objPtr->bytes); + } + else { + printf("%p (%d) %-10s: '%s'\n", + (void *)objPtr, objPtr->refCount, type, objPtr->bytes ? 
objPtr->bytes : "(null)"); + } + if (objPtr->typePtr == &sourceObjType) { + printf("FILE %s LINE %d\n", + Jim_String(objPtr->internalRep.sourceValue.fileNameObj), + objPtr->internalRep.sourceValue.lineNumber); + } + objPtr = objPtr->nextObjPtr; + } + printf("-------------------------------------\n\n"); + JimPanic((1, "Live list non empty freeing the interpreter! Leak?")); + } +#endif + + + objPtr = i->freeList; + while (objPtr) { + nextObjPtr = objPtr->nextObjPtr; + Jim_Free(objPtr); + objPtr = nextObjPtr; + } + + + for (cf = i->freeFramesList; cf; cf = cfx) { + cfx = cf->next; + if (cf->vars.table) + Jim_FreeHashTable(&cf->vars); + Jim_Free(cf); + } + + + Jim_Free(i); +} + +Jim_CallFrame *Jim_GetCallFrameByLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr) +{ + long level; + const char *str; + Jim_CallFrame *framePtr; + + if (levelObjPtr) { + str = Jim_String(levelObjPtr); + if (str[0] == '#') { + char *endptr; + + level = jim_strtol(str + 1, &endptr); + if (str[1] == '\0' || endptr[0] != '\0') { + level = -1; + } + } + else { + if (Jim_GetLong(interp, levelObjPtr, &level) != JIM_OK || level < 0) { + level = -1; + } + else { + + level = interp->framePtr->level - level; + } + } + } + else { + str = "1"; + level = interp->framePtr->level - 1; + } + + if (level == 0) { + return interp->topFramePtr; + } + if (level > 0) { + + for (framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) { + if (framePtr->level == level) { + return framePtr; + } + } + } + + Jim_SetResultFormatted(interp, "bad level \"%s\"", str); + return NULL; +} + +static Jim_CallFrame *JimGetCallFrameByInteger(Jim_Interp *interp, long level) +{ + Jim_CallFrame *framePtr; + + if (level == 0) { + return interp->framePtr; + } + + if (level < 0) { + + level = interp->framePtr->level + level; + } + + if (level > 0) { + + for (framePtr = interp->framePtr; framePtr; framePtr = framePtr->parent) { + if (framePtr->level == level) { + return framePtr; + } + } + } + return NULL; +} + +static Jim_EvalFrame *JimGetEvalFrameByProcLevel(Jim_Interp *interp, int proclevel) +{ + Jim_EvalFrame *evalFrame; + + if (proclevel == 0) { + return interp->evalFrame; + } + + if (proclevel < 0) { + + proclevel = interp->procLevel + proclevel; + } + + if (proclevel >= 0) { + + for (evalFrame = interp->evalFrame; evalFrame; evalFrame = evalFrame->parent) { + if (evalFrame->procLevel == proclevel) { + return evalFrame; + } + } + } + return NULL; +} + +static Jim_Obj *JimProcForEvalFrame(Jim_Interp *interp, Jim_EvalFrame *frame) +{ + if (frame == interp->evalFrame || (frame->cmd && frame->cmd->cmdNameObj)) { + Jim_EvalFrame *e; + for (e = frame->parent; e; e = e->parent) { + if (e->cmd && e->cmd->isproc && e->cmd->cmdNameObj) { + break; + } + } + if (e && e->cmd && e->cmd->cmdNameObj) { + return e->cmd->cmdNameObj; + } + } + return NULL; +} + +static void JimAddStackFrame(Jim_Interp *interp, Jim_EvalFrame *frame, Jim_Obj *listObj) +{ + Jim_Obj *procNameObj = JimProcForEvalFrame(interp, frame); + Jim_Obj *fileNameObj = interp->emptyObj; + int linenr = 1; + + if (frame->scriptObj) { + ScriptObj *script = JimGetScript(interp, frame->scriptObj); + fileNameObj = script->fileNameObj; + linenr = script->linenr; + } + + Jim_ListAppendElement(interp, listObj, procNameObj ? 
procNameObj : interp->emptyObj); + Jim_ListAppendElement(interp, listObj, fileNameObj); + Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, linenr)); + Jim_ListAppendElement(interp, listObj, Jim_NewListObj(interp, frame->argv, frame->argc)); +} + +static void JimSetStackTrace(Jim_Interp *interp, Jim_Obj *stackTraceObj) +{ + + Jim_IncrRefCount(stackTraceObj); + Jim_DecrRefCount(interp, interp->stackTrace); + interp->stackTrace = stackTraceObj; + interp->errorFlag = 1; +} + +static void JimSetErrorStack(Jim_Interp *interp, ScriptObj *script) +{ + if (!interp->errorFlag) { + int i; + Jim_Obj *stackTrace = Jim_NewListObj(interp, NULL, 0); + + if (interp->procLevel == 0 && script) { + Jim_ListAppendElement(interp, stackTrace, interp->emptyObj); + Jim_ListAppendElement(interp, stackTrace, script->fileNameObj); + Jim_ListAppendElement(interp, stackTrace, Jim_NewIntObj(interp, script->linenr)); + Jim_ListAppendElement(interp, stackTrace, interp->emptyObj); + } + else { + for (i = 0; i <= interp->procLevel; i++) { + Jim_EvalFrame *frame = JimGetEvalFrameByProcLevel(interp, -i); + if (frame) { + JimAddStackFrame(interp, frame, stackTrace); + } + } + } + JimSetStackTrace(interp, stackTrace); + } +} + +int Jim_SetAssocData(Jim_Interp *interp, const char *key, Jim_InterpDeleteProc * delProc, + void *data) +{ + AssocDataValue *assocEntryPtr = (AssocDataValue *) Jim_Alloc(sizeof(AssocDataValue)); + + assocEntryPtr->delProc = delProc; + assocEntryPtr->data = data; + return Jim_AddHashEntry(&interp->assocData, key, assocEntryPtr); +} + +void *Jim_GetAssocData(Jim_Interp *interp, const char *key) +{ + Jim_HashEntry *entryPtr = Jim_FindHashEntry(&interp->assocData, key); + + if (entryPtr != NULL) { + AssocDataValue *assocEntryPtr = Jim_GetHashEntryVal(entryPtr); + return assocEntryPtr->data; + } + return NULL; +} + +int Jim_DeleteAssocData(Jim_Interp *interp, const char *key) +{ + return Jim_DeleteHashEntry(&interp->assocData, key); +} + +int Jim_GetExitCode(Jim_Interp *interp) +{ + return interp->exitCode; +} + +static void UpdateStringOfInt(struct Jim_Obj *objPtr); +static int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags); + +static const Jim_ObjType intObjType = { + "int", + NULL, + NULL, + UpdateStringOfInt, + JIM_TYPE_NONE, +}; + +static const Jim_ObjType coercedDoubleObjType = { + "coerced-double", + NULL, + NULL, + UpdateStringOfInt, + JIM_TYPE_NONE, +}; + + +static void UpdateStringOfInt(struct Jim_Obj *objPtr) +{ + char buf[JIM_INTEGER_SPACE + 1]; + jim_wide wideValue = JimWideValue(objPtr); + int pos = 0; + + if (wideValue == 0) { + buf[pos++] = '0'; + } + else { + char tmp[JIM_INTEGER_SPACE]; + int num = 0; + int i; + + if (wideValue < 0) { + buf[pos++] = '-'; + i = wideValue % 10; + tmp[num++] = (i > 0) ? 
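+            /* (-106 % 10) is -6 under C99 but may be 4 under C89, so derive the first digit in a way that works for either before dividing by -10. */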
(10 - i) : -i; + wideValue /= -10; + } + + while (wideValue) { + tmp[num++] = wideValue % 10; + wideValue /= 10; + } + + for (i = 0; i < num; i++) { + buf[pos++] = '0' + tmp[num - i - 1]; + } + } + buf[pos] = 0; + + JimSetStringBytes(objPtr, buf); +} + +static int SetIntFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags) +{ + jim_wide wideValue; + const char *str; + + if (objPtr->typePtr == &coercedDoubleObjType) { + + objPtr->typePtr = &intObjType; + return JIM_OK; + } + + + str = Jim_String(objPtr); + + if (Jim_StringToWide(str, &wideValue, 0) != JIM_OK) { + if (flags & JIM_ERRMSG) { + Jim_SetResultFormatted(interp, "expected integer but got \"%#s\"", objPtr); + } + return JIM_ERR; + } + if ((wideValue == JIM_WIDE_MIN || wideValue == JIM_WIDE_MAX) && errno == ERANGE) { + Jim_SetResultString(interp, "Integer value too big to be represented", -1); + return JIM_ERR; + } + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &intObjType; + objPtr->internalRep.wideValue = wideValue; + return JIM_OK; +} + +#ifdef JIM_OPTIMIZATION +static int JimIsWide(Jim_Obj *objPtr) +{ + return objPtr->typePtr == &intObjType; +} +#endif + +int Jim_GetWide(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr) +{ + if (objPtr->typePtr != &intObjType && SetIntFromAny(interp, objPtr, JIM_ERRMSG) == JIM_ERR) + return JIM_ERR; + *widePtr = JimWideValue(objPtr); + return JIM_OK; +} + +int Jim_GetWideExpr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr) +{ + int ret = JIM_OK; + + if (objPtr->typePtr == &sourceObjType || objPtr->typePtr == NULL) { + SetIntFromAny(interp, objPtr, 0); + } + if (objPtr->typePtr == &intObjType) { + *widePtr = JimWideValue(objPtr); + } + else { + JimPanic((interp->safeexpr, "interp->safeexpr is set")); + interp->safeexpr++; + ret = Jim_EvalExpression(interp, objPtr); + interp->safeexpr--; + + if (ret == JIM_OK) { + ret = Jim_GetWide(interp, Jim_GetResult(interp), widePtr); + } + if (ret != JIM_OK) { + Jim_SetResultFormatted(interp, "expected integer expression but got \"%#s\"", objPtr); + } + } + return ret; +} + + +static int JimGetWideNoErr(Jim_Interp *interp, Jim_Obj *objPtr, jim_wide * widePtr) +{ + if (objPtr->typePtr != &intObjType && SetIntFromAny(interp, objPtr, JIM_NONE) == JIM_ERR) + return JIM_ERR; + *widePtr = JimWideValue(objPtr); + return JIM_OK; +} + +int Jim_GetLong(Jim_Interp *interp, Jim_Obj *objPtr, long *longPtr) +{ + jim_wide wideValue; + int retval; + + retval = Jim_GetWide(interp, objPtr, &wideValue); + if (retval == JIM_OK) { + *longPtr = (long)wideValue; + return JIM_OK; + } + return JIM_ERR; +} + +Jim_Obj *Jim_NewIntObj(Jim_Interp *interp, jim_wide wideValue) +{ + Jim_Obj *objPtr; + + objPtr = Jim_NewObj(interp); + objPtr->typePtr = &intObjType; + objPtr->bytes = NULL; + objPtr->internalRep.wideValue = wideValue; + return objPtr; +} + +#define JIM_DOUBLE_SPACE 30 + +static void UpdateStringOfDouble(struct Jim_Obj *objPtr); +static int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr); + +static const Jim_ObjType doubleObjType = { + "double", + NULL, + NULL, + UpdateStringOfDouble, + JIM_TYPE_NONE, +}; + +#if !HAVE_DECL_ISNAN +#undef isnan +#define isnan(X) ((X) != (X)) +#endif +#if !HAVE_DECL_ISINF +#undef isinf +#define isinf(X) (1.0 / (X) == 0.0) +#endif + +static void UpdateStringOfDouble(struct Jim_Obj *objPtr) +{ + double value = objPtr->internalRep.doubleValue; + + if (isnan(value)) { + JimSetStringBytes(objPtr, "NaN"); + return; + } + if (isinf(value)) { + if (value < 0) { + JimSetStringBytes(objPtr, "-Inf"); + } + else { + 
JimSetStringBytes(objPtr, "Inf"); + } + return; + } + { + char buf[JIM_DOUBLE_SPACE + 1]; + int i; + int len = sprintf(buf, "%.12g", value); + + + for (i = 0; i < len; i++) { + if (buf[i] == '.' || buf[i] == 'e') { +#if defined(JIM_SPRINTF_DOUBLE_NEEDS_FIX) + char *e = strchr(buf, 'e'); + if (e && (e[1] == '-' || e[1] == '+') && e[2] == '0') { + + e += 2; + memmove(e, e + 1, len - (e - buf)); + } +#endif + break; + } + } + if (buf[i] == '\0') { + buf[i++] = '.'; + buf[i++] = '0'; + buf[i] = '\0'; + } + JimSetStringBytes(objPtr, buf); + } +} + +static int SetDoubleFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + double doubleValue; + jim_wide wideValue; + const char *str; + +#ifdef HAVE_LONG_LONG + +#define MIN_INT_IN_DOUBLE -(1LL << 53) +#define MAX_INT_IN_DOUBLE -(MIN_INT_IN_DOUBLE + 1) + + if (objPtr->typePtr == &intObjType + && JimWideValue(objPtr) >= MIN_INT_IN_DOUBLE + && JimWideValue(objPtr) <= MAX_INT_IN_DOUBLE) { + + + objPtr->typePtr = &coercedDoubleObjType; + return JIM_OK; + } +#endif + str = Jim_String(objPtr); + + if (Jim_StringToWide(str, &wideValue, 10) == JIM_OK) { + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &coercedDoubleObjType; + objPtr->internalRep.wideValue = wideValue; + return JIM_OK; + } + else { + + if (Jim_StringToDouble(str, &doubleValue) != JIM_OK) { + Jim_SetResultFormatted(interp, "expected floating-point number but got \"%#s\"", objPtr); + return JIM_ERR; + } + + Jim_FreeIntRep(interp, objPtr); + } + objPtr->typePtr = &doubleObjType; + objPtr->internalRep.doubleValue = doubleValue; + return JIM_OK; +} + +int Jim_GetDouble(Jim_Interp *interp, Jim_Obj *objPtr, double *doublePtr) +{ + if (objPtr->typePtr == &coercedDoubleObjType) { + *doublePtr = JimWideValue(objPtr); + return JIM_OK; + } + if (objPtr->typePtr != &doubleObjType && SetDoubleFromAny(interp, objPtr) == JIM_ERR) + return JIM_ERR; + + if (objPtr->typePtr == &coercedDoubleObjType) { + *doublePtr = JimWideValue(objPtr); + } + else { + *doublePtr = objPtr->internalRep.doubleValue; + } + return JIM_OK; +} + +Jim_Obj *Jim_NewDoubleObj(Jim_Interp *interp, double doubleValue) +{ + Jim_Obj *objPtr; + + objPtr = Jim_NewObj(interp); + objPtr->typePtr = &doubleObjType; + objPtr->bytes = NULL; + objPtr->internalRep.doubleValue = doubleValue; + return objPtr; +} + +static int SetBooleanFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags); + +int Jim_GetBoolean(Jim_Interp *interp, Jim_Obj *objPtr, int * booleanPtr) +{ + if (objPtr->typePtr != &intObjType && SetBooleanFromAny(interp, objPtr, JIM_ERRMSG) == JIM_ERR) + return JIM_ERR; + *booleanPtr = (int) JimWideValue(objPtr); + return JIM_OK; +} + +static const char * const jim_true_false_strings[8] = { + "1", "true", "yes", "on", + "0", "false", "no", "off" +}; + +static const int jim_true_false_lens[8] = { + 1, 4, 3, 2, + 1, 5, 2, 3, +}; + +static int SetBooleanFromAny(Jim_Interp *interp, Jim_Obj *objPtr, int flags) +{ + int index = Jim_FindByName(Jim_String(objPtr), jim_true_false_strings, + sizeof(jim_true_false_strings) / sizeof(*jim_true_false_strings)); + if (index < 0) { + if (flags & JIM_ERRMSG) { + Jim_SetResultFormatted(interp, "expected boolean but got \"%#s\"", objPtr); + } + return JIM_ERR; + } + + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &intObjType; + + objPtr->internalRep.wideValue = index < 4 ? 
1 : 0; + return JIM_OK; +} + +static void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec); +static void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr); +static void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); +static void UpdateStringOfList(struct Jim_Obj *objPtr); +static int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + +static const Jim_ObjType listObjType = { + "list", + FreeListInternalRep, + DupListInternalRep, + UpdateStringOfList, + JIM_TYPE_NONE, +}; + +void FreeListInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + int i; + + for (i = 0; i < objPtr->internalRep.listValue.len; i++) { + Jim_DecrRefCount(interp, objPtr->internalRep.listValue.ele[i]); + } + Jim_Free(objPtr->internalRep.listValue.ele); +} + +void DupListInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + int i; + + JIM_NOTUSED(interp); + + dupPtr->internalRep.listValue.len = srcPtr->internalRep.listValue.len; + dupPtr->internalRep.listValue.maxLen = srcPtr->internalRep.listValue.maxLen; + dupPtr->internalRep.listValue.ele = + Jim_Alloc(sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.maxLen); + memcpy(dupPtr->internalRep.listValue.ele, srcPtr->internalRep.listValue.ele, + sizeof(Jim_Obj *) * srcPtr->internalRep.listValue.len); + for (i = 0; i < dupPtr->internalRep.listValue.len; i++) { + Jim_IncrRefCount(dupPtr->internalRep.listValue.ele[i]); + } + dupPtr->typePtr = &listObjType; +} + +#define JIM_ELESTR_SIMPLE 0 +#define JIM_ELESTR_BRACE 1 +#define JIM_ELESTR_QUOTE 2 +static unsigned char ListElementQuotingType(const char *s, int len) +{ + int i, level, blevel, trySimple = 1; + + + if (len == 0) + return JIM_ELESTR_BRACE; + if (s[0] == '"' || s[0] == '{') { + trySimple = 0; + goto testbrace; + } + for (i = 0; i < len; i++) { + switch (s[i]) { + case ' ': + case '$': + case '"': + case '[': + case ']': + case ';': + case '\\': + case '\r': + case '\n': + case '\t': + case '\f': + case '\v': + trySimple = 0; + + case '{': + case '}': + goto testbrace; + } + } + return JIM_ELESTR_SIMPLE; + + testbrace: + + if (s[len - 1] == '\\') + return JIM_ELESTR_QUOTE; + level = 0; + blevel = 0; + for (i = 0; i < len; i++) { + switch (s[i]) { + case '{': + level++; + break; + case '}': + level--; + if (level < 0) + return JIM_ELESTR_QUOTE; + break; + case '[': + blevel++; + break; + case ']': + blevel--; + break; + case '\\': + if (s[i + 1] == '\n') + return JIM_ELESTR_QUOTE; + else if (s[i + 1] != '\0') + i++; + break; + } + } + if (blevel < 0) { + return JIM_ELESTR_QUOTE; + } + + if (level == 0) { + if (!trySimple) + return JIM_ELESTR_BRACE; + for (i = 0; i < len; i++) { + switch (s[i]) { + case ' ': + case '$': + case '"': + case '[': + case ']': + case ';': + case '\\': + case '\r': + case '\n': + case '\t': + case '\f': + case '\v': + return JIM_ELESTR_BRACE; + break; + } + } + return JIM_ELESTR_SIMPLE; + } + return JIM_ELESTR_QUOTE; +} + +static int BackslashQuoteString(const char *s, int len, char *q) +{ + char *p = q; + + while (len--) { + switch (*s) { + case ' ': + case '$': + case '"': + case '[': + case ']': + case '{': + case '}': + case ';': + case '\\': + *p++ = '\\'; + *p++ = *s++; + break; + case '\n': + *p++ = '\\'; + *p++ = 'n'; + s++; + break; + case '\r': + *p++ = '\\'; + *p++ = 'r'; + s++; + break; + case '\t': + *p++ = '\\'; + *p++ = 't'; + s++; + break; + case '\f': + *p++ = '\\'; + *p++ = 'f'; + s++; + break; + case '\v': + *p++ = 
'\\'; + *p++ = 'v'; + s++; + break; + default: + *p++ = *s++; + break; + } + } + *p = '\0'; + + return p - q; +} + +static void JimMakeListStringRep(Jim_Obj *objPtr, Jim_Obj **objv, int objc) +{ + #define STATIC_QUOTING_LEN 32 + int i, bufLen, realLength; + const char *strRep; + char *p; + unsigned char *quotingType, staticQuoting[STATIC_QUOTING_LEN]; + + + if (objc > STATIC_QUOTING_LEN) { + quotingType = Jim_Alloc(objc); + } + else { + quotingType = staticQuoting; + } + bufLen = 0; + for (i = 0; i < objc; i++) { + int len; + + strRep = Jim_GetString(objv[i], &len); + quotingType[i] = ListElementQuotingType(strRep, len); + switch (quotingType[i]) { + case JIM_ELESTR_SIMPLE: + if (i != 0 || strRep[0] != '#') { + bufLen += len; + break; + } + + quotingType[i] = JIM_ELESTR_BRACE; + + case JIM_ELESTR_BRACE: + bufLen += len + 2; + break; + case JIM_ELESTR_QUOTE: + bufLen += len * 2; + break; + } + bufLen++; + } + bufLen++; + + + p = objPtr->bytes = Jim_Alloc(bufLen + 1); + realLength = 0; + for (i = 0; i < objc; i++) { + int len, qlen; + + strRep = Jim_GetString(objv[i], &len); + + switch (quotingType[i]) { + case JIM_ELESTR_SIMPLE: + memcpy(p, strRep, len); + p += len; + realLength += len; + break; + case JIM_ELESTR_BRACE: + *p++ = '{'; + memcpy(p, strRep, len); + p += len; + *p++ = '}'; + realLength += len + 2; + break; + case JIM_ELESTR_QUOTE: + if (i == 0 && strRep[0] == '#') { + *p++ = '\\'; + realLength++; + } + qlen = BackslashQuoteString(strRep, len, p); + p += qlen; + realLength += qlen; + break; + } + + if (i + 1 != objc) { + *p++ = ' '; + realLength++; + } + } + *p = '\0'; + objPtr->length = realLength; + + if (quotingType != staticQuoting) { + Jim_Free(quotingType); + } +} + +static void UpdateStringOfList(struct Jim_Obj *objPtr) +{ + JimMakeListStringRep(objPtr, objPtr->internalRep.listValue.ele, objPtr->internalRep.listValue.len); +} + +static int SetListFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) +{ + struct JimParserCtx parser; + const char *str; + int strLen; + Jim_Obj *fileNameObj; + int linenr; + + if (objPtr->typePtr == &listObjType) { + return JIM_OK; + } + + + if (Jim_IsDict(objPtr) && objPtr->bytes == NULL) { + Jim_Dict *dict = objPtr->internalRep.dictValue; + + + objPtr->typePtr = &listObjType; + objPtr->internalRep.listValue.len = dict->len; + objPtr->internalRep.listValue.maxLen = dict->maxLen; + objPtr->internalRep.listValue.ele = dict->table; + + + Jim_Free(dict->ht); + + + Jim_Free(dict); + return JIM_OK; + } + + + fileNameObj = Jim_GetSourceInfo(interp, objPtr, &linenr); + Jim_IncrRefCount(fileNameObj); + + + str = Jim_GetString(objPtr, &strLen); + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &listObjType; + objPtr->internalRep.listValue.len = 0; + objPtr->internalRep.listValue.maxLen = 0; + objPtr->internalRep.listValue.ele = NULL; + + + if (strLen) { + JimParserInit(&parser, str, strLen, linenr); + while (!parser.eof) { + Jim_Obj *elementPtr; + + JimParseList(&parser); + if (parser.tt != JIM_TT_STR && parser.tt != JIM_TT_ESC) + continue; + elementPtr = JimParserGetTokenObj(interp, &parser); + Jim_SetSourceInfo(interp, elementPtr, fileNameObj, parser.tline); + ListAppendElement(objPtr, elementPtr); + } + } + Jim_DecrRefCount(interp, fileNameObj); + return JIM_OK; +} + +Jim_Obj *Jim_NewListObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) +{ + Jim_Obj *objPtr; + + objPtr = Jim_NewObj(interp); + objPtr->typePtr = &listObjType; + objPtr->bytes = NULL; + objPtr->internalRep.listValue.ele = NULL; + objPtr->internalRep.listValue.len = 0; + 
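+    /* A new list starts with no element storage; ListInsertElements() grows it on demand. */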
objPtr->internalRep.listValue.maxLen = 0; + + if (len) { + ListInsertElements(objPtr, 0, len, elements); + } + + return objPtr; +} + +static void JimListGetElements(Jim_Interp *interp, Jim_Obj *listObj, int *listLen, + Jim_Obj ***listVec) +{ + *listLen = Jim_ListLength(interp, listObj); + *listVec = listObj->internalRep.listValue.ele; +} + + +static int JimSign(jim_wide w) +{ + if (w == 0) { + return 0; + } + else if (w < 0) { + return -1; + } + return 1; +} + + +struct lsort_info { + jmp_buf jmpbuf; + Jim_Obj *command; + Jim_Interp *interp; + enum { + JIM_LSORT_ASCII, + JIM_LSORT_NOCASE, + JIM_LSORT_INTEGER, + JIM_LSORT_REAL, + JIM_LSORT_COMMAND, + JIM_LSORT_DICT + } type; + int order; + Jim_Obj **indexv; + int indexc; + int unique; + int (*subfn)(Jim_Obj **, Jim_Obj **); +}; + +static struct lsort_info *sort_info; + +static int ListSortIndexHelper(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + Jim_Obj *lObj, *rObj; + + if (Jim_ListIndices(sort_info->interp, *lhsObj, sort_info->indexv, sort_info->indexc, &lObj, JIM_ERRMSG) != JIM_OK || + Jim_ListIndices(sort_info->interp, *rhsObj, sort_info->indexv, sort_info->indexc, &rObj, JIM_ERRMSG) != JIM_OK) { + longjmp(sort_info->jmpbuf, JIM_ERR); + } + return sort_info->subfn(&lObj, &rObj); +} + + +static int ListSortString(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 0) * sort_info->order; +} + +static int ListSortStringNoCase(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 1) * sort_info->order; +} + +static int ListSortDict(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + + const char *left = Jim_String(*lhsObj); + const char *right = Jim_String(*rhsObj); + + while (1) { + if (isdigit(UCHAR(*left)) && isdigit(UCHAR(*right))) { + + jim_wide lint, rint; + char *lend, *rend; + lint = jim_strtoull(left, &lend); + rint = jim_strtoull(right, &rend); + if (lint != rint) { + return JimSign(lint - rint) * sort_info->order; + } + if (lend -left != rend - right) { + return JimSign((lend - left) - (rend - right)) * sort_info->order; + } + left = lend; + right = rend; + } + else { + int cl, cr; + left += utf8_tounicode_case(left, &cl, 1); + right += utf8_tounicode_case(right, &cr, 1); + if (cl != cr) { + return JimSign(cl - cr) * sort_info->order; + } + if (cl == 0) { + + return Jim_StringCompareObj(sort_info->interp, *lhsObj, *rhsObj, 0) * sort_info->order; + } + } + } +} + +static int ListSortInteger(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + jim_wide lhs = 0, rhs = 0; + + if (Jim_GetWide(sort_info->interp, *lhsObj, &lhs) != JIM_OK || + Jim_GetWide(sort_info->interp, *rhsObj, &rhs) != JIM_OK) { + longjmp(sort_info->jmpbuf, JIM_ERR); + } + + return JimSign(lhs - rhs) * sort_info->order; +} + +static int ListSortReal(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + double lhs = 0, rhs = 0; + + if (Jim_GetDouble(sort_info->interp, *lhsObj, &lhs) != JIM_OK || + Jim_GetDouble(sort_info->interp, *rhsObj, &rhs) != JIM_OK) { + longjmp(sort_info->jmpbuf, JIM_ERR); + } + if (lhs == rhs) { + return 0; + } + if (lhs > rhs) { + return sort_info->order; + } + return -sort_info->order; +} + +static int ListSortCommand(Jim_Obj **lhsObj, Jim_Obj **rhsObj) +{ + Jim_Obj *compare_script; + int rc; + + jim_wide ret = 0; + + + compare_script = Jim_DuplicateObj(sort_info->interp, sort_info->command); + Jim_ListAppendElement(sort_info->interp, compare_script, *lhsObj); + Jim_ListAppendElement(sort_info->interp, compare_script, *rhsObj); + + rc = Jim_EvalObj(sort_info->interp, 
compare_script); + + if (rc != JIM_OK || Jim_GetWide(sort_info->interp, Jim_GetResult(sort_info->interp), &ret) != JIM_OK) { + longjmp(sort_info->jmpbuf, rc); + } + + return JimSign(ret) * sort_info->order; +} + +static void ListRemoveDuplicates(Jim_Obj *listObjPtr, int (*comp)(Jim_Obj **lhs, Jim_Obj **rhs)) +{ + int src; + int dst = 0; + Jim_Obj **ele = listObjPtr->internalRep.listValue.ele; + + for (src = 1; src < listObjPtr->internalRep.listValue.len; src++) { + if (comp(&ele[dst], &ele[src]) == 0) { + + Jim_DecrRefCount(sort_info->interp, ele[dst]); + } + else { + + dst++; + } + ele[dst] = ele[src]; + } + + + dst++; + if (dst < listObjPtr->internalRep.listValue.len) { + ele[dst] = ele[src]; + } + + + listObjPtr->internalRep.listValue.len = dst; +} + + +static int ListSortElements(Jim_Interp *interp, Jim_Obj *listObjPtr, struct lsort_info *info) +{ + struct lsort_info *prev_info; + + typedef int (qsort_comparator) (const void *, const void *); + int (*fn) (Jim_Obj **, Jim_Obj **); + Jim_Obj **vector; + int len; + int rc; + + JimPanic((Jim_IsShared(listObjPtr), "ListSortElements called with shared object")); + SetListFromAny(interp, listObjPtr); + + + prev_info = sort_info; + sort_info = info; + + vector = listObjPtr->internalRep.listValue.ele; + len = listObjPtr->internalRep.listValue.len; + switch (info->type) { + case JIM_LSORT_ASCII: + fn = ListSortString; + break; + case JIM_LSORT_NOCASE: + fn = ListSortStringNoCase; + break; + case JIM_LSORT_INTEGER: + fn = ListSortInteger; + break; + case JIM_LSORT_REAL: + fn = ListSortReal; + break; + case JIM_LSORT_COMMAND: + fn = ListSortCommand; + break; + case JIM_LSORT_DICT: + fn = ListSortDict; + break; + default: + fn = NULL; + JimPanic((1, "ListSort called with invalid sort type")); + return -1; + } + + if (info->indexc) { + + info->subfn = fn; + fn = ListSortIndexHelper; + } + + if ((rc = setjmp(info->jmpbuf)) == 0) { + qsort(vector, len, sizeof(Jim_Obj *), (qsort_comparator *) fn); + + if (info->unique && len > 1) { + ListRemoveDuplicates(listObjPtr, fn); + } + + Jim_InvalidateStringRep(listObjPtr); + } + sort_info = prev_info; + + return rc; +} + + +static void ListEnsureLength(Jim_Obj *listPtr, int idx) +{ + assert(idx >= 0); + if (idx >= listPtr->internalRep.listValue.maxLen) { + if (idx < 4) { + + idx = 4; + } + listPtr->internalRep.listValue.ele = Jim_Realloc(listPtr->internalRep.listValue.ele, + sizeof(Jim_Obj *) * idx); + + listPtr->internalRep.listValue.maxLen = idx; + } +} + +static void ListInsertElements(Jim_Obj *listPtr, int idx, int elemc, Jim_Obj *const *elemVec) +{ + int currentLen = listPtr->internalRep.listValue.len; + int requiredLen = currentLen + elemc; + int i; + Jim_Obj **point; + + if (elemc == 0) { + + return; + } + + if (requiredLen > listPtr->internalRep.listValue.maxLen) { + if (currentLen) { + + requiredLen *= 2; + } + ListEnsureLength(listPtr, requiredLen); + } + if (idx < 0) { + idx = currentLen; + } + point = listPtr->internalRep.listValue.ele + idx; + memmove(point + elemc, point, (currentLen - idx) * sizeof(Jim_Obj *)); + for (i = 0; i < elemc; ++i) { + point[i] = elemVec[i]; + Jim_IncrRefCount(point[i]); + } + listPtr->internalRep.listValue.len += elemc; +} + +static void ListAppendElement(Jim_Obj *listPtr, Jim_Obj *objPtr) +{ + ListInsertElements(listPtr, -1, 1, &objPtr); +} + +static void ListAppendList(Jim_Obj *listPtr, Jim_Obj *appendListPtr) +{ + ListInsertElements(listPtr, -1, + appendListPtr->internalRep.listValue.len, appendListPtr->internalRep.listValue.ele); +} + +void 
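+/* The public list mutators below convert the object to a list, invalidate its cached string rep, then modify it in place; callers must not pass a shared object. */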
Jim_ListAppendElement(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *objPtr) +{ + JimPanic((Jim_IsShared(listPtr), "Jim_ListAppendElement called with shared object")); + SetListFromAny(interp, listPtr); + Jim_InvalidateStringRep(listPtr); + ListAppendElement(listPtr, objPtr); +} + +void Jim_ListAppendList(Jim_Interp *interp, Jim_Obj *listPtr, Jim_Obj *appendListPtr) +{ + JimPanic((Jim_IsShared(listPtr), "Jim_ListAppendList called with shared object")); + SetListFromAny(interp, listPtr); + SetListFromAny(interp, appendListPtr); + Jim_InvalidateStringRep(listPtr); + ListAppendList(listPtr, appendListPtr); +} + +int Jim_ListLength(Jim_Interp *interp, Jim_Obj *objPtr) +{ + SetListFromAny(interp, objPtr); + return objPtr->internalRep.listValue.len; +} + +void Jim_ListInsertElements(Jim_Interp *interp, Jim_Obj *listPtr, int idx, + int objc, Jim_Obj *const *objVec) +{ + JimPanic((Jim_IsShared(listPtr), "Jim_ListInsertElement called with shared object")); + SetListFromAny(interp, listPtr); + if (idx >= 0 && idx > listPtr->internalRep.listValue.len) + idx = listPtr->internalRep.listValue.len; + else if (idx < 0) + idx = 0; + Jim_InvalidateStringRep(listPtr); + ListInsertElements(listPtr, idx, objc, objVec); +} + +Jim_Obj *Jim_ListGetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx) +{ + SetListFromAny(interp, listPtr); + if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || + (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) { + return NULL; + } + if (idx < 0) + idx = listPtr->internalRep.listValue.len + idx; + return listPtr->internalRep.listValue.ele[idx]; +} + +int Jim_ListIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, Jim_Obj **objPtrPtr, int flags) +{ + *objPtrPtr = Jim_ListGetIndex(interp, listPtr, idx); + if (*objPtrPtr == NULL) { + if (flags & JIM_ERRMSG) { + Jim_SetResultString(interp, "list index out of range", -1); + } + return JIM_ERR; + } + return JIM_OK; +} + +static int Jim_ListIndices(Jim_Interp *interp, Jim_Obj *listPtr, + Jim_Obj *const *indexv, int indexc, Jim_Obj **resultObj, int flags) +{ + int i; + int static_idxes[5]; + int *idxes = static_idxes; + int ret = JIM_OK; + + if (indexc > sizeof(static_idxes) / sizeof(*static_idxes)) { + idxes = Jim_Alloc(indexc * sizeof(*idxes)); + } + + for (i = 0; i < indexc; i++) { + ret = Jim_GetIndex(interp, indexv[i], &idxes[i]); + if (ret != JIM_OK) { + goto err; + } + } + + for (i = 0; i < indexc; i++) { + Jim_Obj *objPtr = Jim_ListGetIndex(interp, listPtr, idxes[i]); + if (!objPtr) { + if (flags & JIM_ERRMSG) { + if (idxes[i] < 0 || idxes[i] > Jim_ListLength(interp, listPtr)) { + Jim_SetResultFormatted(interp, "index \"%#s\" out of range", indexv[i]); + } + else { + Jim_SetResultFormatted(interp, "element %#s missing from sublist \"%#s\"", indexv[i], listPtr); + } + } + return -1; + } + listPtr = objPtr; + } + *resultObj = listPtr; +err: + if (idxes != static_idxes) + Jim_Free(idxes); + return ret; +} + +static int ListSetIndex(Jim_Interp *interp, Jim_Obj *listPtr, int idx, + Jim_Obj *newObjPtr, int flags) +{ + SetListFromAny(interp, listPtr); + if ((idx >= 0 && idx >= listPtr->internalRep.listValue.len) || + (idx < 0 && (-idx - 1) >= listPtr->internalRep.listValue.len)) { + if (flags & JIM_ERRMSG) { + Jim_SetResultString(interp, "list index out of range", -1); + } + return JIM_ERR; + } + if (idx < 0) + idx = listPtr->internalRep.listValue.len + idx; + Jim_DecrRefCount(interp, listPtr->internalRep.listValue.ele[idx]); + listPtr->internalRep.listValue.ele[idx] = newObjPtr; + Jim_IncrRefCount(newObjPtr); + 
return JIM_OK; +} + +int Jim_ListSetIndex(Jim_Interp *interp, Jim_Obj *varNamePtr, + Jim_Obj *const *indexv, int indexc, Jim_Obj *newObjPtr) +{ + Jim_Obj *varObjPtr, *objPtr, *listObjPtr; + int shared, i, idx; + + varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG | JIM_UNSHARED); + if (objPtr == NULL) + return JIM_ERR; + if ((shared = Jim_IsShared(objPtr))) + varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr); + for (i = 0; i < indexc - 1; i++) { + listObjPtr = objPtr; + if (Jim_GetIndex(interp, indexv[i], &idx) != JIM_OK) + goto err; + + objPtr = Jim_ListGetIndex(interp, listObjPtr, idx); + if (objPtr == NULL) { + Jim_SetResultFormatted(interp, "index \"%#s\" out of range", indexv[i]); + goto err; + } + if (Jim_IsShared(objPtr)) { + objPtr = Jim_DuplicateObj(interp, objPtr); + ListSetIndex(interp, listObjPtr, idx, objPtr, JIM_NONE); + } + Jim_InvalidateStringRep(listObjPtr); + } + if (Jim_GetIndex(interp, indexv[indexc - 1], &idx) != JIM_OK) + goto err; + if (ListSetIndex(interp, objPtr, idx, newObjPtr, JIM_ERRMSG) == JIM_ERR) + goto err; + Jim_InvalidateStringRep(objPtr); + Jim_InvalidateStringRep(varObjPtr); + if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) + goto err; + Jim_SetResult(interp, varObjPtr); + return JIM_OK; + err: + if (shared) { + Jim_FreeNewObj(interp, varObjPtr); + } + return JIM_ERR; +} + +Jim_Obj *Jim_ListJoin(Jim_Interp *interp, Jim_Obj *listObjPtr, const char *joinStr, int joinStrLen) +{ + int i; + int listLen = Jim_ListLength(interp, listObjPtr); + Jim_Obj *resObjPtr = Jim_NewEmptyStringObj(interp); + + for (i = 0; i < listLen; ) { + Jim_AppendObj(interp, resObjPtr, Jim_ListGetIndex(interp, listObjPtr, i)); + if (++i != listLen) { + Jim_AppendString(interp, resObjPtr, joinStr, joinStrLen); + } + } + return resObjPtr; +} + +Jim_Obj *Jim_ConcatObj(Jim_Interp *interp, int objc, Jim_Obj *const *objv) +{ + int i; + + for (i = 0; i < objc; i++) { + if (!Jim_IsList(objv[i])) + break; + } + if (i == objc) { + Jim_Obj *objPtr = Jim_NewListObj(interp, NULL, 0); + + for (i = 0; i < objc; i++) + ListAppendList(objPtr, objv[i]); + return objPtr; + } + else { + + int len = 0, objLen; + char *bytes, *p; + + + for (i = 0; i < objc; i++) { + len += Jim_Length(objv[i]); + } + if (objc) + len += objc - 1; + + p = bytes = Jim_Alloc(len + 1); + for (i = 0; i < objc; i++) { + const char *s = Jim_GetString(objv[i], &objLen); + + + while (objLen && isspace(UCHAR(*s))) { + s++; + objLen--; + len--; + } + + while (objLen && isspace(UCHAR(s[objLen - 1]))) { + + if (objLen > 1 && s[objLen - 2] == '\\') { + break; + } + objLen--; + len--; + } + memcpy(p, s, objLen); + p += objLen; + if (i + 1 != objc) { + if (objLen) + *p++ = ' '; + else { + len--; + } + } + } + *p = '\0'; + return Jim_NewStringObjNoAlloc(interp, bytes, len); + } +} + +Jim_Obj *Jim_ListRange(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *firstObjPtr, + Jim_Obj *lastObjPtr) +{ + int first, last; + int len, rangeLen; + + if (Jim_GetIndex(interp, firstObjPtr, &first) != JIM_OK || + Jim_GetIndex(interp, lastObjPtr, &last) != JIM_OK) + return NULL; + len = Jim_ListLength(interp, listObjPtr); + first = JimRelToAbsIndex(len, first); + last = JimRelToAbsIndex(len, last); + JimRelToAbsRange(len, &first, &last, &rangeLen); + if (first == 0 && last == len) { + return listObjPtr; + } + return Jim_NewListObj(interp, listObjPtr->internalRep.listValue.ele + first, rangeLen); +} + +static void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupDictInternalRep(Jim_Interp *interp, 
Jim_Obj *srcPtr, Jim_Obj *dupPtr); +static void UpdateStringOfDict(struct Jim_Obj *objPtr); +static int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + + +static const Jim_ObjType dictObjType = { + "dict", + FreeDictInternalRep, + DupDictInternalRep, + UpdateStringOfDict, + JIM_TYPE_NONE, +}; + +static void JimFreeDict(Jim_Interp *interp, Jim_Dict *dict) +{ + int i; + for (i = 0; i < dict->len; i++) { + Jim_DecrRefCount(interp, dict->table[i]); + } + Jim_Free(dict->table); + Jim_Free(dict->ht); + Jim_Free(dict); +} + +enum { + DICT_HASH_FIND = -1, + DICT_HASH_REMOVE = -2, + DICT_HASH_ADD = -3, +}; + +static int JimDictHashFind(Jim_Dict *dict, Jim_Obj *keyObjPtr, int op_tvoffset) +{ + unsigned h = (JimObjectHTHashFunction(keyObjPtr) + dict->uniq); + unsigned idx = h & dict->sizemask; + int tvoffset = 0; + unsigned peturb = h; + unsigned first_removed = ~0; + + if (dict->len) { + while ((tvoffset = dict->ht[idx].offset)) { + if (tvoffset == -1) { + if (first_removed == ~0) { + first_removed = idx; + } + } + else if (dict->ht[idx].hash == h) { + if (Jim_StringEqObj(keyObjPtr, dict->table[tvoffset - 1])) { + break; + } + } + + peturb >>= 5; + idx = (5 * idx + 1 + peturb) & dict->sizemask; + } + } + + switch (op_tvoffset) { + case DICT_HASH_FIND: + + break; + case DICT_HASH_REMOVE: + if (tvoffset) { + + dict->ht[idx].offset = -1; + dict->dummy++; + } + + break; + case DICT_HASH_ADD: + if (tvoffset == 0) { + + if (first_removed != ~0) { + idx = first_removed; + dict->dummy--; + } + dict->ht[idx].offset = dict->len + 1; + dict->ht[idx].hash = h; + } + + break; + default: + assert(tvoffset); + + dict->ht[idx].offset = op_tvoffset; + break; + } + + return tvoffset; +} + +static void JimDictExpandHashTable(Jim_Dict *dict, unsigned int size) +{ + int i; + struct JimDictHashEntry *prevht = dict->ht; + int prevsize = dict->size; + + dict->size = JimHashTableNextPower(size); + dict->sizemask = dict->size - 1; + + + dict->ht = Jim_Alloc(dict->size * sizeof(*dict->ht)); + memset(dict->ht, 0, dict->size * sizeof(*dict->ht)); + + + for (i = 0; i < prevsize; i++) { + if (prevht[i].offset > 0) { + + unsigned h = prevht[i].hash; + unsigned idx = h & dict->sizemask; + unsigned peturb = h; + + while (dict->ht[idx].offset) { + peturb >>= 5; + idx = (5 * idx + 1 + peturb) & dict->sizemask; + } + dict->ht[idx].offset = prevht[i].offset; + dict->ht[idx].hash = h; + } + } + Jim_Free(prevht); +} + +static int JimDictAdd(Jim_Dict *dict, Jim_Obj *keyObjPtr) +{ + if (dict->size <= dict->len + dict->dummy) { + JimDictExpandHashTable(dict, dict->size ? 
dict->size * 2 : 8); + } + return JimDictHashFind(dict, keyObjPtr, DICT_HASH_ADD); +} + +static Jim_Dict *JimDictNew(Jim_Interp *interp, int table_size, int ht_size) +{ + Jim_Dict *dict = Jim_Alloc(sizeof(*dict)); + memset(dict, 0, sizeof(*dict)); + + if (ht_size) { + JimDictExpandHashTable(dict, ht_size); + } + if (table_size) { + dict->table = Jim_Alloc(table_size * sizeof(*dict->table)); + dict->maxLen = table_size; + } +#ifdef JIM_RANDOMISE_HASH + dict->uniq = (rand() ^ time(NULL) ^ clock()); +#endif + return dict; +} + +static void FreeDictInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + JimFreeDict(interp, objPtr->internalRep.dictValue); +} + +static void DupDictInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + Jim_Dict *oldDict = srcPtr->internalRep.dictValue; + int i; + + + Jim_Dict *newDict = JimDictNew(interp, oldDict->maxLen, oldDict->size); + + + for (i = 0; i < oldDict->len; i++) { + newDict->table[i] = oldDict->table[i]; + Jim_IncrRefCount(newDict->table[i]); + } + newDict->len = oldDict->len; + + + newDict->uniq = oldDict->uniq; + + + memcpy(newDict->ht, oldDict->ht, sizeof(*oldDict->ht) * oldDict->size); + + dupPtr->internalRep.dictValue = newDict; + dupPtr->typePtr = &dictObjType; +} + +static void UpdateStringOfDict(struct Jim_Obj *objPtr) +{ + JimMakeListStringRep(objPtr, objPtr->internalRep.dictValue->table, objPtr->internalRep.dictValue->len); +} + +static int SetDictFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) +{ + int listlen; + + if (objPtr->typePtr == &dictObjType) { + return JIM_OK; + } + + if (Jim_IsList(objPtr) && Jim_IsShared(objPtr)) { + Jim_String(objPtr); + } + + listlen = Jim_ListLength(interp, objPtr); + if (listlen % 2) { + Jim_SetResultString(interp, "missing value to go with key", -1); + return JIM_ERR; + } + else { + + Jim_Dict *dict = JimDictNew(interp, 0, listlen); + int i; + + + dict->table = objPtr->internalRep.listValue.ele; + dict->maxLen = objPtr->internalRep.listValue.maxLen; + + + for (i = 0; i < listlen; i += 2) { + int tvoffset = JimDictAdd(dict, dict->table[i]); + if (tvoffset) { + + + Jim_DecrRefCount(interp, dict->table[tvoffset]); + + dict->table[tvoffset] = dict->table[i + 1]; + + Jim_DecrRefCount(interp, dict->table[i]); + } + else { + if (dict->len != i) { + dict->table[dict->len++] = dict->table[i]; + dict->table[dict->len++] = dict->table[i + 1]; + } + else { + dict->len += 2; + } + } + } + + objPtr->typePtr = &dictObjType; + objPtr->internalRep.dictValue = dict; + + return JIM_OK; + } +} + + + +static int DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) +{ + Jim_Dict *dict = objPtr->internalRep.dictValue; + if (valueObjPtr == NULL) { + + int tvoffset = JimDictHashFind(dict, keyObjPtr, DICT_HASH_REMOVE); + if (tvoffset) { + + Jim_DecrRefCount(interp, dict->table[tvoffset - 1]); + Jim_DecrRefCount(interp, dict->table[tvoffset]); + dict->len -= 2; + if (tvoffset != dict->len + 1) { + + dict->table[tvoffset - 1] = dict->table[dict->len]; + dict->table[tvoffset] = dict->table[dict->len + 1]; + + + JimDictHashFind(dict, dict->table[tvoffset - 1], tvoffset); + } + return JIM_OK; + } + return JIM_ERR; + } + else { + + int tvoffset = JimDictAdd(dict, keyObjPtr); + if (tvoffset) { + + Jim_IncrRefCount(valueObjPtr); + Jim_DecrRefCount(interp, dict->table[tvoffset]); + dict->table[tvoffset] = valueObjPtr; + } + else { + if (dict->maxLen == dict->len) { + + if (dict->maxLen < 4) { + dict->maxLen = 4; + } + else { + dict->maxLen *= 2; + } + dict->table = 
Jim_Realloc(dict->table, dict->maxLen * sizeof(*dict->table)); + } + Jim_IncrRefCount(keyObjPtr); + Jim_IncrRefCount(valueObjPtr); + + dict->table[dict->len++] = keyObjPtr; + dict->table[dict->len++] = valueObjPtr; + + } + return JIM_OK; + } +} + +int Jim_DictAddElement(Jim_Interp *interp, Jim_Obj *objPtr, + Jim_Obj *keyObjPtr, Jim_Obj *valueObjPtr) +{ + JimPanic((Jim_IsShared(objPtr), "Jim_DictAddElement called with shared object")); + if (SetDictFromAny(interp, objPtr) != JIM_OK) { + return JIM_ERR; + } + Jim_InvalidateStringRep(objPtr); + return DictAddElement(interp, objPtr, keyObjPtr, valueObjPtr); +} + +Jim_Obj *Jim_NewDictObj(Jim_Interp *interp, Jim_Obj *const *elements, int len) +{ + Jim_Obj *objPtr; + int i; + + JimPanic((len % 2, "Jim_NewDictObj() 'len' argument must be even")); + + objPtr = Jim_NewObj(interp); + objPtr->typePtr = &dictObjType; + objPtr->bytes = NULL; + + objPtr->internalRep.dictValue = JimDictNew(interp, len, len); + for (i = 0; i < len; i += 2) + DictAddElement(interp, objPtr, elements[i], elements[i + 1]); + return objPtr; +} + +int Jim_DictKey(Jim_Interp *interp, Jim_Obj *dictPtr, Jim_Obj *keyPtr, + Jim_Obj **objPtrPtr, int flags) +{ + int tvoffset; + Jim_Dict *dict; + + if (SetDictFromAny(interp, dictPtr) != JIM_OK) { + return -1; + } + dict = dictPtr->internalRep.dictValue; + tvoffset = JimDictHashFind(dict, keyPtr, DICT_HASH_FIND); + if (tvoffset == 0) { + if (flags & JIM_ERRMSG) { + Jim_SetResultFormatted(interp, "key \"%#s\" not known in dictionary", keyPtr); + } + return JIM_ERR; + } + *objPtrPtr = dict->table[tvoffset]; + return JIM_OK; +} + +Jim_Obj **Jim_DictPairs(Jim_Interp *interp, Jim_Obj *dictPtr, int *len) +{ + + if (Jim_IsList(dictPtr)) { + Jim_Obj **table; + JimListGetElements(interp, dictPtr, len, &table); + if (*len % 2 == 0) { + return table; + } + + } + if (SetDictFromAny(interp, dictPtr) != JIM_OK) { + + *len = 1; + return NULL; + } + *len = dictPtr->internalRep.dictValue->len; + return dictPtr->internalRep.dictValue->table; +} + + +int Jim_DictKeysVector(Jim_Interp *interp, Jim_Obj *dictPtr, + Jim_Obj *const *keyv, int keyc, Jim_Obj **objPtrPtr, int flags) +{ + int i; + + if (keyc == 0) { + *objPtrPtr = dictPtr; + return JIM_OK; + } + + for (i = 0; i < keyc; i++) { + Jim_Obj *objPtr; + + int rc = Jim_DictKey(interp, dictPtr, keyv[i], &objPtr, flags); + if (rc != JIM_OK) { + return rc; + } + dictPtr = objPtr; + } + *objPtrPtr = dictPtr; + return JIM_OK; +} + +int Jim_SetDictKeysVector(Jim_Interp *interp, Jim_Obj *varNamePtr, + Jim_Obj *const *keyv, int keyc, Jim_Obj *newObjPtr, int flags) +{ + Jim_Obj *varObjPtr, *objPtr, *dictObjPtr; + int shared, i; + + varObjPtr = objPtr = Jim_GetVariable(interp, varNamePtr, flags); + if (objPtr == NULL) { + if (newObjPtr == NULL && (flags & JIM_MUSTEXIST)) { + + return JIM_ERR; + } + varObjPtr = objPtr = Jim_NewDictObj(interp, NULL, 0); + if (Jim_SetVariable(interp, varNamePtr, objPtr) != JIM_OK) { + Jim_FreeNewObj(interp, varObjPtr); + return JIM_ERR; + } + } + if ((shared = Jim_IsShared(objPtr))) + varObjPtr = objPtr = Jim_DuplicateObj(interp, objPtr); + for (i = 0; i < keyc; i++) { + dictObjPtr = objPtr; + + + if (SetDictFromAny(interp, dictObjPtr) != JIM_OK) { + goto err; + } + + if (i == keyc - 1) { + + if (Jim_DictAddElement(interp, objPtr, keyv[keyc - 1], newObjPtr) != JIM_OK) { + if (newObjPtr || (flags & JIM_MUSTEXIST)) { + goto err; + } + } + break; + } + + + Jim_InvalidateStringRep(dictObjPtr); + if (Jim_DictKey(interp, dictObjPtr, keyv[i], &objPtr, + newObjPtr ? 
JIM_NONE : JIM_ERRMSG) == JIM_OK) { + if (Jim_IsShared(objPtr)) { + objPtr = Jim_DuplicateObj(interp, objPtr); + DictAddElement(interp, dictObjPtr, keyv[i], objPtr); + } + } + else { + if (newObjPtr == NULL) { + goto err; + } + objPtr = Jim_NewDictObj(interp, NULL, 0); + DictAddElement(interp, dictObjPtr, keyv[i], objPtr); + } + } + + Jim_InvalidateStringRep(objPtr); + Jim_InvalidateStringRep(varObjPtr); + if (Jim_SetVariable(interp, varNamePtr, varObjPtr) != JIM_OK) { + goto err; + } + + if (!(flags & JIM_NORESULT)) { + Jim_SetResult(interp, varObjPtr); + } + return JIM_OK; + err: + if (shared) { + Jim_FreeNewObj(interp, varObjPtr); + } + return JIM_ERR; +} + +static void UpdateStringOfIndex(struct Jim_Obj *objPtr); +static int SetIndexFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + +static const Jim_ObjType indexObjType = { + "index", + NULL, + NULL, + UpdateStringOfIndex, + JIM_TYPE_NONE, +}; + +static void UpdateStringOfIndex(struct Jim_Obj *objPtr) +{ + if (objPtr->internalRep.intValue == -1) { + JimSetStringBytes(objPtr, "end"); + } + else { + char buf[JIM_INTEGER_SPACE + 1]; + if (objPtr->internalRep.intValue >= 0 || objPtr->internalRep.intValue == -INT_MAX) { + sprintf(buf, "%d", objPtr->internalRep.intValue); + } + else { + + sprintf(buf, "end%d", objPtr->internalRep.intValue + 1); + } + JimSetStringBytes(objPtr, buf); + } +} + +static int SetIndexFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + jim_wide idx; + int end = 0; + const char *str; + Jim_Obj *exprObj = objPtr; + + JimPanic((objPtr->refCount == 0, "SetIndexFromAny() called with zero refcount object")); + + + str = Jim_String(objPtr); + + + if (strncmp(str, "end", 3) == 0) { + end = 1; + str += 3; + idx = 0; + switch (*str) { + case '\0': + exprObj = NULL; + break; + + case '-': + case '+': + exprObj = Jim_NewStringObj(interp, str, -1); + break; + + default: + goto badindex; + } + } + if (exprObj) { + int ret; + Jim_IncrRefCount(exprObj); + ret = Jim_GetWideExpr(interp, exprObj, &idx); + Jim_DecrRefCount(interp, exprObj); + if (ret != JIM_OK) { + goto badindex; + } + } + + if (end) { + if (idx > 0) { + idx = INT_MAX; + } + else { + + idx--; + } + } + else if (idx < 0) { + idx = -INT_MAX; + } + + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &indexObjType; + objPtr->internalRep.intValue = idx; + return JIM_OK; + + badindex: + Jim_SetResultFormatted(interp, + "bad index \"%#s\": must be intexpr or end?[+-]intexpr?", objPtr); + return JIM_ERR; +} + +int Jim_GetIndex(Jim_Interp *interp, Jim_Obj *objPtr, int *indexPtr) +{ + + if (objPtr->typePtr == &intObjType) { + jim_wide val = JimWideValue(objPtr); + + if (val < 0) + *indexPtr = -INT_MAX; + else if (val > INT_MAX) + *indexPtr = INT_MAX; + else + *indexPtr = (int)val; + return JIM_OK; + } + if (objPtr->typePtr != &indexObjType && SetIndexFromAny(interp, objPtr) == JIM_ERR) + return JIM_ERR; + *indexPtr = objPtr->internalRep.intValue; + return JIM_OK; +} + + + +static const char * const jimReturnCodes[] = { + "ok", + "error", + "return", + "break", + "continue", + "signal", + "exit", + "eval", + NULL +}; + +#define jimReturnCodesSize (sizeof(jimReturnCodes)/sizeof(*jimReturnCodes) - 1) + +static const Jim_ObjType returnCodeObjType = { + "return-code", + NULL, + NULL, + NULL, + JIM_TYPE_NONE, +}; + +const char *Jim_ReturnCode(int code) +{ + if (code < 0 || code >= (int)jimReturnCodesSize) { + return "?"; + } + else { + return jimReturnCodes[code]; + } +} + +static int SetReturnCodeFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + int returnCode; + jim_wide 
wideValue; + + + if (JimGetWideNoErr(interp, objPtr, &wideValue) != JIM_ERR) + returnCode = (int)wideValue; + else if (Jim_GetEnum(interp, objPtr, jimReturnCodes, &returnCode, NULL, JIM_NONE) != JIM_OK) { + Jim_SetResultFormatted(interp, "expected return code but got \"%#s\"", objPtr); + return JIM_ERR; + } + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &returnCodeObjType; + objPtr->internalRep.intValue = returnCode; + return JIM_OK; +} + +int Jim_GetReturnCode(Jim_Interp *interp, Jim_Obj *objPtr, int *intPtr) +{ + if (objPtr->typePtr != &returnCodeObjType && SetReturnCodeFromAny(interp, objPtr) == JIM_ERR) + return JIM_ERR; + *intPtr = objPtr->internalRep.intValue; + return JIM_OK; +} + +static int JimParseExprOperator(struct JimParserCtx *pc); +static int JimParseExprNumber(struct JimParserCtx *pc); +static int JimParseExprIrrational(struct JimParserCtx *pc); +static int JimParseExprBoolean(struct JimParserCtx *pc); + + +enum +{ + + + + JIM_EXPROP_MUL = JIM_TT_EXPR_OP, + JIM_EXPROP_DIV, + JIM_EXPROP_MOD, + JIM_EXPROP_SUB, + JIM_EXPROP_ADD, + JIM_EXPROP_LSHIFT, + JIM_EXPROP_RSHIFT, + JIM_EXPROP_ROTL, + JIM_EXPROP_ROTR, + JIM_EXPROP_LT, + JIM_EXPROP_GT, + JIM_EXPROP_LTE, + JIM_EXPROP_GTE, + JIM_EXPROP_NUMEQ, + JIM_EXPROP_NUMNE, + JIM_EXPROP_BITAND, + JIM_EXPROP_BITXOR, + JIM_EXPROP_BITOR, + JIM_EXPROP_LOGICAND, + JIM_EXPROP_LOGICOR, + JIM_EXPROP_TERNARY, + JIM_EXPROP_COLON, + JIM_EXPROP_POW, + + + JIM_EXPROP_STREQ, + JIM_EXPROP_STRNE, + JIM_EXPROP_STRIN, + JIM_EXPROP_STRNI, + JIM_EXPROP_STRLT, + JIM_EXPROP_STRGT, + JIM_EXPROP_STRLE, + JIM_EXPROP_STRGE, + + + JIM_EXPROP_NOT, + JIM_EXPROP_BITNOT, + JIM_EXPROP_UNARYMINUS, + JIM_EXPROP_UNARYPLUS, + + + JIM_EXPROP_FUNC_INT, + JIM_EXPROP_FUNC_WIDE, + JIM_EXPROP_FUNC_ABS, + JIM_EXPROP_FUNC_DOUBLE, + JIM_EXPROP_FUNC_ROUND, + JIM_EXPROP_FUNC_RAND, + JIM_EXPROP_FUNC_SRAND, + + + JIM_EXPROP_FUNC_SIN, + JIM_EXPROP_FUNC_COS, + JIM_EXPROP_FUNC_TAN, + JIM_EXPROP_FUNC_ASIN, + JIM_EXPROP_FUNC_ACOS, + JIM_EXPROP_FUNC_ATAN, + JIM_EXPROP_FUNC_ATAN2, + JIM_EXPROP_FUNC_SINH, + JIM_EXPROP_FUNC_COSH, + JIM_EXPROP_FUNC_TANH, + JIM_EXPROP_FUNC_CEIL, + JIM_EXPROP_FUNC_FLOOR, + JIM_EXPROP_FUNC_EXP, + JIM_EXPROP_FUNC_LOG, + JIM_EXPROP_FUNC_LOG10, + JIM_EXPROP_FUNC_SQRT, + JIM_EXPROP_FUNC_POW, + JIM_EXPROP_FUNC_HYPOT, + JIM_EXPROP_FUNC_FMOD, +}; + +struct JimExprNode { + int type; + struct Jim_Obj *objPtr; + + struct JimExprNode *left; + struct JimExprNode *right; + struct JimExprNode *ternary; +}; + + +typedef struct Jim_ExprOperator +{ + const char *name; + int (*funcop) (Jim_Interp *interp, struct JimExprNode *opnode); + unsigned char precedence; + unsigned char arity; + unsigned char attr; + unsigned char namelen; +} Jim_ExprOperator; + +static int JimExprGetTerm(Jim_Interp *interp, struct JimExprNode *node, Jim_Obj **objPtrPtr); +static int JimExprGetTermBoolean(Jim_Interp *interp, struct JimExprNode *node); +static int JimExprEvalTermNode(Jim_Interp *interp, struct JimExprNode *node); + +static int JimExprOpNumUnary(Jim_Interp *interp, struct JimExprNode *node) +{ + int intresult = 1; + int rc, bA = 0; + double dA, dC = 0; + jim_wide wA, wC = 0; + Jim_Obj *A; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + + if ((A->typePtr != &doubleObjType || A->bytes) && JimGetWideNoErr(interp, A, &wA) == JIM_OK) { + switch (node->type) { + case JIM_EXPROP_FUNC_INT: + case JIM_EXPROP_FUNC_WIDE: + case JIM_EXPROP_FUNC_ROUND: + case JIM_EXPROP_UNARYPLUS: + wC = wA; + break; + case JIM_EXPROP_FUNC_DOUBLE: + dC = wA; + intresult = 0; 
+ break; + case JIM_EXPROP_FUNC_ABS: + wC = wA >= 0 ? wA : -wA; + break; + case JIM_EXPROP_UNARYMINUS: + wC = -wA; + break; + case JIM_EXPROP_NOT: + wC = !wA; + break; + default: + abort(); + } + } + else if ((rc = Jim_GetDouble(interp, A, &dA)) == JIM_OK) { + switch (node->type) { + case JIM_EXPROP_FUNC_INT: + case JIM_EXPROP_FUNC_WIDE: + wC = dA; + break; + case JIM_EXPROP_FUNC_ROUND: + wC = dA < 0 ? (dA - 0.5) : (dA + 0.5); + break; + case JIM_EXPROP_FUNC_DOUBLE: + case JIM_EXPROP_UNARYPLUS: + dC = dA; + intresult = 0; + break; + case JIM_EXPROP_FUNC_ABS: +#ifdef JIM_MATH_FUNCTIONS + dC = fabs(dA); +#else + dC = dA >= 0 ? dA : -dA; +#endif + intresult = 0; + break; + case JIM_EXPROP_UNARYMINUS: + dC = -dA; + intresult = 0; + break; + case JIM_EXPROP_NOT: + wC = !dA; + break; + default: + abort(); + } + } + else if ((rc = Jim_GetBoolean(interp, A, &bA)) == JIM_OK) { + switch (node->type) { + case JIM_EXPROP_NOT: + wC = !bA; + break; + default: + abort(); + } + } + + if (rc == JIM_OK) { + if (intresult) { + Jim_SetResultInt(interp, wC); + } + else { + Jim_SetResult(interp, Jim_NewDoubleObj(interp, dC)); + } + } + + Jim_DecrRefCount(interp, A); + + return rc; +} + +static double JimRandDouble(Jim_Interp *interp) +{ + unsigned long x; + JimRandomBytes(interp, &x, sizeof(x)); + + return (double)x / (double)~0UL; +} + +static int JimExprOpIntUnary(Jim_Interp *interp, struct JimExprNode *node) +{ + jim_wide wA; + Jim_Obj *A; + int rc; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + + rc = Jim_GetWide(interp, A, &wA); + if (rc == JIM_OK) { + switch (node->type) { + case JIM_EXPROP_BITNOT: + Jim_SetResultInt(interp, ~wA); + break; + case JIM_EXPROP_FUNC_SRAND: + JimPrngSeed(interp, (unsigned char *)&wA, sizeof(wA)); + Jim_SetResult(interp, Jim_NewDoubleObj(interp, JimRandDouble(interp))); + break; + default: + abort(); + } + } + + Jim_DecrRefCount(interp, A); + + return rc; +} + +static int JimExprOpNone(Jim_Interp *interp, struct JimExprNode *node) +{ + JimPanic((node->type != JIM_EXPROP_FUNC_RAND, "JimExprOpNone only support rand()")); + + Jim_SetResult(interp, Jim_NewDoubleObj(interp, JimRandDouble(interp))); + + return JIM_OK; +} + +#ifdef JIM_MATH_FUNCTIONS +static int JimExprOpDoubleUnary(Jim_Interp *interp, struct JimExprNode *node) +{ + int rc; + double dA, dC; + Jim_Obj *A; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + + rc = Jim_GetDouble(interp, A, &dA); + if (rc == JIM_OK) { + switch (node->type) { + case JIM_EXPROP_FUNC_SIN: + dC = sin(dA); + break; + case JIM_EXPROP_FUNC_COS: + dC = cos(dA); + break; + case JIM_EXPROP_FUNC_TAN: + dC = tan(dA); + break; + case JIM_EXPROP_FUNC_ASIN: + dC = asin(dA); + break; + case JIM_EXPROP_FUNC_ACOS: + dC = acos(dA); + break; + case JIM_EXPROP_FUNC_ATAN: + dC = atan(dA); + break; + case JIM_EXPROP_FUNC_SINH: + dC = sinh(dA); + break; + case JIM_EXPROP_FUNC_COSH: + dC = cosh(dA); + break; + case JIM_EXPROP_FUNC_TANH: + dC = tanh(dA); + break; + case JIM_EXPROP_FUNC_CEIL: + dC = ceil(dA); + break; + case JIM_EXPROP_FUNC_FLOOR: + dC = floor(dA); + break; + case JIM_EXPROP_FUNC_EXP: + dC = exp(dA); + break; + case JIM_EXPROP_FUNC_LOG: + dC = log(dA); + break; + case JIM_EXPROP_FUNC_LOG10: + dC = log10(dA); + break; + case JIM_EXPROP_FUNC_SQRT: + dC = sqrt(dA); + break; + default: + abort(); + } + Jim_SetResult(interp, Jim_NewDoubleObj(interp, dC)); + } + + Jim_DecrRefCount(interp, A); + + return rc; +} +#endif + + +static int JimExprOpIntBin(Jim_Interp *interp, struct 
JimExprNode *node) +{ + jim_wide wA, wB; + int rc; + Jim_Obj *A, *B; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + if ((rc = JimExprGetTerm(interp, node->right, &B)) != JIM_OK) { + Jim_DecrRefCount(interp, A); + return rc; + } + + rc = JIM_ERR; + + if (Jim_GetWide(interp, A, &wA) == JIM_OK && Jim_GetWide(interp, B, &wB) == JIM_OK) { + jim_wide wC; + + rc = JIM_OK; + + switch (node->type) { + case JIM_EXPROP_LSHIFT: + wC = wA << wB; + break; + case JIM_EXPROP_RSHIFT: + wC = wA >> wB; + break; + case JIM_EXPROP_BITAND: + wC = wA & wB; + break; + case JIM_EXPROP_BITXOR: + wC = wA ^ wB; + break; + case JIM_EXPROP_BITOR: + wC = wA | wB; + break; + case JIM_EXPROP_MOD: + if (wB == 0) { + wC = 0; + Jim_SetResultString(interp, "Division by zero", -1); + rc = JIM_ERR; + } + else { + int negative = 0; + + if (wB < 0) { + wB = -wB; + wA = -wA; + negative = 1; + } + wC = wA % wB; + if (wC < 0) { + wC += wB; + } + if (negative) { + wC = -wC; + } + } + break; + case JIM_EXPROP_ROTL: + case JIM_EXPROP_ROTR:{ + + unsigned long uA = (unsigned long)wA; + unsigned long uB = (unsigned long)wB; + const unsigned int S = sizeof(unsigned long) * 8; + + + uB %= S; + + if (node->type == JIM_EXPROP_ROTR) { + uB = S - uB; + } + wC = (unsigned long)(uA << uB) | (uA >> (S - uB)); + break; + } + default: + abort(); + } + Jim_SetResultInt(interp, wC); + } + + Jim_DecrRefCount(interp, A); + Jim_DecrRefCount(interp, B); + + return rc; +} + + + +static int JimExprOpBin(Jim_Interp *interp, struct JimExprNode *node) +{ + int rc = JIM_OK; + double dA, dB, dC = 0; + jim_wide wA, wB, wC = 0; + Jim_Obj *A, *B; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + if ((rc = JimExprGetTerm(interp, node->right, &B)) != JIM_OK) { + Jim_DecrRefCount(interp, A); + return rc; + } + + if ((A->typePtr != &doubleObjType || A->bytes) && + (B->typePtr != &doubleObjType || B->bytes) && + JimGetWideNoErr(interp, A, &wA) == JIM_OK && JimGetWideNoErr(interp, B, &wB) == JIM_OK) { + + + + switch (node->type) { + case JIM_EXPROP_POW: + case JIM_EXPROP_FUNC_POW: + if (wA == 0 && wB < 0) { + Jim_SetResultString(interp, "exponentiation of zero by negative power", -1); + rc = JIM_ERR; + goto done; + } + wC = JimPowWide(wA, wB); + goto intresult; + case JIM_EXPROP_ADD: + wC = wA + wB; + goto intresult; + case JIM_EXPROP_SUB: + wC = wA - wB; + goto intresult; + case JIM_EXPROP_MUL: + wC = wA * wB; + goto intresult; + case JIM_EXPROP_DIV: + if (wB == 0) { + Jim_SetResultString(interp, "Division by zero", -1); + rc = JIM_ERR; + goto done; + } + else { + if (wB < 0) { + wB = -wB; + wA = -wA; + } + wC = wA / wB; + if (wA % wB < 0) { + wC--; + } + goto intresult; + } + case JIM_EXPROP_LT: + wC = wA < wB; + goto intresult; + case JIM_EXPROP_GT: + wC = wA > wB; + goto intresult; + case JIM_EXPROP_LTE: + wC = wA <= wB; + goto intresult; + case JIM_EXPROP_GTE: + wC = wA >= wB; + goto intresult; + case JIM_EXPROP_NUMEQ: + wC = wA == wB; + goto intresult; + case JIM_EXPROP_NUMNE: + wC = wA != wB; + goto intresult; + } + } + if (Jim_GetDouble(interp, A, &dA) == JIM_OK && Jim_GetDouble(interp, B, &dB) == JIM_OK) { + switch (node->type) { +#ifndef JIM_MATH_FUNCTIONS + case JIM_EXPROP_POW: + case JIM_EXPROP_FUNC_POW: + case JIM_EXPROP_FUNC_ATAN2: + case JIM_EXPROP_FUNC_HYPOT: + case JIM_EXPROP_FUNC_FMOD: + Jim_SetResultString(interp, "unsupported", -1); + rc = JIM_ERR; + goto done; +#else + case JIM_EXPROP_POW: + case JIM_EXPROP_FUNC_POW: + dC = pow(dA, dB); + goto doubleresult; + case 
JIM_EXPROP_FUNC_ATAN2: + dC = atan2(dA, dB); + goto doubleresult; + case JIM_EXPROP_FUNC_HYPOT: + dC = hypot(dA, dB); + goto doubleresult; + case JIM_EXPROP_FUNC_FMOD: + dC = fmod(dA, dB); + goto doubleresult; +#endif + case JIM_EXPROP_ADD: + dC = dA + dB; + goto doubleresult; + case JIM_EXPROP_SUB: + dC = dA - dB; + goto doubleresult; + case JIM_EXPROP_MUL: + dC = dA * dB; + goto doubleresult; + case JIM_EXPROP_DIV: + if (dB == 0) { +#ifdef INFINITY + dC = dA < 0 ? -INFINITY : INFINITY; +#else + dC = (dA < 0 ? -1.0 : 1.0) * strtod("Inf", NULL); +#endif + } + else { + dC = dA / dB; + } + goto doubleresult; + case JIM_EXPROP_LT: + wC = dA < dB; + goto intresult; + case JIM_EXPROP_GT: + wC = dA > dB; + goto intresult; + case JIM_EXPROP_LTE: + wC = dA <= dB; + goto intresult; + case JIM_EXPROP_GTE: + wC = dA >= dB; + goto intresult; + case JIM_EXPROP_NUMEQ: + wC = dA == dB; + goto intresult; + case JIM_EXPROP_NUMNE: + wC = dA != dB; + goto intresult; + } + } + else { + + + + int i = Jim_StringCompareObj(interp, A, B, 0); + + switch (node->type) { + case JIM_EXPROP_LT: + wC = i < 0; + goto intresult; + case JIM_EXPROP_GT: + wC = i > 0; + goto intresult; + case JIM_EXPROP_LTE: + wC = i <= 0; + goto intresult; + case JIM_EXPROP_GTE: + wC = i >= 0; + goto intresult; + case JIM_EXPROP_NUMEQ: + wC = i == 0; + goto intresult; + case JIM_EXPROP_NUMNE: + wC = i != 0; + goto intresult; + } + } + + rc = JIM_ERR; +done: + Jim_DecrRefCount(interp, A); + Jim_DecrRefCount(interp, B); + return rc; +intresult: + Jim_SetResultInt(interp, wC); + goto done; +doubleresult: + Jim_SetResult(interp, Jim_NewDoubleObj(interp, dC)); + goto done; +} + +static int JimSearchList(Jim_Interp *interp, Jim_Obj *listObjPtr, Jim_Obj *valObj) +{ + int listlen; + int i; + + listlen = Jim_ListLength(interp, listObjPtr); + for (i = 0; i < listlen; i++) { + if (Jim_StringEqObj(Jim_ListGetIndex(interp, listObjPtr, i), valObj)) { + return 1; + } + } + return 0; +} + + + +static int JimExprOpStrBin(Jim_Interp *interp, struct JimExprNode *node) +{ + Jim_Obj *A, *B; + jim_wide wC; + int comp, rc; + + if ((rc = JimExprGetTerm(interp, node->left, &A)) != JIM_OK) { + return rc; + } + if ((rc = JimExprGetTerm(interp, node->right, &B)) != JIM_OK) { + Jim_DecrRefCount(interp, A); + return rc; + } + + switch (node->type) { + case JIM_EXPROP_STREQ: + case JIM_EXPROP_STRNE: + wC = Jim_StringEqObj(A, B); + if (node->type == JIM_EXPROP_STRNE) { + wC = !wC; + } + break; + case JIM_EXPROP_STRLT: + case JIM_EXPROP_STRGT: + case JIM_EXPROP_STRLE: + case JIM_EXPROP_STRGE: + comp = Jim_StringCompareObj(interp, A, B, 0); + if (node->type == JIM_EXPROP_STRLT) { + wC = comp == -1; + } else if (node->type == JIM_EXPROP_STRGT) { + wC = comp == 1; + } else if (node->type == JIM_EXPROP_STRLE) { + wC = comp == -1 || comp == 0; + } else { + wC = comp == 0 || comp == 1; + } + break; + case JIM_EXPROP_STRIN: + wC = JimSearchList(interp, B, A); + break; + case JIM_EXPROP_STRNI: + wC = !JimSearchList(interp, B, A); + break; + default: + abort(); + } + Jim_SetResultInt(interp, wC); + + Jim_DecrRefCount(interp, A); + Jim_DecrRefCount(interp, B); + + return rc; +} + +static int ExprBool(Jim_Interp *interp, Jim_Obj *obj) +{ + long l; + double d; + int b; + int ret = -1; + + + Jim_IncrRefCount(obj); + + if (Jim_GetLong(interp, obj, &l) == JIM_OK) { + ret = (l != 0); + } + else if (Jim_GetDouble(interp, obj, &d) == JIM_OK) { + ret = (d != 0); + } + else if (Jim_GetBoolean(interp, obj, &b) == JIM_OK) { + ret = (b != 0); + } + + Jim_DecrRefCount(interp, obj); + return ret; 
+} + +static int JimExprOpAnd(Jim_Interp *interp, struct JimExprNode *node) +{ + + int result = JimExprGetTermBoolean(interp, node->left); + + if (result == 1) { + + result = JimExprGetTermBoolean(interp, node->right); + } + if (result == -1) { + return JIM_ERR; + } + Jim_SetResultInt(interp, result); + return JIM_OK; +} + +static int JimExprOpOr(Jim_Interp *interp, struct JimExprNode *node) +{ + + int result = JimExprGetTermBoolean(interp, node->left); + + if (result == 0) { + + result = JimExprGetTermBoolean(interp, node->right); + } + if (result == -1) { + return JIM_ERR; + } + Jim_SetResultInt(interp, result); + return JIM_OK; +} + +static int JimExprOpTernary(Jim_Interp *interp, struct JimExprNode *node) +{ + + int result = JimExprGetTermBoolean(interp, node->left); + + if (result == 1) { + + return JimExprEvalTermNode(interp, node->right); + } + else if (result == 0) { + + return JimExprEvalTermNode(interp, node->ternary); + } + + return JIM_ERR; +} + +enum +{ + OP_FUNC = 0x0001, + OP_RIGHT_ASSOC = 0x0002, +}; + +#define OPRINIT_ATTR(N, P, ARITY, F, ATTR) {N, F, P, ARITY, ATTR, sizeof(N) - 1} +#define OPRINIT(N, P, ARITY, F) OPRINIT_ATTR(N, P, ARITY, F, 0) + +static const struct Jim_ExprOperator Jim_ExprOperators[] = { + OPRINIT("*", 110, 2, JimExprOpBin), + OPRINIT("/", 110, 2, JimExprOpBin), + OPRINIT("%", 110, 2, JimExprOpIntBin), + + OPRINIT("-", 100, 2, JimExprOpBin), + OPRINIT("+", 100, 2, JimExprOpBin), + + OPRINIT("<<", 90, 2, JimExprOpIntBin), + OPRINIT(">>", 90, 2, JimExprOpIntBin), + + OPRINIT("<<<", 90, 2, JimExprOpIntBin), + OPRINIT(">>>", 90, 2, JimExprOpIntBin), + + OPRINIT("<", 80, 2, JimExprOpBin), + OPRINIT(">", 80, 2, JimExprOpBin), + OPRINIT("<=", 80, 2, JimExprOpBin), + OPRINIT(">=", 80, 2, JimExprOpBin), + + OPRINIT("==", 70, 2, JimExprOpBin), + OPRINIT("!=", 70, 2, JimExprOpBin), + + OPRINIT("&", 50, 2, JimExprOpIntBin), + OPRINIT("^", 49, 2, JimExprOpIntBin), + OPRINIT("|", 48, 2, JimExprOpIntBin), + + OPRINIT("&&", 10, 2, JimExprOpAnd), + OPRINIT("||", 9, 2, JimExprOpOr), + OPRINIT_ATTR("?", 5, 3, JimExprOpTernary, OP_RIGHT_ASSOC), + OPRINIT_ATTR(":", 5, 3, NULL, OP_RIGHT_ASSOC), + + + OPRINIT_ATTR("**", 120, 2, JimExprOpBin, OP_RIGHT_ASSOC), + + OPRINIT("eq", 60, 2, JimExprOpStrBin), + OPRINIT("ne", 60, 2, JimExprOpStrBin), + + OPRINIT("in", 55, 2, JimExprOpStrBin), + OPRINIT("ni", 55, 2, JimExprOpStrBin), + + OPRINIT("lt", 75, 2, JimExprOpStrBin), + OPRINIT("gt", 75, 2, JimExprOpStrBin), + OPRINIT("le", 75, 2, JimExprOpStrBin), + OPRINIT("ge", 75, 2, JimExprOpStrBin), + + OPRINIT_ATTR("!", 150, 1, JimExprOpNumUnary, OP_RIGHT_ASSOC), + OPRINIT_ATTR("~", 150, 1, JimExprOpIntUnary, OP_RIGHT_ASSOC), + OPRINIT_ATTR(" -", 150, 1, JimExprOpNumUnary, OP_RIGHT_ASSOC), + OPRINIT_ATTR(" +", 150, 1, JimExprOpNumUnary, OP_RIGHT_ASSOC), + + + + OPRINIT_ATTR("int", 200, 1, JimExprOpNumUnary, OP_FUNC), + OPRINIT_ATTR("wide", 200, 1, JimExprOpNumUnary, OP_FUNC), + OPRINIT_ATTR("abs", 200, 1, JimExprOpNumUnary, OP_FUNC), + OPRINIT_ATTR("double", 200, 1, JimExprOpNumUnary, OP_FUNC), + OPRINIT_ATTR("round", 200, 1, JimExprOpNumUnary, OP_FUNC), + OPRINIT_ATTR("rand", 200, 0, JimExprOpNone, OP_FUNC), + OPRINIT_ATTR("srand", 200, 1, JimExprOpIntUnary, OP_FUNC), + +#ifdef JIM_MATH_FUNCTIONS + OPRINIT_ATTR("sin", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("cos", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("tan", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("asin", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("acos", 200, 1, 
JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("atan", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("atan2", 200, 2, JimExprOpBin, OP_FUNC), + OPRINIT_ATTR("sinh", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("cosh", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("tanh", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("ceil", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("floor", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("exp", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("log", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("log10", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("sqrt", 200, 1, JimExprOpDoubleUnary, OP_FUNC), + OPRINIT_ATTR("pow", 200, 2, JimExprOpBin, OP_FUNC), + OPRINIT_ATTR("hypot", 200, 2, JimExprOpBin, OP_FUNC), + OPRINIT_ATTR("fmod", 200, 2, JimExprOpBin, OP_FUNC), +#endif +}; +#undef OPRINIT +#undef OPRINIT_ATTR + +#define JIM_EXPR_OPERATORS_NUM \ + (sizeof(Jim_ExprOperators)/sizeof(struct Jim_ExprOperator)) + +static int JimParseExpression(struct JimParserCtx *pc) +{ + pc->errmsg = NULL; + + while (1) { + + while (isspace(UCHAR(*pc->p)) || (*(pc->p) == '\\' && *(pc->p + 1) == '\n')) { + if (*pc->p == '\n') { + pc->linenr++; + } + pc->p++; + pc->len--; + } + + if (*pc->p == '#') { + JimParseComment(pc); + + continue; + } + break; + } + + + pc->tline = pc->linenr; + pc->tstart = pc->p; + + if (pc->len == 0) { + pc->tend = pc->p; + pc->tt = JIM_TT_EOL; + pc->eof = 1; + return JIM_OK; + } + switch (*(pc->p)) { + case '(': + pc->tt = JIM_TT_SUBEXPR_START; + goto singlechar; + case ')': + pc->tt = JIM_TT_SUBEXPR_END; + goto singlechar; + case ',': + pc->tt = JIM_TT_SUBEXPR_COMMA; +singlechar: + pc->tend = pc->p; + pc->p++; + pc->len--; + break; + case '[': + return JimParseCmd(pc); + case '$': + if (JimParseVar(pc) == JIM_ERR) + return JimParseExprOperator(pc); + else { + + if (pc->tt == JIM_TT_EXPRSUGAR) { + pc->errmsg = "nesting expr in expr is not allowed"; + return JIM_ERR; + } + return JIM_OK; + } + break; + case '0': + case '1': + case '2': + case '3': + case '4': + case '5': + case '6': + case '7': + case '8': + case '9': + case '.': + return JimParseExprNumber(pc); + case '"': + return JimParseQuote(pc); + case '{': + return JimParseBrace(pc); + + case 'N': + case 'I': + case 'n': + case 'i': + if (JimParseExprIrrational(pc) == JIM_ERR) + if (JimParseExprBoolean(pc) == JIM_ERR) + return JimParseExprOperator(pc); + break; + case 't': + case 'f': + case 'o': + case 'y': + if (JimParseExprBoolean(pc) == JIM_ERR) + return JimParseExprOperator(pc); + break; + default: + return JimParseExprOperator(pc); + break; + } + return JIM_OK; +} + +static int JimParseExprNumber(struct JimParserCtx *pc) +{ + char *end; + + + pc->tt = JIM_TT_EXPR_INT; + + jim_strtoull(pc->p, (char **)&pc->p); + + if (strchr("eENnIi.", *pc->p) || pc->p == pc->tstart) { + if (strtod(pc->tstart, &end)) { } + if (end == pc->tstart) + return JIM_ERR; + if (end > pc->p) { + + pc->tt = JIM_TT_EXPR_DOUBLE; + pc->p = end; + } + } + pc->tend = pc->p - 1; + pc->len -= (pc->p - pc->tstart); + return JIM_OK; +} + +static int JimParseExprIrrational(struct JimParserCtx *pc) +{ + const char *irrationals[] = { "NaN", "nan", "NAN", "Inf", "inf", "INF", NULL }; + int i; + + for (i = 0; irrationals[i]; i++) { + const char *irr = irrationals[i]; + + if (strncmp(irr, pc->p, 3) == 0) { + pc->p += 3; + pc->len -= 3; + pc->tend = pc->p - 1; + pc->tt = JIM_TT_EXPR_DOUBLE; + return JIM_OK; + } + } + return JIM_ERR; +} + +static int 
JimParseExprBoolean(struct JimParserCtx *pc) +{ + int i; + for (i = 0; i < sizeof(jim_true_false_strings) / sizeof(*jim_true_false_strings); i++) { + if (strncmp(pc->p, jim_true_false_strings[i], jim_true_false_lens[i]) == 0) { + pc->p += jim_true_false_lens[i]; + pc->len -= jim_true_false_lens[i]; + pc->tend = pc->p - 1; + pc->tt = JIM_TT_EXPR_BOOLEAN; + return JIM_OK; + } + } + return JIM_ERR; +} + +static const struct Jim_ExprOperator *JimExprOperatorInfoByOpcode(int opcode) +{ + static Jim_ExprOperator dummy_op; + if (opcode < JIM_TT_EXPR_OP) { + return &dummy_op; + } + return &Jim_ExprOperators[opcode - JIM_TT_EXPR_OP]; +} + +static int JimParseExprOperator(struct JimParserCtx *pc) +{ + int i; + const struct Jim_ExprOperator *bestOp = NULL; + int bestLen = 0; + + + for (i = 0; i < (signed)JIM_EXPR_OPERATORS_NUM; i++) { + const struct Jim_ExprOperator *op = &Jim_ExprOperators[i]; + + if (op->name[0] != pc->p[0]) { + continue; + } + + if (op->namelen > bestLen && strncmp(op->name, pc->p, op->namelen) == 0) { + bestOp = op; + bestLen = op->namelen; + } + } + if (bestOp == NULL) { + return JIM_ERR; + } + + + if (bestOp->attr & OP_FUNC) { + const char *p = pc->p + bestLen; + int len = pc->len - bestLen; + + while (len && isspace(UCHAR(*p))) { + len--; + p++; + } + if (*p != '(') { + pc->errmsg = "function requires parentheses"; + return JIM_ERR; + } + } + pc->tend = pc->p + bestLen - 1; + pc->p += bestLen; + pc->len -= bestLen; + + pc->tt = (bestOp - Jim_ExprOperators) + JIM_TT_EXPR_OP; + return JIM_OK; +} + + +static void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); +static int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr); + +static const Jim_ObjType exprObjType = { + "expression", + FreeExprInternalRep, + DupExprInternalRep, + NULL, + JIM_TYPE_NONE, +}; + + +struct ExprTree +{ + struct JimExprNode *expr; + struct JimExprNode *nodes; + int len; + int inUse; +}; + +static void ExprTreeFreeNodes(Jim_Interp *interp, struct JimExprNode *nodes, int num) +{ + int i; + for (i = 0; i < num; i++) { + if (nodes[i].objPtr) { + Jim_DecrRefCount(interp, nodes[i].objPtr); + } + } + Jim_Free(nodes); +} + +static void ExprTreeFree(Jim_Interp *interp, struct ExprTree *expr) +{ + ExprTreeFreeNodes(interp, expr->nodes, expr->len); + Jim_Free(expr); +} + +static void FreeExprInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + struct ExprTree *expr = (void *)objPtr->internalRep.ptr; + + if (expr) { + if (--expr->inUse != 0) { + return; + } + + ExprTreeFree(interp, expr); + } +} + +static void DupExprInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + JIM_NOTUSED(interp); + JIM_NOTUSED(srcPtr); + + + dupPtr->typePtr = NULL; +} + +struct ExprBuilder { + int parencount; + int level; + ParseToken *token; + ParseToken *first_token; + Jim_Stack stack; + Jim_Obj *exprObjPtr; + Jim_Obj *fileNameObj; + struct JimExprNode *nodes; + struct JimExprNode *next; +}; + +#ifdef DEBUG_SHOW_EXPR +static void JimShowExprNode(struct JimExprNode *node, int level) +{ + int i; + for (i = 0; i < level; i++) { + printf(" "); + } + if (TOKEN_IS_EXPR_OP(node->type)) { + printf("%s\n", jim_tt_name(node->type)); + if (node->left) { + JimShowExprNode(node->left, level + 1); + } + if (node->right) { + JimShowExprNode(node->right, level + 1); + } + if (node->ternary) { + JimShowExprNode(node->ternary, level + 1); + } + } + else { + printf("[%s] %s\n", jim_tt_name(node->type), Jim_String(node->objPtr)); + } +} 
+#endif + +#define EXPR_UNTIL_CLOSE 0x0001 +#define EXPR_FUNC_ARGS 0x0002 +#define EXPR_TERNARY 0x0004 + +static int ExprTreeBuildTree(Jim_Interp *interp, struct ExprBuilder *builder, int precedence, int flags, int exp_numterms) { + int rc; + struct JimExprNode *node; + + int exp_stacklen = builder->stack.len + exp_numterms; + + if (builder->level++ > 200) { + Jim_SetResultString(interp, "Expression too complex", -1); + return JIM_ERR; + } + + while (builder->token->type != JIM_TT_EOL) { + ParseToken *t = builder->token++; + int prevtt; + + if (t == builder->first_token) { + prevtt = JIM_TT_NONE; + } + else { + prevtt = t[-1].type; + } + + if (t->type == JIM_TT_SUBEXPR_START) { + if (builder->stack.len == exp_stacklen) { + Jim_SetResultFormatted(interp, "unexpected open parenthesis in expression: \"%#s\"", builder->exprObjPtr); + return JIM_ERR; + } + builder->parencount++; + rc = ExprTreeBuildTree(interp, builder, 0, EXPR_UNTIL_CLOSE, 1); + if (rc != JIM_OK) { + return rc; + } + + } + else if (t->type == JIM_TT_SUBEXPR_END) { + if (!(flags & EXPR_UNTIL_CLOSE)) { + if (builder->stack.len == exp_stacklen && builder->level > 1) { + builder->token--; + builder->level--; + return JIM_OK; + } + Jim_SetResultFormatted(interp, "unexpected closing parenthesis in expression: \"%#s\"", builder->exprObjPtr); + return JIM_ERR; + } + builder->parencount--; + if (builder->stack.len == exp_stacklen) { + + break; + } + } + else if (t->type == JIM_TT_SUBEXPR_COMMA) { + if (!(flags & EXPR_FUNC_ARGS)) { + if (builder->stack.len == exp_stacklen) { + + builder->token--; + builder->level--; + return JIM_OK; + } + Jim_SetResultFormatted(interp, "unexpected comma in expression: \"%#s\"", builder->exprObjPtr); + return JIM_ERR; + } + else { + + if (builder->stack.len > exp_stacklen) { + Jim_SetResultFormatted(interp, "too many arguments to math function"); + return JIM_ERR; + } + } + + } + else if (t->type == JIM_EXPROP_COLON) { + if (!(flags & EXPR_TERNARY)) { + if (builder->level != 1) { + + builder->token--; + builder->level--; + return JIM_OK; + } + Jim_SetResultFormatted(interp, ": without ? 
in expression: \"%#s\"", builder->exprObjPtr); + return JIM_ERR; + } + if (builder->stack.len == exp_stacklen) { + + builder->token--; + builder->level--; + return JIM_OK; + } + + } + else if (TOKEN_IS_EXPR_OP(t->type)) { + const struct Jim_ExprOperator *op; + + + if (TOKEN_IS_EXPR_OP(prevtt) || TOKEN_IS_EXPR_START(prevtt)) { + if (t->type == JIM_EXPROP_SUB) { + t->type = JIM_EXPROP_UNARYMINUS; + } + else if (t->type == JIM_EXPROP_ADD) { + t->type = JIM_EXPROP_UNARYPLUS; + } + } + + op = JimExprOperatorInfoByOpcode(t->type); + + if (op->precedence < precedence || (!(op->attr & OP_RIGHT_ASSOC) && op->precedence == precedence)) { + + builder->token--; + break; + } + + if (op->attr & OP_FUNC) { + if (builder->token->type != JIM_TT_SUBEXPR_START) { + Jim_SetResultString(interp, "missing arguments for math function", -1); + return JIM_ERR; + } + builder->token++; + if (op->arity == 0) { + if (builder->token->type != JIM_TT_SUBEXPR_END) { + Jim_SetResultString(interp, "too many arguments for math function", -1); + return JIM_ERR; + } + builder->token++; + goto noargs; + } + builder->parencount++; + + + rc = ExprTreeBuildTree(interp, builder, 0, EXPR_FUNC_ARGS | EXPR_UNTIL_CLOSE, op->arity); + } + else if (t->type == JIM_EXPROP_TERNARY) { + + rc = ExprTreeBuildTree(interp, builder, op->precedence, EXPR_TERNARY, 2); + } + else { + rc = ExprTreeBuildTree(interp, builder, op->precedence, 0, 1); + } + + if (rc != JIM_OK) { + return rc; + } + +noargs: + node = builder->next++; + node->type = t->type; + + if (op->arity >= 3) { + node->ternary = Jim_StackPop(&builder->stack); + if (node->ternary == NULL) { + goto missingoperand; + } + } + if (op->arity >= 2) { + node->right = Jim_StackPop(&builder->stack); + if (node->right == NULL) { + goto missingoperand; + } + } + if (op->arity >= 1) { + node->left = Jim_StackPop(&builder->stack); + if (node->left == NULL) { +missingoperand: + Jim_SetResultFormatted(interp, "missing operand to %s in expression: \"%#s\"", op->name, builder->exprObjPtr); + builder->next--; + return JIM_ERR; + + } + } + + + Jim_StackPush(&builder->stack, node); + } + else { + Jim_Obj *objPtr = NULL; + + + + + if (!TOKEN_IS_EXPR_START(prevtt) && !TOKEN_IS_EXPR_OP(prevtt)) { + Jim_SetResultFormatted(interp, "missing operator in expression: \"%#s\"", builder->exprObjPtr); + return JIM_ERR; + } + + + if (t->type == JIM_TT_EXPR_INT || t->type == JIM_TT_EXPR_DOUBLE) { + char *endptr; + if (t->type == JIM_TT_EXPR_INT) { + objPtr = Jim_NewIntObj(interp, jim_strtoull(t->token, &endptr)); + } + else { + objPtr = Jim_NewDoubleObj(interp, strtod(t->token, &endptr)); + } + if (endptr != t->token + t->len) { + + Jim_FreeNewObj(interp, objPtr); + objPtr = NULL; + } + } + + if (!objPtr) { + + objPtr = Jim_NewStringObj(interp, t->token, t->len); + if (t->type == JIM_TT_CMD) { + + Jim_SetSourceInfo(interp, objPtr, builder->fileNameObj, t->line); + } + } + + + node = builder->next++; + node->objPtr = objPtr; + Jim_IncrRefCount(node->objPtr); + node->type = t->type; + Jim_StackPush(&builder->stack, node); + } + } + + if (builder->stack.len == exp_stacklen) { + builder->level--; + return JIM_OK; + } + + if ((flags & EXPR_FUNC_ARGS)) { + Jim_SetResultFormatted(interp, "too %s arguments for math function", (builder->stack.len < exp_stacklen) ? 
"few" : "many"); + } + else { + if (builder->stack.len < exp_stacklen) { + if (builder->level == 0) { + Jim_SetResultFormatted(interp, "empty expression"); + } + else { + Jim_SetResultFormatted(interp, "syntax error in expression \"%#s\": premature end of expression", builder->exprObjPtr); + } + } + else { + Jim_SetResultFormatted(interp, "extra terms after expression"); + } + } + + return JIM_ERR; +} + +static struct ExprTree *ExprTreeCreateTree(Jim_Interp *interp, const ParseTokenList *tokenlist, Jim_Obj *exprObjPtr, Jim_Obj *fileNameObj) +{ + struct ExprTree *expr; + struct ExprBuilder builder; + int rc; + struct JimExprNode *top = NULL; + + builder.parencount = 0; + builder.level = 0; + builder.token = builder.first_token = tokenlist->list; + builder.exprObjPtr = exprObjPtr; + builder.fileNameObj = fileNameObj; + + builder.nodes = Jim_Alloc(sizeof(struct JimExprNode) * (tokenlist->count - 1)); + memset(builder.nodes, 0, sizeof(struct JimExprNode) * (tokenlist->count - 1)); + builder.next = builder.nodes; + Jim_InitStack(&builder.stack); + + rc = ExprTreeBuildTree(interp, &builder, 0, 0, 1); + + if (rc == JIM_OK) { + top = Jim_StackPop(&builder.stack); + + if (builder.parencount) { + Jim_SetResultString(interp, "missing close parenthesis", -1); + rc = JIM_ERR; + } + } + + + Jim_FreeStack(&builder.stack); + + if (rc != JIM_OK) { + ExprTreeFreeNodes(interp, builder.nodes, builder.next - builder.nodes); + return NULL; + } + + expr = Jim_Alloc(sizeof(*expr)); + expr->inUse = 1; + expr->expr = top; + expr->nodes = builder.nodes; + expr->len = builder.next - builder.nodes; + + assert(expr->len <= tokenlist->count - 1); + + return expr; +} + +static int SetExprFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr) +{ + int exprTextLen; + const char *exprText; + struct JimParserCtx parser; + struct ExprTree *expr; + ParseTokenList tokenlist; + int line; + Jim_Obj *fileNameObj; + int rc = JIM_ERR; + + + fileNameObj = Jim_GetSourceInfo(interp, objPtr, &line); + Jim_IncrRefCount(fileNameObj); + + exprText = Jim_GetString(objPtr, &exprTextLen); + + + ScriptTokenListInit(&tokenlist); + + JimParserInit(&parser, exprText, exprTextLen, line); + while (!parser.eof) { + if (JimParseExpression(&parser) != JIM_OK) { + ScriptTokenListFree(&tokenlist); + Jim_SetResultFormatted(interp, "syntax error in expression: \"%#s\"", objPtr); + if (parser.errmsg) { + Jim_AppendStrings(interp, Jim_GetResult(interp), ": ", parser.errmsg, NULL); + } + expr = NULL; + goto err; + } + + ScriptAddToken(&tokenlist, parser.tstart, parser.tend - parser.tstart + 1, parser.tt, + parser.tline); + } + +#ifdef DEBUG_SHOW_EXPR_TOKENS + { + int i; + printf("==== Expr Tokens (%s) ====\n", Jim_String(fileNameObj)); + for (i = 0; i < tokenlist.count; i++) { + printf("[%2d]@%d %s '%.*s'\n", i, tokenlist.list[i].line, jim_tt_name(tokenlist.list[i].type), + tokenlist.list[i].len, tokenlist.list[i].token); + } + } +#endif + + if (tokenlist.count <= 1) { + Jim_SetResultString(interp, "empty expression", -1); + rc = JIM_ERR; + } + else { + rc = JimParseCheckMissing(interp, parser.missing.ch); + } + if (rc != JIM_OK) { + ScriptTokenListFree(&tokenlist); + Jim_DecrRefCount(interp, fileNameObj); + return rc; + } + + + expr = ExprTreeCreateTree(interp, &tokenlist, objPtr, fileNameObj); + + + ScriptTokenListFree(&tokenlist); + + if (!expr) { + goto err; + } + +#ifdef DEBUG_SHOW_EXPR + printf("==== Expr ====\n"); + JimShowExprNode(expr->expr, 0); +#endif + + rc = JIM_OK; + + err: + + Jim_DecrRefCount(interp, fileNameObj); + Jim_FreeIntRep(interp, 
objPtr); + Jim_SetIntRepPtr(objPtr, expr); + objPtr->typePtr = &exprObjType; + return rc; +} + +static struct ExprTree *JimGetExpression(Jim_Interp *interp, Jim_Obj *objPtr) +{ + if (objPtr->typePtr != &exprObjType) { + if (SetExprFromAny(interp, objPtr) != JIM_OK) { + return NULL; + } + } + return (struct ExprTree *) Jim_GetIntRepPtr(objPtr); +} + +#ifdef JIM_OPTIMIZATION +static Jim_Obj *JimExprIntValOrVar(Jim_Interp *interp, struct JimExprNode *node) +{ + if (node->type == JIM_TT_EXPR_INT) + return node->objPtr; + else if (node->type == JIM_TT_VAR) + return Jim_GetVariable(interp, node->objPtr, JIM_NONE); + else if (node->type == JIM_TT_DICTSUGAR) + return JimExpandDictSugar(interp, node->objPtr); + else + return NULL; +} +#endif + + +static int JimExprEvalTermNode(Jim_Interp *interp, struct JimExprNode *node) +{ + if (TOKEN_IS_EXPR_OP(node->type)) { + const struct Jim_ExprOperator *op = JimExprOperatorInfoByOpcode(node->type); + return op->funcop(interp, node); + } + else { + Jim_Obj *objPtr; + + + switch (node->type) { + case JIM_TT_EXPR_INT: + case JIM_TT_EXPR_DOUBLE: + case JIM_TT_EXPR_BOOLEAN: + case JIM_TT_STR: + Jim_SetResult(interp, node->objPtr); + return JIM_OK; + + case JIM_TT_VAR: + objPtr = Jim_GetVariable(interp, node->objPtr, JIM_ERRMSG); + if (objPtr) { + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + return JIM_ERR; + + case JIM_TT_DICTSUGAR: + objPtr = JimExpandDictSugar(interp, node->objPtr); + if (objPtr) { + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + return JIM_ERR; + + case JIM_TT_ESC: + if (interp->safeexpr) { + return JIM_ERR; + } + if (Jim_SubstObj(interp, node->objPtr, &objPtr, JIM_NONE) == JIM_OK) { + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + return JIM_ERR; + + case JIM_TT_CMD: + if (interp->safeexpr) { + return JIM_ERR; + } + return Jim_EvalObj(interp, node->objPtr); + + default: + + return JIM_ERR; + } + } +} + +static int JimExprGetTerm(Jim_Interp *interp, struct JimExprNode *node, Jim_Obj **objPtrPtr) +{ + int rc = JimExprEvalTermNode(interp, node); + if (rc == JIM_OK) { + *objPtrPtr = Jim_GetResult(interp); + Jim_IncrRefCount(*objPtrPtr); + } + return rc; +} + +static int JimExprGetTermBoolean(Jim_Interp *interp, struct JimExprNode *node) +{ + if (JimExprEvalTermNode(interp, node) == JIM_OK) { + return ExprBool(interp, Jim_GetResult(interp)); + } + return -1; +} + +int Jim_EvalExpression(Jim_Interp *interp, Jim_Obj *exprObjPtr) +{ + struct ExprTree *expr; + int retcode = JIM_OK; + + Jim_IncrRefCount(exprObjPtr); + expr = JimGetExpression(interp, exprObjPtr); + if (!expr) { + retcode = JIM_ERR; + goto done; + } + +#ifdef JIM_OPTIMIZATION + if (!interp->safeexpr) { + Jim_Obj *objPtr; + + + switch (expr->len) { + case 1: + objPtr = JimExprIntValOrVar(interp, expr->expr); + if (objPtr) { + Jim_SetResult(interp, objPtr); + goto done; + } + break; + + case 2: + if (expr->expr->type == JIM_EXPROP_NOT) { + objPtr = JimExprIntValOrVar(interp, expr->expr->left); + + if (objPtr && JimIsWide(objPtr)) { + Jim_SetResult(interp, JimWideValue(objPtr) ? 
interp->falseObj : interp->trueObj); + goto done; + } + } + break; + + case 3: + objPtr = JimExprIntValOrVar(interp, expr->expr->left); + if (objPtr && JimIsWide(objPtr)) { + Jim_Obj *objPtr2 = JimExprIntValOrVar(interp, expr->expr->right); + if (objPtr2 && JimIsWide(objPtr2)) { + jim_wide wideValueA = JimWideValue(objPtr); + jim_wide wideValueB = JimWideValue(objPtr2); + int cmpRes; + switch (expr->expr->type) { + case JIM_EXPROP_LT: + cmpRes = wideValueA < wideValueB; + break; + case JIM_EXPROP_LTE: + cmpRes = wideValueA <= wideValueB; + break; + case JIM_EXPROP_GT: + cmpRes = wideValueA > wideValueB; + break; + case JIM_EXPROP_GTE: + cmpRes = wideValueA >= wideValueB; + break; + case JIM_EXPROP_NUMEQ: + cmpRes = wideValueA == wideValueB; + break; + case JIM_EXPROP_NUMNE: + cmpRes = wideValueA != wideValueB; + break; + default: + goto noopt; + } + Jim_SetResult(interp, cmpRes ? interp->trueObj : interp->falseObj); + goto done; + } + } + break; + } + } +noopt: +#endif + + expr->inUse++; + + + retcode = JimExprEvalTermNode(interp, expr->expr); + + + Jim_FreeIntRep(interp, exprObjPtr); + exprObjPtr->typePtr = &exprObjType; + Jim_SetIntRepPtr(exprObjPtr, expr); + +done: + Jim_DecrRefCount(interp, exprObjPtr); + + return retcode; +} + +int Jim_GetBoolFromExpr(Jim_Interp *interp, Jim_Obj *exprObjPtr, int *boolPtr) +{ + int retcode = Jim_EvalExpression(interp, exprObjPtr); + + if (retcode == JIM_OK) { + switch (ExprBool(interp, Jim_GetResult(interp))) { + case 0: + *boolPtr = 0; + break; + + case 1: + *boolPtr = 1; + break; + + case -1: + retcode = JIM_ERR; + break; + } + } + return retcode; +} + + + + +typedef struct ScanFmtPartDescr +{ + const char *arg; + const char *prefix; + size_t width; + int pos; + char type; + char modifier; +} ScanFmtPartDescr; + + +typedef struct ScanFmtStringObj +{ + jim_wide size; + char *stringRep; + size_t count; + size_t convCount; + size_t maxPos; + const char *error; + char *scratch; + ScanFmtPartDescr descr[1]; +} ScanFmtStringObj; + + +static void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr); +static void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr); +static void UpdateStringOfScanFmt(Jim_Obj *objPtr); + +static const Jim_ObjType scanFmtStringObjType = { + "scanformatstring", + FreeScanFmtInternalRep, + DupScanFmtInternalRep, + UpdateStringOfScanFmt, + JIM_TYPE_NONE, +}; + +void FreeScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *objPtr) +{ + JIM_NOTUSED(interp); + Jim_Free((char *)objPtr->internalRep.ptr); + objPtr->internalRep.ptr = 0; +} + +void DupScanFmtInternalRep(Jim_Interp *interp, Jim_Obj *srcPtr, Jim_Obj *dupPtr) +{ + size_t size = (size_t) ((ScanFmtStringObj *) srcPtr->internalRep.ptr)->size; + ScanFmtStringObj *newVec = (ScanFmtStringObj *) Jim_Alloc(size); + + JIM_NOTUSED(interp); + memcpy(newVec, srcPtr->internalRep.ptr, size); + dupPtr->internalRep.ptr = newVec; + dupPtr->typePtr = &scanFmtStringObjType; +} + +static void UpdateStringOfScanFmt(Jim_Obj *objPtr) +{ + JimSetStringBytes(objPtr, ((ScanFmtStringObj *) objPtr->internalRep.ptr)->stringRep); +} + + +static int SetScanFmtFromAny(Jim_Interp *interp, Jim_Obj *objPtr) +{ + ScanFmtStringObj *fmtObj; + char *buffer; + int maxCount, i, approxSize, lastPos = -1; + const char *fmt = Jim_String(objPtr); + int maxFmtLen = Jim_Length(objPtr); + const char *fmtEnd = fmt + maxFmtLen; + int curr; + + Jim_FreeIntRep(interp, objPtr); + + for (i = 0, maxCount = 0; i < maxFmtLen; ++i) + if (fmt[i] == '%') + ++maxCount; + + approxSize = 
sizeof(ScanFmtStringObj) + +(maxCount + 1) * sizeof(ScanFmtPartDescr) + +maxFmtLen * sizeof(char) + 3 + 1 + + maxFmtLen * sizeof(char) + 1 + + maxFmtLen * sizeof(char) + +(maxCount + 1) * sizeof(char) + +1; + fmtObj = (ScanFmtStringObj *) Jim_Alloc(approxSize); + memset(fmtObj, 0, approxSize); + fmtObj->size = approxSize; + fmtObj->maxPos = 0; + fmtObj->scratch = (char *)&fmtObj->descr[maxCount + 1]; + fmtObj->stringRep = fmtObj->scratch + maxFmtLen + 3 + 1; + memcpy(fmtObj->stringRep, fmt, maxFmtLen); + buffer = fmtObj->stringRep + maxFmtLen + 1; + objPtr->internalRep.ptr = fmtObj; + objPtr->typePtr = &scanFmtStringObjType; + for (i = 0, curr = 0; fmt < fmtEnd; ++fmt) { + int width = 0, skip; + ScanFmtPartDescr *descr = &fmtObj->descr[curr]; + + fmtObj->count++; + descr->width = 0; + + if (*fmt != '%' || fmt[1] == '%') { + descr->type = 0; + descr->prefix = &buffer[i]; + for (; fmt < fmtEnd; ++fmt) { + if (*fmt == '%') { + if (fmt[1] != '%') + break; + ++fmt; + } + buffer[i++] = *fmt; + } + buffer[i++] = 0; + } + + ++fmt; + + if (fmt >= fmtEnd) + goto done; + descr->pos = 0; + if (*fmt == '*') { + descr->pos = -1; + ++fmt; + } + else + fmtObj->convCount++; + + if (sscanf(fmt, "%d%n", &width, &skip) == 1) { + fmt += skip; + + if (descr->pos != -1 && *fmt == '$') { + int prev; + + ++fmt; + descr->pos = width; + width = 0; + + if ((lastPos == 0 && descr->pos > 0) + || (lastPos > 0 && descr->pos == 0)) { + fmtObj->error = "cannot mix \"%\" and \"%n$\" conversion specifiers"; + return JIM_ERR; + } + + for (prev = 0; prev < curr; ++prev) { + if (fmtObj->descr[prev].pos == -1) + continue; + if (fmtObj->descr[prev].pos == descr->pos) { + fmtObj->error = + "variable is assigned by multiple \"%n$\" conversion specifiers"; + return JIM_ERR; + } + } + if (descr->pos < 0) { + fmtObj->error = + "\"%n$\" conversion specifier is negative"; + return JIM_ERR; + } + + if (sscanf(fmt, "%d%n", &width, &skip) == 1) { + descr->width = width; + fmt += skip; + } + if (descr->pos > 0 && (size_t) descr->pos > fmtObj->maxPos) + fmtObj->maxPos = descr->pos; + } + else { + + descr->width = width; + } + } + + if (lastPos == -1) + lastPos = descr->pos; + + if (*fmt == '[') { + int swapped = 1, beg = i, end, j; + + descr->type = '['; + descr->arg = &buffer[i]; + ++fmt; + if (*fmt == '^') + buffer[i++] = *fmt++; + if (*fmt == ']') + buffer[i++] = *fmt++; + while (*fmt && *fmt != ']') + buffer[i++] = *fmt++; + if (*fmt != ']') { + fmtObj->error = "unmatched [ in format string"; + return JIM_ERR; + } + end = i; + buffer[i++] = 0; + + while (swapped) { + swapped = 0; + for (j = beg + 1; j < end - 1; ++j) { + if (buffer[j] == '-' && buffer[j - 1] > buffer[j + 1]) { + char tmp = buffer[j - 1]; + + buffer[j - 1] = buffer[j + 1]; + buffer[j + 1] = tmp; + swapped = 1; + } + } + } + } + else { + + if (fmt < fmtEnd && strchr("hlL", *fmt)) + descr->modifier = tolower((int)*fmt++); + + if (fmt >= fmtEnd) { + fmtObj->error = "missing scan conversion character"; + return JIM_ERR; + } + + descr->type = *fmt; + if (strchr("efgcsndoxui", *fmt) == 0) { + fmtObj->error = "bad scan conversion character"; + return JIM_ERR; + } + else if (*fmt == 'c' && descr->width != 0) { + fmtObj->error = "field width may not be specified in %c " "conversion"; + return JIM_ERR; + } + else if (*fmt == 'u' && descr->modifier == 'l') { + fmtObj->error = "unsigned wide not supported"; + return JIM_ERR; + } + } + curr++; + } + done: + return JIM_OK; +} + + + +#define FormatGetCnvCount(_fo_) \ + ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->convCount +#define 
FormatGetMaxPos(_fo_) \ + ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->maxPos +#define FormatGetError(_fo_) \ + ((ScanFmtStringObj*)((_fo_)->internalRep.ptr))->error + +static Jim_Obj *JimScanAString(Jim_Interp *interp, const char *sdescr, const char *str) +{ + char *buffer = Jim_StrDup(str); + char *p = buffer; + + while (*str) { + int c; + int n; + + if (!sdescr && isspace(UCHAR(*str))) + break; + + n = utf8_tounicode(str, &c); + if (sdescr && !JimCharsetMatch(sdescr, strlen(sdescr), c, JIM_CHARSET_SCAN)) + break; + while (n--) + *p++ = *str++; + } + *p = 0; + return Jim_NewStringObjNoAlloc(interp, buffer, p - buffer); +} + + +static int ScanOneEntry(Jim_Interp *interp, const char *str, int pos, int str_bytelen, + ScanFmtStringObj * fmtObj, long idx, Jim_Obj **valObjPtr) +{ + const char *tok; + const ScanFmtPartDescr *descr = &fmtObj->descr[idx]; + size_t scanned = 0; + size_t anchor = pos; + int i; + Jim_Obj *tmpObj = NULL; + + + *valObjPtr = 0; + if (descr->prefix) { + for (i = 0; pos < str_bytelen && descr->prefix[i]; ++i) { + + if (isspace(UCHAR(descr->prefix[i]))) + while (pos < str_bytelen && isspace(UCHAR(str[pos]))) + ++pos; + else if (descr->prefix[i] != str[pos]) + break; + else + ++pos; + } + if (pos >= str_bytelen) { + return -1; + } + else if (descr->prefix[i] != 0) + return 0; + } + + if (descr->type != 'c' && descr->type != '[' && descr->type != 'n') + while (isspace(UCHAR(str[pos]))) + ++pos; + + + scanned = pos - anchor; + + + if (descr->type == 'n') { + + *valObjPtr = Jim_NewIntObj(interp, anchor + scanned); + } + else if (pos >= str_bytelen) { + + return -1; + } + else if (descr->type == 'c') { + int c; + scanned += utf8_tounicode(&str[pos], &c); + *valObjPtr = Jim_NewIntObj(interp, c); + return scanned; + } + else { + + if (descr->width > 0) { + size_t sLen = utf8_strlen(&str[pos], str_bytelen - pos); + size_t tLen = descr->width > sLen ? sLen : descr->width; + + tmpObj = Jim_NewStringObjUtf8(interp, str + pos, tLen); + tok = tmpObj->bytes; + } + else { + + tok = &str[pos]; + } + switch (descr->type) { + case 'd': + case 'o': + case 'x': + case 'u': + case 'i':{ + char *endp; + jim_wide w; + + int base = descr->type == 'o' ? 8 + : descr->type == 'x' ? 16 : descr->type == 'i' ? 0 : 10; + + + if (base == 0) { + w = jim_strtoull(tok, &endp); + } + else { + w = strtoull(tok, &endp, base); + } + + if (endp != tok) { + + *valObjPtr = Jim_NewIntObj(interp, w); + + + scanned += endp - tok; + } + else { + scanned = *tok ? 0 : -1; + } + break; + } + case 's': + case '[':{ + *valObjPtr = JimScanAString(interp, descr->arg, tok); + scanned += Jim_Length(*valObjPtr); + break; + } + case 'e': + case 'f': + case 'g':{ + char *endp; + double value = strtod(tok, &endp); + + if (endp != tok) { + + *valObjPtr = Jim_NewDoubleObj(interp, value); + + scanned += endp - tok; + } + else { + scanned = *tok ? 
0 : -1; + } + break; + } + } + if (tmpObj) { + Jim_FreeNewObj(interp, tmpObj); + } + } + return scanned; +} + + +Jim_Obj *Jim_ScanString(Jim_Interp *interp, Jim_Obj *strObjPtr, Jim_Obj *fmtObjPtr, int flags) +{ + size_t i, pos; + int scanned = 1; + const char *str = Jim_String(strObjPtr); + int str_bytelen = Jim_Length(strObjPtr); + Jim_Obj *resultList = 0; + Jim_Obj **resultVec = 0; + int resultc; + Jim_Obj *emptyStr = 0; + ScanFmtStringObj *fmtObj; + + + JimPanic((fmtObjPtr->typePtr != &scanFmtStringObjType, "Jim_ScanString() for non-scan format")); + + fmtObj = (ScanFmtStringObj *) fmtObjPtr->internalRep.ptr; + + if (fmtObj->error != 0) { + if (flags & JIM_ERRMSG) + Jim_SetResultString(interp, fmtObj->error, -1); + return 0; + } + + emptyStr = Jim_NewEmptyStringObj(interp); + Jim_IncrRefCount(emptyStr); + + resultList = Jim_NewListObj(interp, NULL, 0); + if (fmtObj->maxPos > 0) { + for (i = 0; i < fmtObj->maxPos; ++i) + Jim_ListAppendElement(interp, resultList, emptyStr); + JimListGetElements(interp, resultList, &resultc, &resultVec); + } + + for (i = 0, pos = 0; i < fmtObj->count; ++i) { + ScanFmtPartDescr *descr = &(fmtObj->descr[i]); + Jim_Obj *value = 0; + + + if (descr->type == 0) + continue; + + if (scanned > 0) + scanned = ScanOneEntry(interp, str, pos, str_bytelen, fmtObj, i, &value); + + if (scanned == -1 && i == 0) + goto eof; + + pos += scanned; + + + if (value == 0) + value = Jim_NewEmptyStringObj(interp); + + if (descr->pos == -1) { + Jim_FreeNewObj(interp, value); + } + else if (descr->pos == 0) + + Jim_ListAppendElement(interp, resultList, value); + else if (resultVec[descr->pos - 1] == emptyStr) { + + Jim_DecrRefCount(interp, resultVec[descr->pos - 1]); + Jim_IncrRefCount(value); + resultVec[descr->pos - 1] = value; + } + else { + + Jim_FreeNewObj(interp, value); + goto err; + } + } + Jim_DecrRefCount(interp, emptyStr); + return resultList; + eof: + Jim_DecrRefCount(interp, emptyStr); + Jim_FreeNewObj(interp, resultList); + return (Jim_Obj *)EOF; + err: + Jim_DecrRefCount(interp, emptyStr); + Jim_FreeNewObj(interp, resultList); + return 0; +} + + +static void JimPrngInit(Jim_Interp *interp) +{ +#define PRNG_SEED_SIZE 256 + int i; + unsigned int *seed; + time_t t = time(NULL); + + interp->prngState = Jim_Alloc(sizeof(Jim_PrngState)); + + seed = Jim_Alloc(PRNG_SEED_SIZE * sizeof(*seed)); + for (i = 0; i < PRNG_SEED_SIZE; i++) { + seed[i] = (rand() ^ t ^ clock()); + } + JimPrngSeed(interp, (unsigned char *)seed, PRNG_SEED_SIZE * sizeof(*seed)); + Jim_Free(seed); +} + + +static void JimRandomBytes(Jim_Interp *interp, void *dest, unsigned int len) +{ + Jim_PrngState *prng; + unsigned char *destByte = (unsigned char *)dest; + unsigned int si, sj, x; + + + if (interp->prngState == NULL) + JimPrngInit(interp); + prng = interp->prngState; + + for (x = 0; x < len; x++) { + prng->i = (prng->i + 1) & 0xff; + si = prng->sbox[prng->i]; + prng->j = (prng->j + si) & 0xff; + sj = prng->sbox[prng->j]; + prng->sbox[prng->i] = sj; + prng->sbox[prng->j] = si; + *destByte++ = prng->sbox[(si + sj) & 0xff]; + } +} + + +static void JimPrngSeed(Jim_Interp *interp, unsigned char *seed, int seedLen) +{ + int i; + Jim_PrngState *prng; + + + if (interp->prngState == NULL) + JimPrngInit(interp); + prng = interp->prngState; + + + for (i = 0; i < 256; i++) + prng->sbox[i] = i; + + for (i = 0; i < seedLen; i++) { + unsigned char t; + + t = prng->sbox[i & 0xFF]; + prng->sbox[i & 0xFF] = prng->sbox[seed[i]]; + prng->sbox[seed[i]] = t; + } + prng->i = prng->j = 0; + + for (i = 0; i < 256; i += seedLen) { + 
JimRandomBytes(interp, seed, seedLen); + } +} + + +static int Jim_IncrCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_wide wideValue, increment = 1; + Jim_Obj *intObjPtr; + + if (argc != 2 && argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?increment?"); + return JIM_ERR; + } + if (argc == 3) { + if (Jim_GetWideExpr(interp, argv[2], &increment) != JIM_OK) + return JIM_ERR; + } + intObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); + if (!intObjPtr) { + + wideValue = 0; + } + else if (Jim_GetWide(interp, intObjPtr, &wideValue) != JIM_OK) { + return JIM_ERR; + } + if (!intObjPtr || Jim_IsShared(intObjPtr)) { + intObjPtr = Jim_NewIntObj(interp, wideValue + increment); + if (Jim_SetVariable(interp, argv[1], intObjPtr) != JIM_OK) { + Jim_FreeNewObj(interp, intObjPtr); + return JIM_ERR; + } + } + else { + + Jim_InvalidateStringRep(intObjPtr); + JimWideValue(intObjPtr) = wideValue + increment; + + if (argv[1]->typePtr != &variableObjType) { + + Jim_SetVariable(interp, argv[1], intObjPtr); + } + } + Jim_SetResult(interp, intObjPtr); + return JIM_OK; +} + + +#define JIM_EVAL_SARGV_LEN 8 +#define JIM_EVAL_SINTV_LEN 8 + +static int JimTraceCallback(Jim_Interp *interp, const char *type, int argc, Jim_Obj *const *argv) +{ + JimPanic((interp->traceCmdObj == NULL, "xtrace invoked with no object")); + + int ret; + Jim_Obj *nargv[7]; + Jim_Obj *traceCmdObj = interp->traceCmdObj; + Jim_Obj *resultObj = Jim_GetResult(interp); + ScriptObj *script = NULL; + + + + if (interp->evalFrame->scriptObj) { + script = JimGetScript(interp, interp->evalFrame->scriptObj); + } + + nargv[0] = traceCmdObj; + nargv[1] = Jim_NewStringObj(interp, type, -1); + nargv[2] = script ? script->fileNameObj : interp->emptyObj; + nargv[3] = Jim_NewIntObj(interp, script ? 
script->linenr : 1); + nargv[4] = resultObj; + nargv[5] = argv[0]; + nargv[6] = Jim_NewListObj(interp, argv + 1, argc - 1); + + + interp->traceCmdObj = NULL; + + Jim_IncrRefCount(resultObj); + ret = Jim_EvalObjVector(interp, 7, nargv); + Jim_DecrRefCount(interp, resultObj); + + if (ret == JIM_OK || ret == JIM_RETURN) { + + interp->traceCmdObj = traceCmdObj; + Jim_SetEmptyResult(interp); + ret = JIM_OK; + } + else { + + Jim_DecrRefCount(interp, traceCmdObj); + } + return ret; +} + + +static int JimUnknown(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retcode; + + if (interp->unknown_called > 50) { + return JIM_ERR; + } + + + + if (Jim_GetCommand(interp, interp->unknown, JIM_NONE) == NULL) + return JIM_ERR; + + interp->unknown_called++; + + retcode = Jim_EvalObjPrefix(interp, interp->unknown, argc, argv); + interp->unknown_called--; + + return retcode; +} + +static void JimPushEvalFrame(Jim_Interp *interp, Jim_EvalFrame *frame, Jim_Obj *scriptObj) +{ + memset(frame, 0, sizeof(*frame)); + frame->parent = interp->evalFrame; + frame->level = frame->parent->level + 1; + frame->procLevel = interp->procLevel; + frame->framePtr = interp->framePtr; + if (scriptObj) { + frame->scriptObj = scriptObj; + } + else { + frame->scriptObj = frame->parent->scriptObj; + } + interp->evalFrame = frame; +#if 0 + if (frame->scriptObj) { + printf("script: %.*s\n", 20, Jim_String(frame->scriptObj)); + } +#endif +} + +static void JimPopEvalFrame(Jim_Interp *interp) +{ + interp->evalFrame = interp->evalFrame->parent; +} + + +static int JimInvokeCommand(Jim_Interp *interp, int objc, Jim_Obj *const *objv) +{ + int retcode; + Jim_Cmd *cmdPtr; + void *prevPrivData; + Jim_Obj *tailcallObj = NULL; + +#if 0 + printf("invoke"); + int j; + for (j = 0; j < objc; j++) { + printf(" '%s'", Jim_String(objv[j])); + } + printf("\n"); +#endif + + cmdPtr = Jim_GetCommand(interp, objv[0], JIM_ERRMSG); + if (cmdPtr == NULL) { + return JimUnknown(interp, objc, objv); + } + JimIncrCmdRefCount(cmdPtr); + + if (interp->evalDepth == interp->maxEvalDepth) { + Jim_SetResultString(interp, "Infinite eval recursion", -1); + retcode = JIM_ERR; + goto out; + } + interp->evalDepth++; + prevPrivData = interp->cmdPrivData; + +tailcall: + + interp->evalFrame->argc = objc; + interp->evalFrame->argv = objv; + interp->evalFrame->cmd = cmdPtr; + + if (!interp->traceCmdObj || + (retcode = JimTraceCallback(interp, "cmd", objc, objv)) == JIM_OK) { + + Jim_SetEmptyResult(interp); + if (cmdPtr->isproc) { + retcode = JimCallProcedure(interp, cmdPtr, objc, objv); + } + else { + interp->cmdPrivData = cmdPtr->u.native.privData; + retcode = cmdPtr->u.native.cmdProc(interp, objc, objv); + } + if (retcode == JIM_ERR) { + JimSetErrorStack(interp, NULL); + } + } + + if (tailcallObj) { + + Jim_DecrRefCount(interp, tailcallObj); + tailcallObj = NULL; + } + + + interp->evalFrame->argc = 0; + interp->evalFrame->argv = NULL; + + + if (retcode == JIM_EVAL && interp->framePtr->tailcallObj) { + JimDecrCmdRefCount(interp, cmdPtr); + + + cmdPtr = interp->framePtr->tailcallCmd; + interp->framePtr->tailcallCmd = NULL; + tailcallObj = interp->framePtr->tailcallObj; + interp->framePtr->tailcallObj = NULL; + objc = tailcallObj->internalRep.listValue.len; + objv = tailcallObj->internalRep.listValue.ele; + goto tailcall; + } + + interp->cmdPrivData = prevPrivData; + interp->evalDepth--; + +out: + JimDecrCmdRefCount(interp, cmdPtr); + + if (retcode == JIM_ERR) { + JimSetErrorStack(interp, NULL); + } + + if (interp->framePtr->tailcallObj) { + JimDecrCmdRefCount(interp, 
interp->framePtr->tailcallCmd); + Jim_DecrRefCount(interp, interp->framePtr->tailcallObj); + interp->framePtr->tailcallCmd = NULL; + interp->framePtr->tailcallObj = NULL; + } + + return retcode; +} + +int Jim_EvalObjVector(Jim_Interp *interp, int objc, Jim_Obj *const *objv) +{ + int i, retcode; + Jim_EvalFrame frame; + + + for (i = 0; i < objc; i++) + Jim_IncrRefCount(objv[i]); + + + JimPushEvalFrame(interp, &frame, NULL); + + retcode = JimInvokeCommand(interp, objc, objv); + + JimPopEvalFrame(interp); + + + for (i = 0; i < objc; i++) + Jim_DecrRefCount(interp, objv[i]); + + return retcode; +} + +int Jim_EvalObjPrefix(Jim_Interp *interp, Jim_Obj *prefix, int objc, Jim_Obj *const *objv) +{ + int ret; + Jim_Obj **nargv = Jim_Alloc((objc + 1) * sizeof(*nargv)); + + nargv[0] = prefix; + memcpy(&nargv[1], &objv[0], sizeof(nargv[0]) * objc); + ret = Jim_EvalObjVector(interp, objc + 1, nargv); + Jim_Free(nargv); + return ret; +} + +static int JimSubstOneToken(Jim_Interp *interp, const ScriptToken *token, Jim_Obj **objPtrPtr) +{ + Jim_Obj *objPtr; + int ret = JIM_ERR; + + switch (token->type) { + case JIM_TT_STR: + case JIM_TT_ESC: + objPtr = token->objPtr; + break; + case JIM_TT_VAR: + objPtr = Jim_GetVariable(interp, token->objPtr, JIM_ERRMSG); + break; + case JIM_TT_DICTSUGAR: + objPtr = JimExpandDictSugar(interp, token->objPtr); + break; + case JIM_TT_EXPRSUGAR: + ret = Jim_EvalExpression(interp, token->objPtr); + if (ret == JIM_OK) { + objPtr = Jim_GetResult(interp); + } + else { + objPtr = NULL; + } + break; + case JIM_TT_CMD: + ret = Jim_EvalObj(interp, token->objPtr); + if (ret == JIM_OK || ret == JIM_RETURN) { + objPtr = interp->result; + } else { + + objPtr = NULL; + } + break; + default: + JimPanic((1, + "default token type (%d) reached " "in Jim_SubstObj().", token->type)); + objPtr = NULL; + break; + } + if (objPtr) { + *objPtrPtr = objPtr; + return JIM_OK; + } + return ret; +} + +static Jim_Obj *JimInterpolateTokens(Jim_Interp *interp, const ScriptToken * token, int tokens, int flags) +{ + int totlen = 0, i; + Jim_Obj **intv; + Jim_Obj *sintv[JIM_EVAL_SINTV_LEN]; + Jim_Obj *objPtr; + char *s; + + if (tokens <= JIM_EVAL_SINTV_LEN) + intv = sintv; + else + intv = Jim_Alloc(sizeof(Jim_Obj *) * tokens); + + for (i = 0; i < tokens; i++) { + switch (JimSubstOneToken(interp, &token[i], &intv[i])) { + case JIM_OK: + case JIM_RETURN: + break; + case JIM_BREAK: + if (flags & JIM_SUBST_FLAG) { + + tokens = i; + continue; + } + + + case JIM_CONTINUE: + if (flags & JIM_SUBST_FLAG) { + intv[i] = NULL; + continue; + } + + + default: + while (i--) { + Jim_DecrRefCount(interp, intv[i]); + } + if (intv != sintv) { + Jim_Free(intv); + } + return NULL; + } + Jim_IncrRefCount(intv[i]); + Jim_String(intv[i]); + totlen += intv[i]->length; + } + + + if (tokens == 1 && intv[0] && intv == sintv) { + + intv[0]->refCount--; + return intv[0]; + } + + objPtr = Jim_NewStringObjNoAlloc(interp, NULL, 0); + + if (tokens == 4 && token[0].type == JIM_TT_ESC && token[1].type == JIM_TT_ESC + && token[2].type == JIM_TT_VAR) { + + objPtr->typePtr = &interpolatedObjType; + objPtr->internalRep.dictSubstValue.varNameObjPtr = token[0].objPtr; + objPtr->internalRep.dictSubstValue.indexObjPtr = intv[2]; + Jim_IncrRefCount(intv[2]); + } + else if (tokens && intv[0] && intv[0]->typePtr == &sourceObjType) { + + int line; + Jim_Obj *fileNameObj = Jim_GetSourceInfo(interp, intv[0], &line); + Jim_SetSourceInfo(interp, objPtr, fileNameObj, line); + } + + + s = objPtr->bytes = Jim_Alloc(totlen + 1); + objPtr->length = totlen; + for (i = 
0; i < tokens; i++) { + if (intv[i]) { + memcpy(s, intv[i]->bytes, intv[i]->length); + s += intv[i]->length; + Jim_DecrRefCount(interp, intv[i]); + } + } + objPtr->bytes[totlen] = '\0'; + + if (intv != sintv) { + Jim_Free(intv); + } + + return objPtr; +} + + +static int JimEvalObjList(Jim_Interp *interp, Jim_Obj *listPtr) +{ + int retcode = JIM_OK; + Jim_EvalFrame frame; + + JimPanic((Jim_IsList(listPtr) == 0, "JimEvalObjList() invoked on non-list.")); + + JimPushEvalFrame(interp, &frame, NULL); + + if (listPtr->internalRep.listValue.len) { + Jim_IncrRefCount(listPtr); + retcode = JimInvokeCommand(interp, + listPtr->internalRep.listValue.len, + listPtr->internalRep.listValue.ele); + Jim_DecrRefCount(interp, listPtr); + } + + JimPopEvalFrame(interp); + + return retcode; +} + +int Jim_EvalObjList(Jim_Interp *interp, Jim_Obj *listPtr) +{ + SetListFromAny(interp, listPtr); + return JimEvalObjList(interp, listPtr); +} + +int Jim_EvalObj(Jim_Interp *interp, Jim_Obj *scriptObjPtr) +{ + int i; + ScriptObj *script; + ScriptToken *token; + int retcode = JIM_OK; + Jim_Obj *sargv[JIM_EVAL_SARGV_LEN], **argv = NULL; + Jim_EvalFrame frame; + + if (Jim_IsList(scriptObjPtr) && scriptObjPtr->bytes == NULL) { + return JimEvalObjList(interp, scriptObjPtr); + } + + Jim_IncrRefCount(scriptObjPtr); + script = JimGetScript(interp, scriptObjPtr); + if (JimParseCheckMissing(interp, script->missing) == JIM_ERR) { + JimSetErrorStack(interp, script); + Jim_DecrRefCount(interp, scriptObjPtr); + return JIM_ERR; + } + + Jim_SetEmptyResult(interp); + + token = script->token; + +#ifdef JIM_OPTIMIZATION + if (script->len == 0) { + Jim_DecrRefCount(interp, scriptObjPtr); + return JIM_OK; + } + if (script->len == 3 + && token[1].objPtr->typePtr == &commandObjType + && token[1].objPtr->internalRep.cmdValue.cmdPtr->isproc == 0 + && token[1].objPtr->internalRep.cmdValue.cmdPtr->u.native.cmdProc == Jim_IncrCoreCommand + && token[2].objPtr->typePtr == &variableObjType) { + + Jim_Obj *objPtr = Jim_GetVariable(interp, token[2].objPtr, JIM_NONE); + + if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &intObjType) { + JimWideValue(objPtr)++; + Jim_InvalidateStringRep(objPtr); + Jim_DecrRefCount(interp, scriptObjPtr); + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + } +#endif + + script->inUse++; + + JimPushEvalFrame(interp, &frame, scriptObjPtr); + + + interp->errorFlag = 0; + argv = sargv; + + for (i = 0; i < script->len && retcode == JIM_OK; ) { + int argc; + int j; + + + argc = token[i].objPtr->internalRep.scriptLineValue.argc; + script->linenr = token[i].objPtr->internalRep.scriptLineValue.line; + + + if (argc > JIM_EVAL_SARGV_LEN) + argv = Jim_Alloc(sizeof(Jim_Obj *) * argc); + + + i++; + + for (j = 0; j < argc; j++) { + long wordtokens = 1; + int expand = 0; + Jim_Obj *wordObjPtr = NULL; + + if (token[i].type == JIM_TT_WORD) { + wordtokens = JimWideValue(token[i++].objPtr); + if (wordtokens < 0) { + expand = 1; + wordtokens = -wordtokens; + } + } + + if (wordtokens == 1) { + + switch (token[i].type) { + case JIM_TT_ESC: + case JIM_TT_STR: + wordObjPtr = token[i].objPtr; + break; + case JIM_TT_VAR: + wordObjPtr = Jim_GetVariable(interp, token[i].objPtr, JIM_ERRMSG); + break; + case JIM_TT_EXPRSUGAR: + retcode = Jim_EvalExpression(interp, token[i].objPtr); + if (retcode == JIM_OK) { + wordObjPtr = Jim_GetResult(interp); + } + else { + wordObjPtr = NULL; + } + break; + case JIM_TT_DICTSUGAR: + wordObjPtr = JimExpandDictSugar(interp, token[i].objPtr); + break; + case JIM_TT_CMD: + retcode = Jim_EvalObj(interp, 
token[i].objPtr); + if (retcode == JIM_OK) { + wordObjPtr = Jim_GetResult(interp); + } + break; + default: + JimPanic((1, "default token type reached " "in Jim_EvalObj().")); + } + } + else { + wordObjPtr = JimInterpolateTokens(interp, token + i, wordtokens, JIM_NONE); + } + + if (!wordObjPtr) { + if (retcode == JIM_OK) { + retcode = JIM_ERR; + } + break; + } + + Jim_IncrRefCount(wordObjPtr); + i += wordtokens; + + if (!expand) { + argv[j] = wordObjPtr; + } + else { + + int len = Jim_ListLength(interp, wordObjPtr); + int newargc = argc + len - 1; + int k; + + if (len > 1) { + if (argv == sargv) { + if (newargc > JIM_EVAL_SARGV_LEN) { + argv = Jim_Alloc(sizeof(*argv) * newargc); + memcpy(argv, sargv, sizeof(*argv) * j); + } + } + else { + + argv = Jim_Realloc(argv, sizeof(*argv) * newargc); + } + } + + + for (k = 0; k < len; k++) { + argv[j++] = wordObjPtr->internalRep.listValue.ele[k]; + Jim_IncrRefCount(wordObjPtr->internalRep.listValue.ele[k]); + } + + Jim_DecrRefCount(interp, wordObjPtr); + + + j--; + argc += len - 1; + } + } + + if (retcode == JIM_OK && argc) { + + retcode = JimInvokeCommand(interp, argc, argv); + + if (Jim_CheckSignal(interp)) { + retcode = JIM_SIGNAL; + } + } + + + while (j-- > 0) { + Jim_DecrRefCount(interp, argv[j]); + } + + if (argv != sargv) { + Jim_Free(argv); + argv = sargv; + } + } + + + if (retcode == JIM_ERR) { + JimSetErrorStack(interp, NULL); + } + + JimPopEvalFrame(interp); + + Jim_FreeIntRep(interp, scriptObjPtr); + scriptObjPtr->typePtr = &scriptObjType; + Jim_SetIntRepPtr(scriptObjPtr, script); + Jim_DecrRefCount(interp, scriptObjPtr); + + return retcode; +} + +static int JimSetProcArg(Jim_Interp *interp, Jim_Obj *argNameObj, Jim_Obj *argValObj) +{ + int retcode; + + const char *varname = Jim_String(argNameObj); + if (*varname == '&') { + + Jim_Obj *objPtr; + Jim_CallFrame *savedCallFrame = interp->framePtr; + + interp->framePtr = interp->framePtr->parent; + objPtr = Jim_GetVariable(interp, argValObj, JIM_ERRMSG); + interp->framePtr = savedCallFrame; + if (!objPtr) { + return JIM_ERR; + } + + + objPtr = Jim_NewStringObj(interp, varname + 1, -1); + Jim_IncrRefCount(objPtr); + retcode = Jim_SetVariableLink(interp, objPtr, argValObj, interp->framePtr->parent); + Jim_DecrRefCount(interp, objPtr); + } + else { + retcode = Jim_SetVariable(interp, argNameObj, argValObj); + } + return retcode; +} + +static void JimSetProcWrongArgs(Jim_Interp *interp, Jim_Obj *procNameObj, Jim_Cmd *cmd) +{ + + Jim_Obj *argmsg = Jim_NewStringObj(interp, "", 0); + int i; + + for (i = 0; i < cmd->u.proc.argListLen; i++) { + Jim_AppendString(interp, argmsg, " ", 1); + + if (i == cmd->u.proc.argsPos) { + if (cmd->u.proc.arglist[i].defaultObjPtr) { + + Jim_AppendString(interp, argmsg, "?", 1); + Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].defaultObjPtr); + Jim_AppendString(interp, argmsg, " ...?", -1); + } + else { + + Jim_AppendString(interp, argmsg, "?arg ...?", -1); + } + } + else { + if (cmd->u.proc.arglist[i].defaultObjPtr) { + Jim_AppendString(interp, argmsg, "?", 1); + Jim_AppendObj(interp, argmsg, cmd->u.proc.arglist[i].nameObjPtr); + Jim_AppendString(interp, argmsg, "?", 1); + } + else { + const char *arg = Jim_String(cmd->u.proc.arglist[i].nameObjPtr); + if (*arg == '&') { + arg++; + } + Jim_AppendString(interp, argmsg, arg, -1); + } + } + } + Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s%#s\"", procNameObj, argmsg); +} + +#ifdef jim_ext_namespace +int Jim_EvalNamespace(Jim_Interp *interp, Jim_Obj *scriptObj, Jim_Obj *nsObj) +{ + Jim_CallFrame 
*callFramePtr; + int retcode; + + + callFramePtr = JimCreateCallFrame(interp, interp->framePtr, nsObj); + callFramePtr->argv = interp->evalFrame->argv; + callFramePtr->argc = interp->evalFrame->argc; + callFramePtr->procArgsObjPtr = NULL; + callFramePtr->procBodyObjPtr = scriptObj; + callFramePtr->staticVars = NULL; + Jim_IncrRefCount(scriptObj); + interp->framePtr = callFramePtr; + + + if (interp->framePtr->level == interp->maxCallFrameDepth) { + Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1); + retcode = JIM_ERR; + } + else { + + retcode = Jim_EvalObj(interp, scriptObj); + } + + + interp->framePtr = interp->framePtr->parent; + JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE); + + return retcode; +} +#endif + +static int JimCallProcedure(Jim_Interp *interp, Jim_Cmd *cmd, int argc, Jim_Obj *const *argv) +{ + Jim_CallFrame *callFramePtr; + int i, d, retcode, optargs; + + + if (argc - 1 < cmd->u.proc.reqArity || + (cmd->u.proc.argsPos < 0 && argc - 1 > cmd->u.proc.reqArity + cmd->u.proc.optArity)) { + JimSetProcWrongArgs(interp, argv[0], cmd); + return JIM_ERR; + } + + if (Jim_Length(cmd->u.proc.bodyObjPtr) == 0) { + + return JIM_OK; + } + + + if (interp->framePtr->level == interp->maxCallFrameDepth) { + Jim_SetResultString(interp, "Too many nested calls. Infinite recursion?", -1); + return JIM_ERR; + } + + + callFramePtr = JimCreateCallFrame(interp, interp->framePtr, cmd->u.proc.nsObj); + callFramePtr->argv = argv; + callFramePtr->argc = argc; + callFramePtr->procArgsObjPtr = cmd->u.proc.argListObjPtr; + callFramePtr->procBodyObjPtr = cmd->u.proc.bodyObjPtr; + callFramePtr->staticVars = cmd->u.proc.staticVars; + + interp->procLevel++; + + Jim_IncrRefCount(cmd->u.proc.argListObjPtr); + Jim_IncrRefCount(cmd->u.proc.bodyObjPtr); + interp->framePtr = callFramePtr; + + + optargs = (argc - 1 - cmd->u.proc.reqArity); + + + i = 1; + for (d = 0; d < cmd->u.proc.argListLen; d++) { + Jim_Obj *nameObjPtr = cmd->u.proc.arglist[d].nameObjPtr; + if (d == cmd->u.proc.argsPos) { + + Jim_Obj *listObjPtr; + int argsLen = 0; + if (cmd->u.proc.reqArity + cmd->u.proc.optArity < argc - 1) { + argsLen = argc - 1 - (cmd->u.proc.reqArity + cmd->u.proc.optArity); + } + listObjPtr = Jim_NewListObj(interp, &argv[i], argsLen); + + + if (cmd->u.proc.arglist[d].defaultObjPtr) { + nameObjPtr =cmd->u.proc.arglist[d].defaultObjPtr; + } + retcode = Jim_SetVariable(interp, nameObjPtr, listObjPtr); + if (retcode != JIM_OK) { + goto badargset; + } + + i += argsLen; + continue; + } + + + if (cmd->u.proc.arglist[d].defaultObjPtr == NULL || optargs-- > 0) { + retcode = JimSetProcArg(interp, nameObjPtr, argv[i++]); + } + else { + + retcode = Jim_SetVariable(interp, nameObjPtr, cmd->u.proc.arglist[d].defaultObjPtr); + } + if (retcode != JIM_OK) { + goto badargset; + } + } + + if (interp->traceCmdObj == NULL || + (retcode = JimTraceCallback(interp, "proc", argc, argv)) == JIM_OK) { + + retcode = Jim_EvalObj(interp, cmd->u.proc.bodyObjPtr); + } + +badargset: + + + retcode = JimInvokeDefer(interp, retcode); + interp->framePtr = interp->framePtr->parent; + JimFreeCallFrame(interp, callFramePtr, JIM_FCF_REUSE); + + + if (retcode == JIM_RETURN) { + if (--interp->returnLevel <= 0) { + retcode = interp->returnCode; + interp->returnCode = JIM_OK; + interp->returnLevel = 0; + } + } + interp->procLevel--; + + return retcode; +} + +int Jim_EvalSource(Jim_Interp *interp, const char *filename, int lineno, const char *script) +{ + int retval; + Jim_Obj *scriptObjPtr; + + scriptObjPtr = Jim_NewStringObj(interp, 
script, -1); + Jim_IncrRefCount(scriptObjPtr); + if (filename) { + Jim_SetSourceInfo(interp, scriptObjPtr, Jim_NewStringObj(interp, filename, -1), lineno); + } + retval = Jim_EvalObj(interp, scriptObjPtr); + Jim_DecrRefCount(interp, scriptObjPtr); + return retval; +} + +int Jim_Eval(Jim_Interp *interp, const char *script) +{ + return Jim_EvalObj(interp, Jim_NewStringObj(interp, script, -1)); +} + + +int Jim_EvalGlobal(Jim_Interp *interp, const char *script) +{ + int retval; + Jim_CallFrame *savedFramePtr = interp->framePtr; + + interp->framePtr = interp->topFramePtr; + retval = Jim_Eval(interp, script); + interp->framePtr = savedFramePtr; + + return retval; +} + +int Jim_EvalFileGlobal(Jim_Interp *interp, const char *filename) +{ + int retval; + Jim_CallFrame *savedFramePtr = interp->framePtr; + + interp->framePtr = interp->topFramePtr; + retval = Jim_EvalFile(interp, filename); + interp->framePtr = savedFramePtr; + + return retval; +} + +#include <sys/stat.h> + +static Jim_Obj *JimReadTextFile(Jim_Interp *interp, const char *filename) +{ + jim_stat_t sb; + int fd; + char *buf; + int readlen; + + if (Jim_Stat(filename, &sb) == -1 || (fd = open(filename, O_RDONLY | O_TEXT, 0666)) < 0) { + Jim_SetResultFormatted(interp, "couldn't read file \"%s\": %s", filename, strerror(errno)); + return NULL; + } + buf = Jim_Alloc(sb.st_size + 1); + readlen = read(fd, buf, sb.st_size); + close(fd); + if (readlen < 0) { + Jim_Free(buf); + Jim_SetResultFormatted(interp, "failed to load file \"%s\": %s", filename, strerror(errno)); + return NULL; + } + else { + Jim_Obj *objPtr; + buf[readlen] = 0; + + objPtr = Jim_NewStringObjNoAlloc(interp, buf, readlen); + + return objPtr; + } +} + + +int Jim_EvalFile(Jim_Interp *interp, const char *filename) +{ + Jim_Obj *filenameObj; + Jim_Obj *oldFilenameObj; + Jim_Obj *scriptObjPtr; + int retcode; + + scriptObjPtr = JimReadTextFile(interp, filename); + if (!scriptObjPtr) { + return JIM_ERR; + } + + filenameObj = Jim_NewStringObj(interp, filename, -1); + Jim_SetSourceInfo(interp, scriptObjPtr, filenameObj, 1); + + oldFilenameObj = JimPushInterpObj(interp->currentFilenameObj, filenameObj); + + retcode = Jim_EvalObj(interp, scriptObjPtr); + + JimPopInterpObj(interp, interp->currentFilenameObj, oldFilenameObj); + + + if (retcode == JIM_RETURN) { + if (--interp->returnLevel <= 0) { + retcode = interp->returnCode; + interp->returnCode = JIM_OK; + interp->returnLevel = 0; + } + } + + return retcode; +} + +static void JimParseSubst(struct JimParserCtx *pc, int flags) +{ + pc->tstart = pc->p; + pc->tline = pc->linenr; + + if (pc->len == 0) { + pc->tend = pc->p; + pc->tt = JIM_TT_EOL; + pc->eof = 1; + return; + } + if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD)) { + JimParseCmd(pc); + return; + } + if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR)) { + if (JimParseVar(pc) == JIM_OK) { + return; + } + + pc->tstart = pc->p; + + pc->p++; + pc->len--; + } + while (pc->len) { + if (*pc->p == '$' && !(flags & JIM_SUBST_NOVAR)) { + break; + } + if (*pc->p == '[' && !(flags & JIM_SUBST_NOCMD)) { + break; + } + if (*pc->p == '\\' && pc->len > 1) { + pc->p++; + pc->len--; + } + pc->p++; + pc->len--; + } + pc->tend = pc->p - 1; + pc->tt = (flags & JIM_SUBST_NOESC) ?
JIM_TT_STR : JIM_TT_ESC; +} + + +static int SetSubstFromAny(Jim_Interp *interp, struct Jim_Obj *objPtr, int flags) +{ + int scriptTextLen; + const char *scriptText = Jim_GetString(objPtr, &scriptTextLen); + struct JimParserCtx parser; + struct ScriptObj *script = Jim_Alloc(sizeof(*script)); + ParseTokenList tokenlist; + + + ScriptTokenListInit(&tokenlist); + + JimParserInit(&parser, scriptText, scriptTextLen, 1); + while (1) { + JimParseSubst(&parser, flags); + if (parser.eof) { + + break; + } + ScriptAddToken(&tokenlist, parser.tstart, parser.tend - parser.tstart + 1, parser.tt, + parser.tline); + } + + + script->inUse = 1; + script->substFlags = flags; + script->fileNameObj = interp->emptyObj; + Jim_IncrRefCount(script->fileNameObj); + SubstObjAddTokens(interp, script, &tokenlist); + + + ScriptTokenListFree(&tokenlist); + +#ifdef DEBUG_SHOW_SUBST + { + int i; + + printf("==== Subst ====\n"); + for (i = 0; i < script->len; i++) { + printf("[%2d] %s '%s'\n", i, jim_tt_name(script->token[i].type), + Jim_String(script->token[i].objPtr)); + } + } +#endif + + + Jim_FreeIntRep(interp, objPtr); + Jim_SetIntRepPtr(objPtr, script); + objPtr->typePtr = &scriptObjType; + return JIM_OK; +} + +static ScriptObj *Jim_GetSubst(Jim_Interp *interp, Jim_Obj *objPtr, int flags) +{ + if (objPtr->typePtr != &scriptObjType || ((ScriptObj *)Jim_GetIntRepPtr(objPtr))->substFlags != flags) + SetSubstFromAny(interp, objPtr, flags); + return (ScriptObj *) Jim_GetIntRepPtr(objPtr); +} + +int Jim_SubstObj(Jim_Interp *interp, Jim_Obj *substObjPtr, Jim_Obj **resObjPtrPtr, int flags) +{ + ScriptObj *script; + + JimPanic((substObjPtr->refCount == 0, "Jim_SubstObj() called with zero refcount object")); + + script = Jim_GetSubst(interp, substObjPtr, flags); + + Jim_IncrRefCount(substObjPtr); + script->inUse++; + + *resObjPtrPtr = JimInterpolateTokens(interp, script->token, script->len, flags); + + script->inUse--; + Jim_DecrRefCount(interp, substObjPtr); + if (*resObjPtrPtr == NULL) { + return JIM_ERR; + } + return JIM_OK; +} + +void Jim_WrongNumArgs(Jim_Interp *interp, int argc, Jim_Obj *const *argv, const char *msg) +{ + Jim_Obj *objPtr; + Jim_Obj *listObjPtr; + + JimPanic((argc == 0, "Jim_WrongNumArgs() called with argc=0")); + + listObjPtr = Jim_NewListObj(interp, argv, argc); + + if (msg && *msg) { + Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, msg, -1)); + } + Jim_IncrRefCount(listObjPtr); + objPtr = Jim_ListJoin(interp, listObjPtr, " ", 1); + Jim_DecrRefCount(interp, listObjPtr); + + Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s\"", objPtr); +} + +typedef void JimHashtableIteratorCallbackType(Jim_Interp *interp, Jim_Obj *listObjPtr, + Jim_Obj *keyObjPtr, void *value, Jim_Obj *patternObjPtr, int type); + +#define JimTrivialMatch(pattern) (strpbrk((pattern), "*[?\\") == NULL) + +static Jim_Obj *JimHashtablePatternMatch(Jim_Interp *interp, Jim_HashTable *ht, Jim_Obj *patternObjPtr, + JimHashtableIteratorCallbackType *callback, int type) +{ + Jim_HashEntry *he; + Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); + + + if (patternObjPtr && JimTrivialMatch(Jim_String(patternObjPtr))) { + he = Jim_FindHashEntry(ht, patternObjPtr); + if (he) { + callback(interp, listObjPtr, Jim_GetHashEntryKey(he), Jim_GetHashEntryVal(he), + patternObjPtr, type); + } + } + else { + Jim_HashTableIterator htiter; + JimInitHashTableIterator(ht, &htiter); + while ((he = Jim_NextHashEntry(&htiter)) != NULL) { + callback(interp, listObjPtr, Jim_GetHashEntryKey(he), Jim_GetHashEntryVal(he), + 
patternObjPtr, type); + } + } + return listObjPtr; +} + + +#define JIM_CMDLIST_COMMANDS 0 +#define JIM_CMDLIST_PROCS 1 +#define JIM_CMDLIST_CHANNELS 2 + +static void JimCommandMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, + Jim_Obj *keyObj, void *value, Jim_Obj *patternObj, int type) +{ + Jim_Cmd *cmdPtr = (Jim_Cmd *)value; + + if (type == JIM_CMDLIST_PROCS && !cmdPtr->isproc) { + + return; + } + + Jim_IncrRefCount(keyObj); + + if (type != JIM_CMDLIST_CHANNELS || Jim_AioFilehandle(interp, keyObj) >= 0) { + int match = 1; + if (patternObj) { + int plen, slen; + const char *pattern = Jim_GetStringNoQualifier(patternObj, &plen); + const char *str = Jim_GetStringNoQualifier(keyObj, &slen); +#ifdef JIM_NO_INTROSPECTION + + match = (JimStringCompareUtf8(pattern, plen, str, slen, 0) == 0); +#else + match = JimGlobMatch(pattern, plen, str, slen, 0); +#endif + } + if (match) { + Jim_ListAppendElement(interp, listObjPtr, keyObj); + } + } + Jim_DecrRefCount(interp, keyObj); +} + +static Jim_Obj *JimCommandsList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int type) +{ + return JimHashtablePatternMatch(interp, &interp->commands, patternObjPtr, JimCommandMatch, type); +} + + +#define JIM_VARLIST_GLOBALS 0 +#define JIM_VARLIST_LOCALS 1 +#define JIM_VARLIST_VARS 2 +#define JIM_VARLIST_MASK 0x000f + +#define JIM_VARLIST_VALUES 0x1000 + +static void JimVariablesMatch(Jim_Interp *interp, Jim_Obj *listObjPtr, + Jim_Obj *keyObj, void *value, Jim_Obj *patternObj, int type) +{ + Jim_VarVal *vv = (Jim_VarVal *)value; + + if ((type & JIM_VARLIST_MASK) != JIM_VARLIST_LOCALS || vv->linkFramePtr == NULL) { + if (patternObj == NULL || Jim_StringMatchObj(interp, patternObj, keyObj, 0)) { + Jim_ListAppendElement(interp, listObjPtr, keyObj); + if (type & JIM_VARLIST_VALUES) { + Jim_ListAppendElement(interp, listObjPtr, vv->objPtr); + } + } + } +} + + +static Jim_Obj *JimVariablesList(Jim_Interp *interp, Jim_Obj *patternObjPtr, int mode) +{ + if (mode == JIM_VARLIST_LOCALS && interp->framePtr == interp->topFramePtr) { + return interp->emptyObj; + } + else { + Jim_CallFrame *framePtr = (mode == JIM_VARLIST_GLOBALS) ? 
interp->topFramePtr : interp->framePtr; + return JimHashtablePatternMatch(interp, &framePtr->vars, patternObjPtr, JimVariablesMatch, + mode); + } +} + +static int JimInfoLevel(Jim_Interp *interp, Jim_Obj *levelObjPtr, Jim_Obj **objPtrPtr) +{ + long level; + + if (Jim_GetLong(interp, levelObjPtr, &level) == JIM_OK) { + Jim_CallFrame *targetCallFrame = JimGetCallFrameByInteger(interp, level); + if (targetCallFrame && targetCallFrame != interp->topFramePtr) { +#ifdef JIM_NO_INTROSPECTION + + *objPtrPtr = Jim_NewListObj(interp, targetCallFrame->argv, 1); +#else + *objPtrPtr = Jim_NewListObj(interp, targetCallFrame->argv, targetCallFrame->argc); +#endif + return JIM_OK; + } + } + Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr); + return JIM_ERR; +} + +static int JimInfoFrame(Jim_Interp *interp, Jim_Obj *levelObjPtr, Jim_Obj **objPtrPtr) +{ + long level; + + if (Jim_GetLong(interp, levelObjPtr, &level) == JIM_OK) { + Jim_EvalFrame *frame = JimGetEvalFrameByProcLevel(interp, level); + if (frame) { + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "type", -1)); + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "source", -1)); + if (frame->scriptObj) { + ScriptObj *script = JimGetScript(interp, frame->scriptObj); + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "line", -1)); + Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, script->linenr)); + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "file", -1)); + Jim_ListAppendElement(interp, listObj, script->fileNameObj); + } +#ifndef JIM_NO_INTROSPECTION + { + Jim_Obj *cmdObj = Jim_NewListObj(interp, frame->argv, frame->argc); + + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "cmd", -1)); + Jim_ListAppendElement(interp, listObj, cmdObj); + } +#endif + { + Jim_Obj *procNameObj = JimProcForEvalFrame(interp, frame); + if (procNameObj) { + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "proc", -1)); + Jim_ListAppendElement(interp, listObj, procNameObj); + } + } + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, "level", -1)); + Jim_ListAppendElement(interp, listObj, Jim_NewIntObj(interp, interp->framePtr->level - frame->framePtr->level)); + + *objPtrPtr = listObj; + return JIM_OK; + } + } + Jim_SetResultFormatted(interp, "bad level \"%#s\"", levelObjPtr); + return JIM_ERR; +} + + +static int Jim_PutsCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 2 && argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "?-nonewline? string"); + return JIM_ERR; + } + if (argc == 3) { + if (!Jim_CompareStringImmediate(interp, argv[1], "-nonewline")) { + Jim_SetResultString(interp, "The second argument must " "be -nonewline", -1); + return JIM_ERR; + } + else { + fputs(Jim_String(argv[2]), stdout); + } + } + else { + puts(Jim_String(argv[1])); + } + return JIM_OK; +} + + +static int JimAddMulHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op) +{ + jim_wide wideValue, res; + double doubleValue, doubleRes; + int i; + + res = (op == JIM_EXPROP_ADD) ? 
0 : 1; + + for (i = 1; i < argc; i++) { + if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK) + goto trydouble; + if (op == JIM_EXPROP_ADD) + res += wideValue; + else + res *= wideValue; + } + Jim_SetResultInt(interp, res); + return JIM_OK; + trydouble: + doubleRes = (double)res; + for (; i < argc; i++) { + if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK) + return JIM_ERR; + if (op == JIM_EXPROP_ADD) + doubleRes += doubleValue; + else + doubleRes *= doubleValue; + } + Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); + return JIM_OK; +} + + +static int JimSubDivHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int op) +{ + jim_wide wideValue, res = 0; + double doubleValue, doubleRes = 0; + int i = 2; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "number ?number ... number?"); + return JIM_ERR; + } + else if (argc == 2) { + if (Jim_GetWide(interp, argv[1], &wideValue) != JIM_OK) { + if (Jim_GetDouble(interp, argv[1], &doubleValue) != JIM_OK) { + return JIM_ERR; + } + else { + if (op == JIM_EXPROP_SUB) + doubleRes = -doubleValue; + else + doubleRes = 1.0 / doubleValue; + Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); + return JIM_OK; + } + } + if (op == JIM_EXPROP_SUB) { + res = -wideValue; + Jim_SetResultInt(interp, res); + } + else { + doubleRes = 1.0 / wideValue; + Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); + } + return JIM_OK; + } + else { + if (Jim_GetWide(interp, argv[1], &res) != JIM_OK) { + if (Jim_GetDouble(interp, argv[1], &doubleRes) + != JIM_OK) { + return JIM_ERR; + } + else { + goto trydouble; + } + } + } + for (i = 2; i < argc; i++) { + if (Jim_GetWide(interp, argv[i], &wideValue) != JIM_OK) { + doubleRes = (double)res; + goto trydouble; + } + if (op == JIM_EXPROP_SUB) + res -= wideValue; + else { + if (wideValue == 0) { + Jim_SetResultString(interp, "Division by zero", -1); + return JIM_ERR; + } + res /= wideValue; + } + } + Jim_SetResultInt(interp, res); + return JIM_OK; + trydouble: + for (; i < argc; i++) { + if (Jim_GetDouble(interp, argv[i], &doubleValue) != JIM_OK) + return JIM_ERR; + if (op == JIM_EXPROP_SUB) + doubleRes -= doubleValue; + else + doubleRes /= doubleValue; + } + Jim_SetResult(interp, Jim_NewDoubleObj(interp, doubleRes)); + return JIM_OK; +} + + + +static int Jim_AddCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_ADD); +} + + +static int Jim_MulCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimAddMulHelper(interp, argc, argv, JIM_EXPROP_MUL); +} + + +static int Jim_SubCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_SUB); +} + + +static int Jim_DivCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimSubDivHelper(interp, argc, argv, JIM_EXPROP_DIV); +} + + +static int Jim_SetCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 2 && argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?newValue?"); + return JIM_ERR; + } + if (argc == 2) { + Jim_Obj *objPtr; + + objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); + if (!objPtr) + return JIM_ERR; + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + if (Jim_SetVariable(interp, argv[1], argv[2]) != JIM_OK) + return JIM_ERR; + Jim_SetResult(interp, argv[2]); + return JIM_OK; +} + +static int Jim_UnsetCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i = 1; + int complain = 1; 
+ + while (i < argc) { + if (Jim_CompareStringImmediate(interp, argv[i], "--")) { + i++; + break; + } + if (Jim_CompareStringImmediate(interp, argv[i], "-nocomplain")) { + complain = 0; + i++; + continue; + } + break; + } + + while (i < argc) { + if (Jim_UnsetVariable(interp, argv[i], complain ? JIM_ERRMSG : JIM_NONE) != JIM_OK + && complain) { + return JIM_ERR; + } + i++; + } + + Jim_SetEmptyResult(interp); + return JIM_OK; +} + +static int JimCheckLoopRetcode(Jim_Interp *interp, int retval) +{ + if (retval == JIM_BREAK || retval == JIM_CONTINUE) { + if (--interp->break_level > 0) { + return 1; + } + } + return 0; +} + + +static int Jim_WhileCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "condition body"); + return JIM_ERR; + } + + + while (1) { + int boolean = 0, retval; + + if ((retval = Jim_GetBoolFromExpr(interp, argv[1], &boolean)) != JIM_OK) + return retval; + if (!boolean) + break; + + if ((retval = Jim_EvalObj(interp, argv[2])) != JIM_OK) { + if (JimCheckLoopRetcode(interp, retval)) { + return retval; + } + switch (retval) { + case JIM_BREAK: + goto out; + case JIM_CONTINUE: + continue; + default: + return retval; + } + } + } + out: + Jim_SetEmptyResult(interp); + return JIM_OK; +} + + +static int Jim_ForCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retval; + int boolean = 1; + int immediate = 0; + Jim_Obj *varNamePtr = NULL; + Jim_Obj *stopVarNamePtr = NULL; + + if (argc != 5) { + Jim_WrongNumArgs(interp, 1, argv, "start test next body"); + return JIM_ERR; + } + + + if ((retval = Jim_EvalObj(interp, argv[1])) != JIM_OK) { + return retval; + } + + retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean); + + +#ifdef JIM_OPTIMIZATION + if (retval == JIM_OK && boolean) { + ScriptObj *incrScript; + struct ExprTree *expr; + jim_wide stop, currentVal; + Jim_Obj *objPtr; + int cmpOffset; + + + expr = JimGetExpression(interp, argv[2]); + incrScript = JimGetScript(interp, argv[3]); + + + if (incrScript == NULL || incrScript->len != 3 || !expr || expr->len != 3) { + goto evalstart; + } + + if (incrScript->token[1].type != JIM_TT_ESC) { + goto evalstart; + } + + if (expr->expr->type == JIM_EXPROP_LT) { + cmpOffset = 0; + } + else if (expr->expr->type == JIM_EXPROP_LTE) { + cmpOffset = 1; + } + else { + goto evalstart; + } + + if (expr->expr->left->type != JIM_TT_VAR) { + goto evalstart; + } + + if (expr->expr->right->type != JIM_TT_VAR && expr->expr->right->type != JIM_TT_EXPR_INT) { + goto evalstart; + } + + + if (!Jim_CompareStringImmediate(interp, incrScript->token[1].objPtr, "incr")) { + goto evalstart; + } + + + if (!Jim_StringEqObj(incrScript->token[2].objPtr, expr->expr->left->objPtr)) { + goto evalstart; + } + + + if (expr->expr->right->type == JIM_TT_EXPR_INT) { + if (Jim_GetWideExpr(interp, expr->expr->right->objPtr, &stop) == JIM_ERR) { + goto evalstart; + } + } + else { + stopVarNamePtr = expr->expr->right->objPtr; + Jim_IncrRefCount(stopVarNamePtr); + + stop = 0; + } + + + varNamePtr = expr->expr->left->objPtr; + Jim_IncrRefCount(varNamePtr); + + objPtr = Jim_GetVariable(interp, varNamePtr, JIM_NONE); + if (objPtr == NULL || Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK) { + goto testcond; + } + + + while (retval == JIM_OK) { + + + + + if (stopVarNamePtr) { + objPtr = Jim_GetVariable(interp, stopVarNamePtr, JIM_NONE); + if (objPtr == NULL || Jim_GetWide(interp, objPtr, &stop) != JIM_OK) { + goto testcond; + } + } + + if (currentVal >= stop + cmpOffset) { + break; + } + + + retval
= Jim_EvalObj(interp, argv[4]); + if (JimCheckLoopRetcode(interp, retval)) { + immediate++; + goto out; + } + if (retval == JIM_OK || retval == JIM_CONTINUE) { + retval = JIM_OK; + + objPtr = Jim_GetVariable(interp, varNamePtr, JIM_ERRMSG); + + + if (objPtr == NULL) { + retval = JIM_ERR; + goto out; + } + if (!Jim_IsShared(objPtr) && objPtr->typePtr == &intObjType) { + currentVal = ++JimWideValue(objPtr); + Jim_InvalidateStringRep(objPtr); + } + else { + if (Jim_GetWide(interp, objPtr, &currentVal) != JIM_OK || + Jim_SetVariable(interp, varNamePtr, Jim_NewIntObj(interp, + ++currentVal)) != JIM_OK) { + goto evalnext; + } + } + } + } + goto out; + } + evalstart: +#endif + + while (boolean && (retval == JIM_OK || retval == JIM_CONTINUE)) { + + retval = Jim_EvalObj(interp, argv[4]); + if (JimCheckLoopRetcode(interp, retval)) { + immediate++; + break; + } + if (retval == JIM_OK || retval == JIM_CONTINUE) { + +JIM_IF_OPTIM(evalnext:) + retval = Jim_EvalObj(interp, argv[3]); + if (retval == JIM_OK || retval == JIM_CONTINUE) { + +JIM_IF_OPTIM(testcond:) + retval = Jim_GetBoolFromExpr(interp, argv[2], &boolean); + } + } + } +JIM_IF_OPTIM(out:) + if (stopVarNamePtr) { + Jim_DecrRefCount(interp, stopVarNamePtr); + } + if (varNamePtr) { + Jim_DecrRefCount(interp, varNamePtr); + } + + if (!immediate) { + if (retval == JIM_CONTINUE || retval == JIM_BREAK || retval == JIM_OK) { + Jim_SetEmptyResult(interp); + return JIM_OK; + } + } + + return retval; +} + + +static int Jim_LoopCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retval; + jim_wide i; + jim_wide limit = 0; + jim_wide incr = 1; + Jim_Obj *bodyObjPtr; + + if (argc < 4 || argc > 6) { + Jim_WrongNumArgs(interp, 1, argv, "var ?first? limit ?incr? body"); + return JIM_ERR; + } + + retval = Jim_GetWideExpr(interp, argv[2], &i); + if (argc > 4 && retval == JIM_OK) { + retval = Jim_GetWideExpr(interp, argv[3], &limit); + } + if (argc > 5 && retval == JIM_OK) { + Jim_GetWideExpr(interp, argv[4], &incr); + } + if (retval != JIM_OK) { + return retval; + } + if (argc == 4) { + limit = i; + i = 0; + } + bodyObjPtr = argv[argc - 1]; + + retval = Jim_SetVariable(interp, argv[1], Jim_NewIntObj(interp, i)); + + while (((i < limit && incr > 0) || (i > limit && incr < 0)) && retval == JIM_OK) { + retval = Jim_EvalObj(interp, bodyObjPtr); + if (JimCheckLoopRetcode(interp, retval)) { + return retval; + } + if (retval == JIM_OK || retval == JIM_CONTINUE) { + Jim_Obj *objPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); + + retval = JIM_OK; + + + i += incr; + + if (objPtr && !Jim_IsShared(objPtr) && objPtr->typePtr == &intObjType) { + if (argv[1]->typePtr != &variableObjType) { + if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) { + return JIM_ERR; + } + } + JimWideValue(objPtr) = i; + Jim_InvalidateStringRep(objPtr); + + if (argv[1]->typePtr != &variableObjType) { + if (Jim_SetVariable(interp, argv[1], objPtr) != JIM_OK) { + retval = JIM_ERR; + break; + } + } + } + else { + objPtr = Jim_NewIntObj(interp, i); + retval = Jim_SetVariable(interp, argv[1], objPtr); + if (retval != JIM_OK) { + Jim_FreeNewObj(interp, objPtr); + } + } + } + } + + if (retval == JIM_OK || retval == JIM_CONTINUE || retval == JIM_BREAK) { + Jim_SetEmptyResult(interp); + return JIM_OK; + } + return retval; +} + +typedef struct { + Jim_Obj *objPtr; + int idx; +} Jim_ListIter; + +static void JimListIterInit(Jim_ListIter *iter, Jim_Obj *objPtr) +{ + iter->objPtr = objPtr; + iter->idx = 0; +} + +static Jim_Obj *JimListIterNext(Jim_Interp *interp, Jim_ListIter *iter) +{ +
if (iter->idx >= Jim_ListLength(interp, iter->objPtr)) { + return NULL; + } + return iter->objPtr->internalRep.listValue.ele[iter->idx++]; +} + +static int JimListIterDone(Jim_Interp *interp, Jim_ListIter *iter) +{ + return iter->idx >= Jim_ListLength(interp, iter->objPtr); +} + + +static int JimForeachMapHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int doMap) +{ + int result = JIM_OK; + int i, numargs; + Jim_ListIter twoiters[2]; + Jim_ListIter *iters; + Jim_Obj *script; + Jim_Obj *resultObj; + + if (argc < 4 || argc % 2 != 0) { + Jim_WrongNumArgs(interp, 1, argv, "varList list ?varList list ...? script"); + return JIM_ERR; + } + script = argv[argc - 1]; + numargs = (argc - 1 - 1); + + if (numargs == 2) { + iters = twoiters; + } + else { + iters = Jim_Alloc(numargs * sizeof(*iters)); + } + for (i = 0; i < numargs; i++) { + JimListIterInit(&iters[i], argv[i + 1]); + if (i % 2 == 0 && JimListIterDone(interp, &iters[i])) { + result = JIM_ERR; + } + } + if (result != JIM_OK) { + Jim_SetResultString(interp, "foreach varlist is empty", -1); + goto empty_varlist; + } + + if (doMap) { + resultObj = Jim_NewListObj(interp, NULL, 0); + } + else { + resultObj = interp->emptyObj; + } + Jim_IncrRefCount(resultObj); + + while (1) { + + for (i = 0; i < numargs; i += 2) { + if (!JimListIterDone(interp, &iters[i + 1])) { + break; + } + } + if (i == numargs) { + + break; + } + + + for (i = 0; i < numargs; i += 2) { + Jim_Obj *varName; + + + JimListIterInit(&iters[i], argv[i + 1]); + while ((varName = JimListIterNext(interp, &iters[i])) != NULL) { + Jim_Obj *valObj = JimListIterNext(interp, &iters[i + 1]); + if (!valObj) { + + valObj = interp->emptyObj; + } + + Jim_IncrRefCount(valObj); + result = Jim_SetVariable(interp, varName, valObj); + Jim_DecrRefCount(interp, valObj); + if (result != JIM_OK) { + goto err; + } + } + } + result = Jim_EvalObj(interp, script); + if (JimCheckLoopRetcode(interp, result)) { + goto err; + } + switch (result) { + case JIM_OK: + if (doMap) { + Jim_ListAppendElement(interp, resultObj, interp->result); + } + break; + case JIM_CONTINUE: + break; + case JIM_BREAK: + goto out; + default: + goto err; + } + } + out: + result = JIM_OK; + Jim_SetResult(interp, resultObj); + err: + Jim_DecrRefCount(interp, resultObj); + empty_varlist: + if (numargs > 2) { + Jim_Free(iters); + } + return result; +} + + +static int Jim_ForeachCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimForeachMapHelper(interp, argc, argv, 0); +} + + +static int Jim_LmapCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimForeachMapHelper(interp, argc, argv, 1); +} + + +static int Jim_LassignCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int result = JIM_ERR; + int i; + Jim_ListIter iter; + Jim_Obj *resultObj; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "varList list ?varName ...?"); + return JIM_ERR; + } + + JimListIterInit(&iter, argv[1]); + + for (i = 2; i < argc; i++) { + Jim_Obj *valObj = JimListIterNext(interp, &iter); + result = Jim_SetVariable(interp, argv[i], valObj ? 
valObj : interp->emptyObj); + if (result != JIM_OK) { + return result; + } + } + + resultObj = Jim_NewListObj(interp, NULL, 0); + while (!JimListIterDone(interp, &iter)) { + Jim_ListAppendElement(interp, resultObj, JimListIterNext(interp, &iter)); + } + + Jim_SetResult(interp, resultObj); + + return JIM_OK; +} + + +static int Jim_IfCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int boolean, retval, current = 1, falsebody = 0; + + if (argc >= 3) { + while (1) { + + if (current >= argc) + goto err; + if ((retval = Jim_GetBoolFromExpr(interp, argv[current++], &boolean)) + != JIM_OK) + return retval; + + if (current >= argc) + goto err; + if (Jim_CompareStringImmediate(interp, argv[current], "then")) + current++; + + if (current >= argc) + goto err; + if (boolean) + return Jim_EvalObj(interp, argv[current]); + + if (++current >= argc) { + Jim_SetResult(interp, Jim_NewEmptyStringObj(interp)); + return JIM_OK; + } + falsebody = current++; + if (Jim_CompareStringImmediate(interp, argv[falsebody], "else")) { + + if (current != argc - 1) + goto err; + return Jim_EvalObj(interp, argv[current]); + } + else if (Jim_CompareStringImmediate(interp, argv[falsebody], "elseif")) + continue; + + else if (falsebody != argc - 1) + goto err; + return Jim_EvalObj(interp, argv[falsebody]); + } + return JIM_OK; + } + err: + Jim_WrongNumArgs(interp, 1, argv, "condition ?then? trueBody ?elseif ...? ?else? falseBody"); + return JIM_ERR; +} + + +int Jim_CommandMatchObj(Jim_Interp *interp, Jim_Obj *commandObj, Jim_Obj *patternObj, + Jim_Obj *stringObj, int flags) +{ + Jim_Obj *parms[5]; + int argc = 0; + long eq; + int rc; + + parms[argc++] = commandObj; + if (flags & JIM_NOCASE) { + parms[argc++] = Jim_NewStringObj(interp, "-nocase", -1); + } + if (flags & JIM_OPT_END) { + parms[argc++] = Jim_NewStringObj(interp, "--", -1); + } + parms[argc++] = patternObj; + parms[argc++] = stringObj; + + rc = Jim_EvalObjVector(interp, argc, parms); + + if (rc != JIM_OK || Jim_GetLong(interp, Jim_GetResult(interp), &eq) != JIM_OK) { + eq = -rc; + } + + return eq; +} + + +static int Jim_SwitchCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + enum { SWITCH_EXACT, SWITCH_GLOB, SWITCH_RE, SWITCH_CMD }; + int matchOpt = SWITCH_EXACT, opt = 1, patCount, i; + int match_flags = 0; + Jim_Obj *command = NULL, *scriptObj = NULL, *strObj; + Jim_Obj **caseList; + + if (argc < 3) { + wrongnumargs: + Jim_WrongNumArgs(interp, 1, argv, "?options? string " + "pattern body ... ?default body? 
or " "{pattern body ?pattern body ...?}"); + return JIM_ERR; + } + for (opt = 1; opt < argc; ++opt) { + const char *option = Jim_String(argv[opt]); + + if (*option != '-') + break; + else if (strncmp(option, "--", 2) == 0) { + ++opt; + break; + } + else if (strncmp(option, "-exact", 2) == 0) + matchOpt = SWITCH_EXACT; + else if (strncmp(option, "-glob", 2) == 0) + matchOpt = SWITCH_GLOB; + else if (strncmp(option, "-regexp", 2) == 0) { + matchOpt = SWITCH_RE; + match_flags |= JIM_OPT_END; + } + else if (strncmp(option, "-command", 2) == 0) { + matchOpt = SWITCH_CMD; + if ((argc - opt) < 2) + goto wrongnumargs; + command = argv[++opt]; + } + else { + Jim_SetResultFormatted(interp, + "bad option \"%#s\": must be -exact, -glob, -regexp, -command procname or --", + argv[opt]); + return JIM_ERR; + } + if ((argc - opt) < 2) + goto wrongnumargs; + } + strObj = argv[opt++]; + patCount = argc - opt; + if (patCount == 1) { + JimListGetElements(interp, argv[opt], &patCount, &caseList); + } + else + caseList = (Jim_Obj **)&argv[opt]; + if (patCount == 0 || patCount % 2 != 0) + goto wrongnumargs; + for (i = 0; scriptObj == NULL && i < patCount; i += 2) { + Jim_Obj *patObj = caseList[i]; + + if (!Jim_CompareStringImmediate(interp, patObj, "default") + || i < (patCount - 2)) { + switch (matchOpt) { + case SWITCH_EXACT: + if (Jim_StringEqObj(strObj, patObj)) + scriptObj = caseList[i + 1]; + break; + case SWITCH_GLOB: + if (Jim_StringMatchObj(interp, patObj, strObj, 0)) + scriptObj = caseList[i + 1]; + break; + case SWITCH_RE: + command = Jim_NewStringObj(interp, "regexp", -1); + + case SWITCH_CMD:{ + int rc = Jim_CommandMatchObj(interp, command, patObj, strObj, match_flags); + + if (argc - opt == 1) { + JimListGetElements(interp, argv[opt], &patCount, &caseList); + } + + if (rc < 0) { + return -rc; + } + if (rc) + scriptObj = caseList[i + 1]; + break; + } + } + } + else { + scriptObj = caseList[i + 1]; + } + } + for (; i < patCount && Jim_CompareStringImmediate(interp, scriptObj, "-"); i += 2) + scriptObj = caseList[i + 1]; + if (scriptObj && Jim_CompareStringImmediate(interp, scriptObj, "-")) { + Jim_SetResultFormatted(interp, "no body specified for pattern \"%#s\"", caseList[i - 2]); + return JIM_ERR; + } + Jim_SetEmptyResult(interp); + if (scriptObj) { + return Jim_EvalObj(interp, scriptObj); + } + return JIM_OK; +} + + +static int Jim_ListCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *listObjPtr; + + listObjPtr = Jim_NewListObj(interp, argv + 1, argc - 1); + Jim_SetResult(interp, listObjPtr); + return JIM_OK; +} + + +static int Jim_LindexCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + int ret; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "list ?index ...?"); + return JIM_ERR; + } + ret = Jim_ListIndices(interp, argv[1], argv + 2, argc - 2, &objPtr, JIM_NONE); + if (ret < 0) { + ret = JIM_OK; + Jim_SetEmptyResult(interp); + } + else if (ret == JIM_OK) { + Jim_SetResult(interp, objPtr); + } + return ret; +} + + +static int Jim_LlengthCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "list"); + return JIM_ERR; + } + Jim_SetResultInt(interp, Jim_ListLength(interp, argv[1])); + return JIM_OK; +} + + +static int Jim_LsearchCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + static const char * const options[] = { + "-bool", "-not", "-nocase", "-exact", "-glob", "-regexp", "-all", "-inline", "-command", + "-stride", "-index", NULL + }; + 
enum + { OPT_BOOL, OPT_NOT, OPT_NOCASE, OPT_EXACT, OPT_GLOB, OPT_REGEXP, OPT_ALL, OPT_INLINE, + OPT_COMMAND, OPT_STRIDE, OPT_INDEX }; + int i; + int opt_bool = 0; + int opt_not = 0; + int opt_all = 0; + int opt_inline = 0; + int opt_match = OPT_EXACT; + int listlen; + int rc = JIM_OK; + Jim_Obj *listObjPtr = NULL; + Jim_Obj *commandObj = NULL; + Jim_Obj *indexObj = NULL; + int match_flags = 0; + long stride = 1; + + if (argc < 3) { + wrongargs: + Jim_WrongNumArgs(interp, 1, argv, + "?-exact|-glob|-regexp|-command 'command'? ?-bool|-inline? ?-not? ?-nocase? ?-all? ?-stride len? ?-index val? list value"); + return JIM_ERR; + } + + for (i = 1; i < argc - 2; i++) { + int option; + + if (Jim_GetEnum(interp, argv[i], options, &option, NULL, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + switch (option) { + case OPT_BOOL: + opt_bool = 1; + opt_inline = 0; + break; + case OPT_NOT: + opt_not = 1; + break; + case OPT_NOCASE: + match_flags |= JIM_NOCASE; + break; + case OPT_INLINE: + opt_inline = 1; + opt_bool = 0; + break; + case OPT_ALL: + opt_all = 1; + break; + case OPT_REGEXP: + opt_match = option; + match_flags |= JIM_OPT_END; + break; + case OPT_COMMAND: + if (i >= argc - 2) { + goto wrongargs; + } + commandObj = argv[++i]; + + case OPT_EXACT: + case OPT_GLOB: + opt_match = option; + break; + case OPT_INDEX: + if (i >= argc - 2) { + goto wrongargs; + } + indexObj = argv[++i]; + break; + case OPT_STRIDE: + if (i >= argc - 2) { + goto wrongargs; + } + if (Jim_GetLong(interp, argv[++i], &stride) != JIM_OK) { + return JIM_ERR; + } + if (stride < 1) { + Jim_SetResultString(interp, "stride length must be at least 1", -1); + return JIM_ERR; + } + break; + } + } + + argc -= i; + if (argc < 2) { + goto wrongargs; + } + argv += i; + + listlen = Jim_ListLength(interp, argv[0]); + if (listlen % stride) { + Jim_SetResultString(interp, "list size must be a multiple of the stride length", -1); + return JIM_ERR; + } + + if (opt_all) { + listObjPtr = Jim_NewListObj(interp, NULL, 0); + } + if (opt_match == OPT_REGEXP) { + commandObj = Jim_NewStringObj(interp, "regexp", -1); + } + if (commandObj) { + Jim_IncrRefCount(commandObj); + } + + for (i = 0; i < listlen; i += stride) { + int eq = 0; + Jim_Obj *searchListObj; + Jim_Obj *objPtr; + int offset; + + if (indexObj) { + int indexlen = Jim_ListLength(interp, indexObj); + if (stride == 1) { + searchListObj = Jim_ListGetIndex(interp, argv[0], i); + } + else { + searchListObj = Jim_NewListObj(interp, argv[0]->internalRep.listValue.ele + i, stride); + } + Jim_IncrRefCount(searchListObj); + rc = Jim_ListIndices(interp, searchListObj, indexObj->internalRep.listValue.ele, indexlen, &objPtr, JIM_ERRMSG); + if (rc != JIM_OK) { + Jim_DecrRefCount(interp, searchListObj); + rc = JIM_ERR; + goto done; + } + + offset = 0; + } + else { + + searchListObj = argv[0]; + offset = i; + objPtr = Jim_ListGetIndex(interp, searchListObj, i); + Jim_IncrRefCount(searchListObj); + } + + switch (opt_match) { + case OPT_EXACT: + eq = Jim_StringCompareObj(interp, argv[1], objPtr, match_flags) == 0; + break; + + case OPT_GLOB: + eq = Jim_StringMatchObj(interp, argv[1], objPtr, match_flags); + break; + + case OPT_REGEXP: + case OPT_COMMAND: + eq = Jim_CommandMatchObj(interp, commandObj, argv[1], objPtr, match_flags); + if (eq < 0) { + Jim_DecrRefCount(interp, searchListObj); + rc = JIM_ERR; + goto done; + } + break; + } + + + if ((!opt_bool && eq == !opt_not) || (opt_bool && (eq || opt_all))) { + Jim_Obj *resultObj; + + if (opt_bool) { + resultObj = Jim_NewIntObj(interp, eq ^ opt_not); + } + 
else if (!opt_inline) { + resultObj = Jim_NewIntObj(interp, i); + } + else if (stride == 1) { + resultObj = objPtr; + } + else if (opt_all) { + + ListInsertElements(listObjPtr, -1, stride, + searchListObj->internalRep.listValue.ele + offset); + + resultObj = NULL; + } + else { + resultObj = Jim_NewListObj(interp, searchListObj->internalRep.listValue.ele + offset, stride); + } + + if (opt_all) { + + if (stride == 1) { + Jim_ListAppendElement(interp, listObjPtr, resultObj); + } + } + else { + Jim_SetResult(interp, resultObj); + Jim_DecrRefCount(interp, searchListObj); + goto done; + } + } + Jim_DecrRefCount(interp, searchListObj); + } + + if (opt_all) { + Jim_SetResult(interp, listObjPtr); + listObjPtr = NULL; + } + else { + + if (opt_bool) { + Jim_SetResultBool(interp, opt_not); + } + else if (!opt_inline) { + Jim_SetResultInt(interp, -1); + } + } + + done: + if (listObjPtr) { + Jim_FreeNewObj(interp, listObjPtr); + } + if (commandObj) { + Jim_DecrRefCount(interp, commandObj); + } + return rc; +} + + +static int Jim_LappendCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *listObjPtr; + int new_obj = 0; + int i; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?value value ...?"); + return JIM_ERR; + } + listObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); + if (!listObjPtr) { + + listObjPtr = Jim_NewListObj(interp, NULL, 0); + new_obj = 1; + } + else if (Jim_IsShared(listObjPtr)) { + listObjPtr = Jim_DuplicateObj(interp, listObjPtr); + new_obj = 1; + } + for (i = 2; i < argc; i++) + Jim_ListAppendElement(interp, listObjPtr, argv[i]); + if (Jim_SetVariable(interp, argv[1], listObjPtr) != JIM_OK) { + if (new_obj) + Jim_FreeNewObj(interp, listObjPtr); + return JIM_ERR; + } + Jim_SetResult(interp, listObjPtr); + return JIM_OK; +} + + +static int Jim_LinsertCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int idx, len; + Jim_Obj *listPtr; + + if (argc < 3) { + Jim_WrongNumArgs(interp, 1, argv, "list index ?element ...?"); + return JIM_ERR; + } + listPtr = argv[1]; + if (Jim_IsShared(listPtr)) + listPtr = Jim_DuplicateObj(interp, listPtr); + if (Jim_GetIndex(interp, argv[2], &idx) != JIM_OK) + goto err; + len = Jim_ListLength(interp, listPtr); + if (idx >= len) + idx = len; + else if (idx < 0) + idx = len + idx + 1; + Jim_ListInsertElements(interp, listPtr, idx, argc - 3, &argv[3]); + Jim_SetResult(interp, listPtr); + return JIM_OK; + err: + if (listPtr != argv[1]) { + Jim_FreeNewObj(interp, listPtr); + } + return JIM_ERR; +} + + +static int Jim_LreplaceCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int first, last, len, rangeLen; + Jim_Obj *listObj; + Jim_Obj *newListObj; + + if (argc < 4) { + Jim_WrongNumArgs(interp, 1, argv, "list first last ?element ...?"); + return JIM_ERR; + } + if (Jim_GetIndex(interp, argv[2], &first) != JIM_OK || + Jim_GetIndex(interp, argv[3], &last) != JIM_OK) { + return JIM_ERR; + } + + listObj = argv[1]; + len = Jim_ListLength(interp, listObj); + + first = JimRelToAbsIndex(len, first); + last = JimRelToAbsIndex(len, last); + JimRelToAbsRange(len, &first, &last, &rangeLen); + + + if (first > len) { + first = len; + } + + + newListObj = Jim_NewListObj(interp, listObj->internalRep.listValue.ele, first); + + + ListInsertElements(newListObj, -1, argc - 4, argv + 4); + + + ListInsertElements(newListObj, -1, len - first - rangeLen, listObj->internalRep.listValue.ele + first + rangeLen); + + Jim_SetResult(interp, newListObj); + return JIM_OK; +} + + +static int 
Jim_LsetCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc < 3) { + Jim_WrongNumArgs(interp, 1, argv, "listVar ?index ...? value"); + return JIM_ERR; + } + else if (argc == 3) { + + if (Jim_SetVariable(interp, argv[1], argv[2]) != JIM_OK) + return JIM_ERR; + Jim_SetResult(interp, argv[2]); + return JIM_OK; + } + return Jim_ListSetIndex(interp, argv[1], argv + 2, argc - 3, argv[argc - 1]); +} + + +static int Jim_LsortCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const argv[]) +{ + static const char * const options[] = { + "-ascii", "-nocase", "-increasing", "-decreasing", "-command", "-integer", "-real", "-index", "-unique", + "-stride", "-dictionary", NULL + }; + enum { + OPT_ASCII, OPT_NOCASE, OPT_INCREASING, OPT_DECREASING, OPT_COMMAND, OPT_INTEGER, OPT_REAL, OPT_INDEX, OPT_UNIQUE, + OPT_STRIDE, OPT_DICT + }; + Jim_Obj *resObj; + int i; + int retCode; + int shared; + long stride = 1; + Jim_Obj **elements; + int listlen; + + struct lsort_info info; + + if (argc < 2) { +wrongargs: + Jim_WrongNumArgs(interp, 1, argv, "?options? list"); + return JIM_ERR; + } + + info.type = JIM_LSORT_ASCII; + info.order = 1; + info.indexc = 0; + info.unique = 0; + info.command = NULL; + info.interp = interp; + + for (i = 1; i < (argc - 1); i++) { + int option; + + if (Jim_GetEnum(interp, argv[i], options, &option, NULL, JIM_ENUM_ABBREV | JIM_ERRMSG) + != JIM_OK) + return JIM_ERR; + switch (option) { + case OPT_ASCII: + info.type = JIM_LSORT_ASCII; + break; + case OPT_DICT: + info.type = JIM_LSORT_DICT; + break; + case OPT_NOCASE: + info.type = JIM_LSORT_NOCASE; + break; + case OPT_INTEGER: + info.type = JIM_LSORT_INTEGER; + break; + case OPT_REAL: + info.type = JIM_LSORT_REAL; + break; + case OPT_INCREASING: + info.order = 1; + break; + case OPT_DECREASING: + info.order = -1; + break; + case OPT_UNIQUE: + info.unique = 1; + break; + case OPT_COMMAND: + if (i >= (argc - 2)) { + Jim_SetResultString(interp, "\"-command\" option must be followed by comparison command", -1); + return JIM_ERR; + } + info.type = JIM_LSORT_COMMAND; + info.command = argv[i + 1]; + i++; + break; + case OPT_STRIDE: + if (i >= argc - 2) { + goto wrongargs; + } + if (Jim_GetLong(interp, argv[++i], &stride) != JIM_OK) { + return JIM_ERR; + } + if (stride < 2) { + Jim_SetResultString(interp, "stride length must be at least 2", -1); + return JIM_ERR; + } + break; + case OPT_INDEX: + if (i >= (argc - 2)) { +badindex: + Jim_SetResultString(interp, "\"-index\" option must be followed by list index", -1); + return JIM_ERR; + } + JimListGetElements(interp, argv[i + 1], &info.indexc, &info.indexv); + if (info.indexc == 0) { + goto badindex; + } + i++; + break; + } + } + resObj = argv[argc - 1]; + JimListGetElements(interp, resObj, &listlen, &elements); + if (listlen <= 1) { + + Jim_SetResult(interp, resObj); + return JIM_OK; + } + + if (stride > 1) { + Jim_Obj *tmpListObj; + int i; + + if (listlen % stride) { + Jim_SetResultString(interp, "list size must be a multiple of the stride length", -1); + return JIM_ERR; + } + + tmpListObj = Jim_NewListObj(interp, NULL, 0); + Jim_IncrRefCount(tmpListObj); + for (i = 0; i < listlen; i += stride) { + Jim_ListAppendElement(interp, tmpListObj, Jim_NewListObj(interp, elements + i, stride)); + } + retCode = ListSortElements(interp, tmpListObj, &info); + if (retCode == JIM_OK) { + resObj = Jim_NewListObj(interp, NULL, 0); + + for (i = 0; i < listlen; i += stride) { + Jim_ListAppendList(interp, resObj, Jim_ListGetIndex(interp, tmpListObj, i / stride)); + } + Jim_SetResult(interp, 
resObj); + } + Jim_DecrRefCount(interp, tmpListObj); + } + else { + if ((shared = Jim_IsShared(resObj))) { + resObj = Jim_DuplicateObj(interp, resObj); + } + retCode = ListSortElements(interp, resObj, &info); + if (retCode == JIM_OK) { + Jim_SetResult(interp, resObj); + } + else if (shared) { + Jim_FreeNewObj(interp, resObj); + } + } + return retCode; +} + + +static int Jim_AppendCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *stringObjPtr; + int i; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?value ...?"); + return JIM_ERR; + } + if (argc == 2) { + stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_ERRMSG); + if (!stringObjPtr) + return JIM_ERR; + } + else { + int new_obj = 0; + stringObjPtr = Jim_GetVariable(interp, argv[1], JIM_UNSHARED); + if (!stringObjPtr) { + + stringObjPtr = Jim_NewEmptyStringObj(interp); + new_obj = 1; + } + else if (Jim_IsShared(stringObjPtr)) { + new_obj = 1; + stringObjPtr = Jim_DuplicateObj(interp, stringObjPtr); + } + for (i = 2; i < argc; i++) { + Jim_AppendObj(interp, stringObjPtr, argv[i]); + } + if (Jim_SetVariable(interp, argv[1], stringObjPtr) != JIM_OK) { + if (new_obj) { + Jim_FreeNewObj(interp, stringObjPtr); + } + return JIM_ERR; + } + } + Jim_SetResult(interp, stringObjPtr); + return JIM_OK; +} + + + + + +static int Jim_EvalCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int rc; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "arg ?arg ...?"); + return JIM_ERR; + } + + if (argc == 2) { + rc = Jim_EvalObj(interp, argv[1]); + } + else { + rc = Jim_EvalObj(interp, Jim_ConcatObj(interp, argc - 1, argv + 1)); + } + + return rc; +} + + +static int Jim_UplevelCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc >= 2) { + int retcode; + Jim_CallFrame *savedCallFrame, *targetCallFrame; + const char *str; + + + savedCallFrame = interp->framePtr; + + + str = Jim_String(argv[1]); + if ((str[0] >= '0' && str[0] <= '9') || str[0] == '#') { + targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]); + argc--; + argv++; + } + else { + targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL); + } + if (targetCallFrame == NULL) { + return JIM_ERR; + } + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv - 1, "?level? command ?arg ...?"); + return JIM_ERR; + } + + interp->framePtr = targetCallFrame; + if (argc == 2) { + retcode = Jim_EvalObj(interp, argv[1]); + } + else { + retcode = Jim_EvalObj(interp, Jim_ConcatObj(interp, argc - 1, argv + 1)); + } + interp->framePtr = savedCallFrame; + return retcode; + } + else { + Jim_WrongNumArgs(interp, 1, argv, "?level? 
command ?arg ...?"); + return JIM_ERR; + } +} + + +static int Jim_ExprCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retcode; + + if (argc == 2) { + retcode = Jim_EvalExpression(interp, argv[1]); + } +#ifndef JIM_COMPAT + else { + Jim_WrongNumArgs(interp, 1, argv, "expression"); + retcode = JIM_ERR; + } +#else + else if (argc > 2) { + Jim_Obj *objPtr; + + objPtr = Jim_ConcatObj(interp, argc - 1, argv + 1); + Jim_IncrRefCount(objPtr); + retcode = Jim_EvalExpression(interp, objPtr); + Jim_DecrRefCount(interp, objPtr); + } + else { + Jim_WrongNumArgs(interp, 1, argv, "expression ?...?"); + return JIM_ERR; + } +#endif + return retcode; +} + +static int JimBreakContinueHelper(Jim_Interp *interp, int argc, Jim_Obj *const *argv, int retcode) +{ + if (argc != 1 && argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "?level?"); + return JIM_ERR; + } + if (argc == 2) { + long level; + int ret = Jim_GetLong(interp, argv[1], &level); + if (ret != JIM_OK) { + return ret; + } + interp->break_level = level; + } + return retcode; +} + + +static int Jim_BreakCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimBreakContinueHelper(interp, argc, argv, JIM_BREAK); +} + + +static int Jim_ContinueCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimBreakContinueHelper(interp, argc, argv, JIM_CONTINUE); +} + + +static int Jim_StacktraceCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *listObj; + int i; + jim_wide skip = 0; + jim_wide last = 0; + + if (argc > 1) { + if (Jim_GetWideExpr(interp, argv[1], &skip) != JIM_OK) { + return JIM_ERR; + } + } + if (argc > 2) { + if (Jim_GetWideExpr(interp, argv[2], &last) != JIM_OK) { + return JIM_ERR; + } + } + + listObj = Jim_NewListObj(interp, NULL, 0); + for (i = skip; i <= interp->procLevel; i++) { + Jim_EvalFrame *frame = JimGetEvalFrameByProcLevel(interp, -i); + if (frame->procLevel < last) { + break; + } + JimAddStackFrame(interp, frame, listObj); + } + Jim_SetResult(interp, listObj); + return JIM_OK; +} + + +static int Jim_ReturnCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + Jim_Obj *stackTraceObj = NULL; + Jim_Obj *errorCodeObj = NULL; + int returnCode = JIM_OK; + long level = 1; + + for (i = 1; i < argc - 1; i += 2) { + if (Jim_CompareStringImmediate(interp, argv[i], "-code")) { + if (Jim_GetReturnCode(interp, argv[i + 1], &returnCode) == JIM_ERR) { + return JIM_ERR; + } + } + else if (Jim_CompareStringImmediate(interp, argv[i], "-errorinfo")) { + stackTraceObj = argv[i + 1]; + } + else if (Jim_CompareStringImmediate(interp, argv[i], "-errorcode")) { + errorCodeObj = argv[i + 1]; + } + else if (Jim_CompareStringImmediate(interp, argv[i], "-level")) { + if (Jim_GetLong(interp, argv[i + 1], &level) != JIM_OK || level < 0) { + Jim_SetResultFormatted(interp, "bad level \"%#s\"", argv[i + 1]); + return JIM_ERR; + } + } + else { + break; + } + } + + if (i != argc - 1 && i != argc) { + Jim_WrongNumArgs(interp, 1, argv, + "?-code code? ?-errorinfo stacktrace? ?-level level? ?result?"); + } + + + if (stackTraceObj && returnCode == JIM_ERR) { + JimSetStackTrace(interp, stackTraceObj); + } + + if (errorCodeObj && returnCode == JIM_ERR) { + Jim_SetGlobalVariableStr(interp, "errorCode", errorCodeObj); + } + interp->returnCode = returnCode; + interp->returnLevel = level; + + if (i == argc - 1) { + Jim_SetResult(interp, argv[i]); + } + return level == 0 ? 
returnCode : JIM_RETURN; +} + + +static int Jim_TailcallCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (interp->framePtr->level == 0) { + Jim_SetResultString(interp, "tailcall can only be called from a proc or lambda", -1); + return JIM_ERR; + } + else if (argc >= 2) { + + Jim_CallFrame *cf = interp->framePtr->parent; + + Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); + if (cmdPtr == NULL) { + return JIM_ERR; + } + + JimPanic((cf->tailcallCmd != NULL, "Already have a tailcallCmd")); + + + JimIncrCmdRefCount(cmdPtr); + cf->tailcallCmd = cmdPtr; + + + JimPanic((cf->tailcallObj != NULL, "Already have a tailcallobj")); + + cf->tailcallObj = Jim_NewListObj(interp, argv + 1, argc - 1); + Jim_IncrRefCount(cf->tailcallObj); + + + return JIM_EVAL; + } + return JIM_OK; +} + +static int JimAliasCmd(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *cmdList; + Jim_Obj *prefixListObj = Jim_CmdPrivData(interp); + + + cmdList = Jim_DuplicateObj(interp, prefixListObj); + Jim_ListInsertElements(interp, cmdList, Jim_ListLength(interp, cmdList), argc - 1, argv + 1); + + return JimEvalObjList(interp, cmdList); +} + +static void JimAliasCmdDelete(Jim_Interp *interp, void *privData) +{ + Jim_Obj *prefixListObj = privData; + Jim_DecrRefCount(interp, prefixListObj); +} + +static int Jim_AliasCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *prefixListObj; + + if (argc < 3) { + Jim_WrongNumArgs(interp, 1, argv, "newname command ?args ...?"); + return JIM_ERR; + } + + prefixListObj = Jim_NewListObj(interp, argv + 2, argc - 2); + Jim_IncrRefCount(prefixListObj); + Jim_SetResult(interp, argv[1]); + + return Jim_CreateCommandObj(interp, argv[1], JimAliasCmd, prefixListObj, JimAliasCmdDelete); +} + + +static int Jim_ProcCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Cmd *cmd; + + if (argc != 4 && argc != 5) { + Jim_WrongNumArgs(interp, 1, argv, "name arglist ?statics? 
body"); + return JIM_ERR; + } + + if (argc == 4) { + cmd = JimCreateProcedureCmd(interp, argv[2], NULL, argv[3], NULL); + } + else { + cmd = JimCreateProcedureCmd(interp, argv[2], argv[3], argv[4], NULL); + } + + if (cmd) { + + Jim_Obj *nameObjPtr = JimQualifyName(interp, argv[1]); + JimCreateCommand(interp, nameObjPtr, cmd); + + + JimUpdateProcNamespace(interp, cmd, nameObjPtr); + Jim_DecrRefCount(interp, nameObjPtr); + + + Jim_SetResult(interp, argv[1]); + return JIM_OK; + } + return JIM_ERR; +} + + +static int Jim_XtraceCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "callback"); + return JIM_ERR; + } + + if (interp->traceCmdObj) { + Jim_DecrRefCount(interp, interp->traceCmdObj); + interp->traceCmdObj = NULL; + } + + if (Jim_Length(argv[1])) { + + interp->traceCmdObj = argv[1]; + Jim_IncrRefCount(interp->traceCmdObj); + } + return JIM_OK; +} + + +static int Jim_LocalCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retcode; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); + return JIM_ERR; + } + + + interp->local++; + retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1); + interp->local--; + + + + if (retcode == 0) { + Jim_Obj *cmdNameObj = Jim_GetResult(interp); + + if (Jim_GetCommand(interp, cmdNameObj, JIM_ERRMSG) == NULL) { + return JIM_ERR; + } + if (interp->framePtr->localCommands == NULL) { + interp->framePtr->localCommands = Jim_Alloc(sizeof(*interp->framePtr->localCommands)); + Jim_InitStack(interp->framePtr->localCommands); + } + Jim_IncrRefCount(cmdNameObj); + Jim_StackPush(interp->framePtr->localCommands, cmdNameObj); + } + + return retcode; +} + + +static int Jim_UpcallCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "cmd ?args ...?"); + return JIM_ERR; + } + else { + int retcode; + + Jim_Cmd *cmdPtr = Jim_GetCommand(interp, argv[1], JIM_ERRMSG); + if (cmdPtr == NULL || !cmdPtr->isproc || !cmdPtr->prevCmd) { + Jim_SetResultFormatted(interp, "no previous command: \"%#s\"", argv[1]); + return JIM_ERR; + } + + cmdPtr->u.proc.upcall++; + JimIncrCmdRefCount(cmdPtr); + + + retcode = Jim_EvalObjVector(interp, argc - 1, argv + 1); + + + cmdPtr->u.proc.upcall--; + JimDecrCmdRefCount(interp, cmdPtr); + + return retcode; + } +} + + +static int Jim_ApplyCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "lambdaExpr ?arg ...?"); + return JIM_ERR; + } + else { + int ret; + Jim_Cmd *cmd; + Jim_Obj *argListObjPtr; + Jim_Obj *bodyObjPtr; + Jim_Obj *nsObj = NULL; + Jim_Obj **nargv; + + int len = Jim_ListLength(interp, argv[1]); + if (len != 2 && len != 3) { + Jim_SetResultFormatted(interp, "can't interpret \"%#s\" as a lambda expression", argv[1]); + return JIM_ERR; + } + + if (len == 3) { +#ifdef jim_ext_namespace + + nsObj = Jim_ListGetIndex(interp, argv[1], 2); +#else + Jim_SetResultString(interp, "namespaces not enabled", -1); + return JIM_ERR; +#endif + } + argListObjPtr = Jim_ListGetIndex(interp, argv[1], 0); + bodyObjPtr = Jim_ListGetIndex(interp, argv[1], 1); + + cmd = JimCreateProcedureCmd(interp, argListObjPtr, NULL, bodyObjPtr, nsObj); + + if (cmd) { + + nargv = Jim_Alloc((argc - 2 + 1) * sizeof(*nargv)); + nargv[0] = Jim_NewStringObj(interp, "apply lambdaExpr", -1); + Jim_IncrRefCount(nargv[0]); + memcpy(&nargv[1], argv + 2, (argc - 2) * sizeof(*nargv)); + ret = JimCallProcedure(interp, cmd, argc - 2 + 1, nargv); + 
Jim_DecrRefCount(interp, nargv[0]); + Jim_Free(nargv); + + JimDecrCmdRefCount(interp, cmd); + return ret; + } + return JIM_ERR; + } +} + + + +static int Jim_ConcatCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_SetResult(interp, Jim_ConcatObj(interp, argc - 1, argv + 1)); + return JIM_OK; +} + + +static int Jim_UpvarCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + Jim_CallFrame *targetCallFrame; + + + if (argc > 3 && (argc % 2 == 0)) { + targetCallFrame = Jim_GetCallFrameByLevel(interp, argv[1]); + argc--; + argv++; + } + else { + targetCallFrame = Jim_GetCallFrameByLevel(interp, NULL); + } + if (targetCallFrame == NULL) { + return JIM_ERR; + } + + + if (argc < 3) { + Jim_WrongNumArgs(interp, 1, argv, "?level? otherVar localVar ?otherVar localVar ...?"); + return JIM_ERR; + } + + + for (i = 1; i < argc; i += 2) { + if (Jim_SetVariableLink(interp, argv[i + 1], argv[i], targetCallFrame) != JIM_OK) + return JIM_ERR; + } + return JIM_OK; +} + + +static int Jim_GlobalCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int i; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?varName ...?"); + return JIM_ERR; + } + + if (interp->framePtr->level == 0) + return JIM_OK; + for (i = 1; i < argc; i++) { + + const char *name = Jim_String(argv[i]); + if (name[0] != ':' || name[1] != ':') { + if (Jim_SetVariableLink(interp, argv[i], argv[i], interp->topFramePtr) != JIM_OK) + return JIM_ERR; + } + } + return JIM_OK; +} + +static Jim_Obj *JimStringMap(Jim_Interp *interp, Jim_Obj *mapListObjPtr, + Jim_Obj *objPtr, int nocase) +{ + int numMaps; + const char *str, *noMatchStart = NULL; + int strLen, i; + Jim_Obj *resultObjPtr; + + numMaps = Jim_ListLength(interp, mapListObjPtr); + if (numMaps % 2) { + Jim_SetResultString(interp, "list must contain an even number of elements", -1); + return NULL; + } + + str = Jim_String(objPtr); + strLen = Jim_Utf8Length(interp, objPtr); + + + resultObjPtr = Jim_NewStringObj(interp, "", 0); + while (strLen) { + for (i = 0; i < numMaps; i += 2) { + Jim_Obj *eachObjPtr; + const char *k; + int kl; + + eachObjPtr = Jim_ListGetIndex(interp, mapListObjPtr, i); + k = Jim_String(eachObjPtr); + kl = Jim_Utf8Length(interp, eachObjPtr); + + if (strLen >= kl && kl) { + int rc; + rc = JimStringCompareUtf8(str, kl, k, kl, nocase); + if (rc == 0) { + if (noMatchStart) { + Jim_AppendString(interp, resultObjPtr, noMatchStart, str - noMatchStart); + noMatchStart = NULL; + } + Jim_AppendObj(interp, resultObjPtr, Jim_ListGetIndex(interp, mapListObjPtr, i + 1)); + str += utf8_index(str, kl); + strLen -= kl; + break; + } + } + } + if (i == numMaps) { + int c; + if (noMatchStart == NULL) + noMatchStart = str; + str += utf8_tounicode(str, &c); + strLen--; + } + } + if (noMatchStart) { + Jim_AppendString(interp, resultObjPtr, noMatchStart, str - noMatchStart); + } + return resultObjPtr; +} + + +static int Jim_StringCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int len; + int opt_case = 1; + int option; + static const char * const nocase_options[] = { + "-nocase", NULL + }; + static const char * const nocase_length_options[] = { + "-nocase", "-length", NULL + }; + + enum { + OPT_BYTELENGTH, + OPT_BYTERANGE, + OPT_CAT, + OPT_COMPARE, + OPT_EQUAL, + OPT_FIRST, + OPT_INDEX, + OPT_IS, + OPT_LAST, + OPT_LENGTH, + OPT_MAP, + OPT_MATCH, + OPT_RANGE, + OPT_REPEAT, + OPT_REPLACE, + OPT_REVERSE, + OPT_TOLOWER, + OPT_TOTITLE, + OPT_TOUPPER, + OPT_TRIM, + OPT_TRIMLEFT, + OPT_TRIMRIGHT, + OPT_COUNT + }; + 
static const jim_subcmd_type cmds[OPT_COUNT + 1] = { + JIM_DEF_SUBCMD("bytelength", "string", 1, 1), + JIM_DEF_SUBCMD("byterange", "string first last", 3, 3), + JIM_DEF_SUBCMD("cat", "?...?", 0, -1), + JIM_DEF_SUBCMD("compare", "?-nocase? ?-length int? string1 string2", 2, 5), + JIM_DEF_SUBCMD("equal", "?-nocase? ?-length int? string1 string2", 2, 5), + JIM_DEF_SUBCMD("first", "subString string ?index?", 2, 3), + JIM_DEF_SUBCMD("index", "string index", 2, 2), + JIM_DEF_SUBCMD("is", "class ?-strict? str", 2, 3), + JIM_DEF_SUBCMD("last", "subString string ?index?", 2, 3), + JIM_DEF_SUBCMD("length","string", 1, 1), + JIM_DEF_SUBCMD("map", "?-nocase? mapList string", 2, 3), + JIM_DEF_SUBCMD("match", "?-nocase? pattern string", 2, 3), + JIM_DEF_SUBCMD("range", "string first last", 3, 3), + JIM_DEF_SUBCMD("repeat", "string count", 2, 2), + JIM_DEF_SUBCMD("replace", "string first last ?string?", 3, 4), + JIM_DEF_SUBCMD("reverse", "string", 1, 1), + JIM_DEF_SUBCMD("tolower", "string", 1, 1), + JIM_DEF_SUBCMD("totitle", "string", 1, 1), + JIM_DEF_SUBCMD("toupper", "string", 1, 1), + JIM_DEF_SUBCMD("trim", "string ?trimchars?", 1, 2), + JIM_DEF_SUBCMD("trimleft", "string ?trimchars?", 1, 2), + JIM_DEF_SUBCMD("trimright", "string ?trimchars?", 1, 2), + { NULL } + }; + const jim_subcmd_type *ct = Jim_ParseSubCmd(interp, cmds, argc, argv); + if (!ct) { + return JIM_ERR; + } + if (ct->function) { + + return ct->function(interp, argc, argv); + } + + option = ct - cmds; + + switch (option) { + case OPT_LENGTH: + Jim_SetResultInt(interp, Jim_Utf8Length(interp, argv[2])); + return JIM_OK; + + case OPT_BYTELENGTH: + Jim_SetResultInt(interp, Jim_Length(argv[2])); + return JIM_OK; + + case OPT_CAT:{ + Jim_Obj *objPtr; + if (argc == 3) { + + objPtr = argv[2]; + } + else { + int i; + + objPtr = Jim_NewStringObj(interp, "", 0); + + for (i = 2; i < argc; i++) { + Jim_AppendObj(interp, objPtr, argv[i]); + } + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + case OPT_COMPARE: + case OPT_EQUAL: + { + + long opt_length = -1; + int n = argc - 4; + int i = 2; + while (n > 0) { + int subopt; + if (Jim_GetEnum(interp, argv[i++], nocase_length_options, &subopt, NULL, + JIM_ENUM_ABBREV) != JIM_OK) { +badcompareargs: + Jim_SubCmdArgError(interp, ct, argv[0]); + return JIM_ERR; + } + if (subopt == 0) { + + opt_case = 0; + n--; + } + else { + + if (n < 2) { + goto badcompareargs; + } + if (Jim_GetLong(interp, argv[i++], &opt_length) != JIM_OK) { + return JIM_ERR; + } + n -= 2; + } + } + if (n) { + goto badcompareargs; + } + argv += argc - 2; + if (opt_length < 0 && option != OPT_COMPARE && opt_case) { + + Jim_SetResultBool(interp, Jim_StringEqObj(argv[0], argv[1])); + } + else { + const char *s1 = Jim_String(argv[0]); + int l1 = Jim_Utf8Length(interp, argv[0]); + const char *s2 = Jim_String(argv[1]); + int l2 = Jim_Utf8Length(interp, argv[1]); + if (opt_length >= 0) { + if (l1 > opt_length) { + l1 = opt_length; + } + if (l2 > opt_length) { + l2 = opt_length; + } + } + n = JimStringCompareUtf8(s1, l1, s2, l2, !opt_case); + Jim_SetResultInt(interp, option == OPT_COMPARE ? n : n == 0); + } + return JIM_OK; + } + + case OPT_MATCH: + if (argc != 4 && + (argc != 5 || + Jim_GetEnum(interp, argv[2], nocase_options, &opt_case, NULL, + JIM_ENUM_ABBREV) != JIM_OK)) { + Jim_WrongNumArgs(interp, 2, argv, "?-nocase? 
pattern string"); + return JIM_ERR; + } + if (opt_case == 0) { + argv++; + } + Jim_SetResultBool(interp, Jim_StringMatchObj(interp, argv[2], argv[3], !opt_case)); + return JIM_OK; + + case OPT_MAP:{ + Jim_Obj *objPtr; + + if (argc != 4 && + (argc != 5 || + Jim_GetEnum(interp, argv[2], nocase_options, &opt_case, NULL, + JIM_ENUM_ABBREV) != JIM_OK)) { + Jim_WrongNumArgs(interp, 2, argv, "?-nocase? mapList string"); + return JIM_ERR; + } + + if (opt_case == 0) { + argv++; + } + objPtr = JimStringMap(interp, argv[2], argv[3], !opt_case); + if (objPtr == NULL) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + case OPT_RANGE:{ + Jim_Obj *objPtr = Jim_StringRangeObj(interp, argv[2], argv[3], argv[4]); + if (objPtr == NULL) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + case OPT_BYTERANGE:{ + Jim_Obj *objPtr = Jim_StringByteRangeObj(interp, argv[2], argv[3], argv[4]); + if (objPtr == NULL) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + case OPT_REPLACE:{ + Jim_Obj *objPtr = JimStringReplaceObj(interp, argv[2], argv[3], argv[4], argc == 6 ? argv[5] : NULL); + if (objPtr == NULL) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + + case OPT_REPEAT:{ + Jim_Obj *objPtr; + jim_wide count; + + if (Jim_GetWideExpr(interp, argv[3], &count) != JIM_OK) { + return JIM_ERR; + } + objPtr = Jim_NewStringObj(interp, "", 0); + if (count > 0) { + while (count--) { + Jim_AppendObj(interp, objPtr, argv[2]); + } + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + } + + case OPT_REVERSE:{ + char *buf, *p; + const char *str; + int i; + + str = Jim_GetString(argv[2], &len); + buf = Jim_Alloc(len + 1); + assert(buf); + p = buf + len; + *p = 0; + for (i = 0; i < len; ) { + int c; + int l = utf8_tounicode(str, &c); + memcpy(p - l, str, l); + p -= l; + i += l; + str += l; + } + Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len)); + return JIM_OK; + } + + case OPT_INDEX:{ + int idx; + const char *str; + + if (Jim_GetIndex(interp, argv[3], &idx) != JIM_OK) { + return JIM_ERR; + } + str = Jim_String(argv[2]); + len = Jim_Utf8Length(interp, argv[2]); + idx = JimRelToAbsIndex(len, idx); + if (idx < 0 || idx >= len || str == NULL) { + Jim_SetResultString(interp, "", 0); + } + else if (len == Jim_Length(argv[2])) { + + Jim_SetResultString(interp, str + idx, 1); + } + else { + int c; + int i = utf8_index(str, idx); + Jim_SetResultString(interp, str + i, utf8_tounicode(str + i, &c)); + } + return JIM_OK; + } + + case OPT_FIRST: + case OPT_LAST:{ + int idx = 0, l1, l2; + const char *s1, *s2; + + s1 = Jim_String(argv[2]); + s2 = Jim_String(argv[3]); + l1 = Jim_Utf8Length(interp, argv[2]); + l2 = Jim_Utf8Length(interp, argv[3]); + if (argc == 5) { + if (Jim_GetIndex(interp, argv[4], &idx) != JIM_OK) { + return JIM_ERR; + } + idx = JimRelToAbsIndex(l2, idx); + if (idx < 0) { + idx = 0; + } + } + else if (option == OPT_LAST) { + idx = l2; + } + if (option == OPT_FIRST) { + Jim_SetResultInt(interp, JimStringFirst(s1, l1, s2, l2, idx)); + } + else { +#ifdef JIM_UTF8 + Jim_SetResultInt(interp, JimStringLastUtf8(s1, l1, s2, idx)); +#else + Jim_SetResultInt(interp, JimStringLast(s1, l1, s2, idx)); +#endif + } + return JIM_OK; + } + + case OPT_TRIM: + Jim_SetResult(interp, JimStringTrim(interp, argv[2], argc == 4 ? argv[3] : NULL)); + return JIM_OK; + case OPT_TRIMLEFT: + Jim_SetResult(interp, JimStringTrimLeft(interp, argv[2], argc == 4 ? 
argv[3] : NULL)); + return JIM_OK; + case OPT_TRIMRIGHT:{ + Jim_SetResult(interp, JimStringTrimRight(interp, argv[2], argc == 4 ? argv[3] : NULL)); + return JIM_OK; + } + + case OPT_TOLOWER: + Jim_SetResult(interp, JimStringToLower(interp, argv[2])); + return JIM_OK; + case OPT_TOUPPER: + Jim_SetResult(interp, JimStringToUpper(interp, argv[2])); + return JIM_OK; + case OPT_TOTITLE: + Jim_SetResult(interp, JimStringToTitle(interp, argv[2])); + return JIM_OK; + + case OPT_IS: + if (argc == 5 && !Jim_CompareStringImmediate(interp, argv[3], "-strict")) { + Jim_SubCmdArgError(interp, ct, argv[0]); + return JIM_ERR; + } + return JimStringIs(interp, argv[argc - 1], argv[2], argc == 5); + } + return JIM_OK; +} + + +static int Jim_TimeCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + long i, count = 1; + jim_wide start, elapsed; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "script ?count?"); + return JIM_ERR; + } + if (argc == 3) { + if (Jim_GetLong(interp, argv[2], &count) != JIM_OK) + return JIM_ERR; + } + if (count < 0) + return JIM_OK; + i = count; + start = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW); + while (i-- > 0) { + int retval; + + retval = Jim_EvalObj(interp, argv[1]); + if (retval != JIM_OK) { + return retval; + } + } + elapsed = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW) - start; + if (elapsed < count * 10) { + Jim_SetResult(interp, Jim_NewDoubleObj(interp, elapsed * 1.0 / count)); + } + else { + Jim_SetResultInt(interp, count == 0 ? 0 : elapsed / count); + } + Jim_AppendString(interp, Jim_GetResult(interp)," microseconds per iteration", -1); + return JIM_OK; +} + + +static int Jim_TimeRateCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + long us = 0; + jim_wide start, delta, overhead; + Jim_Obj *objPtr; + double us_per_iter; + int count; + int n; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "script ?milliseconds?"); + return JIM_ERR; + } + if (argc == 3) { + if (Jim_GetLong(interp, argv[2], &us) != JIM_OK) + return JIM_ERR; + us *= 1000; + } + if (us < 1) { + + us = 1000 * 1000; + } + + + start = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW); + count = 0; + do { + int retval = Jim_EvalObj(interp, argv[1]); + delta = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW) - start; + if (retval != JIM_OK) { + return retval; + } + count++; + } while (delta < us); + + + start = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW); + n = 0; + do { + int retval = Jim_EvalObj(interp, interp->nullScriptObj); + overhead = Jim_GetTimeUsec(CLOCK_MONOTONIC_RAW) - start; + if (retval != JIM_OK) { + return retval; + } + n++; + } while (n < count); + + delta -= overhead; + + us_per_iter = (double)delta / count; + objPtr = Jim_NewListObj(interp, NULL, 0); + + Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, "us_per_iter", -1)); + Jim_ListAppendElement(interp, objPtr, Jim_NewDoubleObj(interp, us_per_iter)); + Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, "iters_per_sec", -1)); + Jim_ListAppendElement(interp, objPtr, Jim_NewDoubleObj(interp, 1e6 / us_per_iter)); + Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, "count", -1)); + Jim_ListAppendElement(interp, objPtr, Jim_NewIntObj(interp, count)); + Jim_ListAppendElement(interp, objPtr, Jim_NewStringObj(interp, "elapsed_us", -1)); + Jim_ListAppendElement(interp, objPtr, Jim_NewIntObj(interp, delta)); + Jim_SetResult(interp, objPtr); + return JIM_OK; +} + + +static int Jim_ExitCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + long exitCode = 0; + + if (argc > 2) { + 
Jim_WrongNumArgs(interp, 1, argv, "?exitCode?"); + return JIM_ERR; + } + if (argc == 2) { + if (Jim_GetLong(interp, argv[1], &exitCode) != JIM_OK) + return JIM_ERR; + Jim_SetResult(interp, argv[1]); + } + interp->exitCode = exitCode; + return JIM_EXIT; +} + +static int JimMatchReturnCodes(Jim_Interp *interp, Jim_Obj *retcodeListObj, int rc) +{ + int len = Jim_ListLength(interp, retcodeListObj); + int i; + for (i = 0; i < len; i++) { + int returncode; + if (Jim_GetReturnCode(interp, Jim_ListGetIndex(interp, retcodeListObj, i), &returncode) != JIM_OK) { + return JIM_ERR; + } + if (rc == returncode) { + return JIM_OK; + } + } + return -1; +} + + +static int JimCatchTryHelper(Jim_Interp *interp, int istry, int argc, Jim_Obj *const *argv) +{ + static const char * const wrongargs_catchtry[2] = { + "?-?no?code ... --? script ?resultVarName? ?optionVarName?", + "?-?no?code ... --? script ?on|trap codes vars script? ... ?finally script?" + }; + int exitCode = 0; + int i; + int sig = 0; + int ok; + Jim_Obj *finallyScriptObj = NULL; + Jim_Obj *msgVarObj = NULL; + Jim_Obj *optsVarObj = NULL; + Jim_Obj *handlerScriptObj = NULL; + Jim_Obj *errorCodeObj; + int idx; + + + jim_wide ignore_mask = (1 << JIM_EXIT) | (1 << JIM_EVAL) | (1 << JIM_SIGNAL); + static const int max_ignore_code = sizeof(ignore_mask) * 8; + + JimPanic((istry != 0 && istry != 1, "wrong args to JimCatchTryHelper")); + + Jim_SetGlobalVariableStr(interp, "errorCode", Jim_NewStringObj(interp, "NONE", -1)); + + for (i = 1; i < argc - 1; i++) { + const char *arg = Jim_String(argv[i]); + jim_wide option; + int ignore; + + + if (strcmp(arg, "--") == 0) { + i++; + break; + } + if (*arg != '-') { + break; + } + + if (strncmp(arg, "-no", 3) == 0) { + arg += 3; + ignore = 1; + } + else { + arg++; + ignore = 0; + } + + if (Jim_StringToWide(arg, &option, 10) != JIM_OK) { + option = -1; + } + if (option < 0) { + option = Jim_FindByName(arg, jimReturnCodes, jimReturnCodesSize); + } + if (option < 0) { + goto wrongargs; + } + + if (ignore) { + ignore_mask |= ((jim_wide)1 << option); + } + else { + ignore_mask &= (~((jim_wide)1 << option)); + } + } + + idx = i; + + if (argc - idx < 1) { +wrongargs: + Jim_WrongNumArgs(interp, 1, argv, wrongargs_catchtry[istry]); + return JIM_ERR; + } + + if ((ignore_mask & (1 << JIM_SIGNAL)) == 0) { + sig++; + } + + interp->signal_level += sig; + if (Jim_CheckSignal(interp)) { + + exitCode = JIM_SIGNAL; + } + else { + exitCode = Jim_EvalObj(interp, argv[idx]); + + interp->errorFlag = 0; + } + interp->signal_level -= sig; + + errorCodeObj = Jim_GetGlobalVariableStr(interp, "errorCode", JIM_NONE); + + idx++; + if (istry) { + while (idx < argc) { + int option; + int ret; + static const char * const try_options[] = { "on", "trap", "finally", NULL }; + enum { TRY_ON, TRY_TRAP, TRY_FINALLY, }; + + if (Jim_GetEnum(interp, argv[idx], try_options, &option, "handler", JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + switch (option) { + case TRY_ON: + case TRY_TRAP: + if (idx + 4 > argc) { + goto wrongargs; + } + if (option == TRY_ON) { + ret = JimMatchReturnCodes(interp, argv[idx + 1], exitCode); + if (ret > JIM_OK) { + goto wrongargs; + } + } + else if (errorCodeObj) { + int len = Jim_ListLength(interp, argv[idx + 1]); + int i; + + ret = JIM_OK; + + for (i = 0; i < len; i++) { + Jim_Obj *matchObj = Jim_ListGetIndex(interp, argv[idx + 1], i); + Jim_Obj *objPtr = Jim_ListGetIndex(interp, errorCodeObj, i); + if (Jim_StringCompareObj(interp, matchObj, objPtr, 0) != 0) { + ret = -1; + break; + } + } + } + else { + + ret = -1; + } + + 
if (ret == JIM_OK && handlerScriptObj == NULL) { + msgVarObj = Jim_ListGetIndex(interp, argv[idx + 2], 0); + optsVarObj = Jim_ListGetIndex(interp, argv[idx + 2], 1); + handlerScriptObj = argv[idx + 3]; + } + idx += 4; + break; + case TRY_FINALLY: + if (idx + 2 != argc) { + goto wrongargs; + } + finallyScriptObj = argv[idx + 1]; + idx += 2; + break; + } + } + } + else { + if (argc - idx >= 1) { + msgVarObj = argv[idx]; + idx++; + if (argc - idx >= 1) { + optsVarObj = argv[idx]; + idx++; + } + } + } + + + if (exitCode >= 0 && exitCode < max_ignore_code && (((unsigned jim_wide)1 << exitCode) & ignore_mask)) { + + if (finallyScriptObj) { + Jim_EvalObj(interp, finallyScriptObj); + } + return exitCode; + } + + if (sig && exitCode == JIM_SIGNAL) { + + if (interp->signal_set_result) { + interp->signal_set_result(interp, interp->sigmask); + } + else if (!istry) { + Jim_SetResultInt(interp, interp->sigmask); + } + interp->sigmask = 0; + } + + ok = 1; + if (msgVarObj && Jim_Length(msgVarObj)) { + if (Jim_SetVariable(interp, msgVarObj, Jim_GetResult(interp)) != JIM_OK) { + ok = 0; + } + } + if (ok && optsVarObj && Jim_Length(optsVarObj)) { + Jim_Obj *optListObj = Jim_NewListObj(interp, NULL, 0); + + Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-code", -1)); + Jim_ListAppendElement(interp, optListObj, + Jim_NewIntObj(interp, exitCode == JIM_RETURN ? interp->returnCode : exitCode)); + Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-level", -1)); + Jim_ListAppendElement(interp, optListObj, Jim_NewIntObj(interp, interp->returnLevel)); + if (exitCode == JIM_ERR) { + Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorinfo", + -1)); + Jim_ListAppendElement(interp, optListObj, interp->stackTrace); + + if (errorCodeObj) { + Jim_ListAppendElement(interp, optListObj, Jim_NewStringObj(interp, "-errorcode", -1)); + Jim_ListAppendElement(interp, optListObj, errorCodeObj); + } + } + if (Jim_SetVariable(interp, optsVarObj, optListObj) != JIM_OK) { + ok = 0; + } + } + if (ok && handlerScriptObj) { + + exitCode = Jim_EvalObj(interp, handlerScriptObj); + } + + if (finallyScriptObj) { + + Jim_Obj *prevResultObj = Jim_GetResult(interp); + Jim_IncrRefCount(prevResultObj); + int ret = Jim_EvalObj(interp, finallyScriptObj); + if (ret == JIM_OK) { + Jim_SetResult(interp, prevResultObj); + } + else { + exitCode = ret; + } + Jim_DecrRefCount(interp, prevResultObj); + } + if (!istry) { + Jim_SetResultInt(interp, exitCode); + exitCode = JIM_OK; + } + return exitCode; +} + + +static int Jim_CatchCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimCatchTryHelper(interp, 0, argc, argv); +} + + +static int Jim_TryCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + return JimCatchTryHelper(interp, 1, argc, argv); +} + + + +static int Jim_RenameCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "oldName newName"); + return JIM_ERR; + } + + return Jim_RenameCommand(interp, argv[1], argv[2]); +} + +#define JIM_DICTMATCH_KEYS 0x0001 +#define JIM_DICTMATCH_VALUES 0x002 + +int Jim_DictMatchTypes(Jim_Interp *interp, Jim_Obj *objPtr, Jim_Obj *patternObj, int match_type, int return_types) +{ + Jim_Obj *listObjPtr; + Jim_Dict *dict; + int i; + + if (SetDictFromAny(interp, objPtr) != JIM_OK) { + return JIM_ERR; + } + dict = objPtr->internalRep.dictValue; + + listObjPtr = Jim_NewListObj(interp, NULL, 0); + + for (i = 0; i < dict->len; i += 2 ) { + Jim_Obj *keyObj = 
dict->table[i]; + Jim_Obj *valObj = dict->table[i + 1]; + if (patternObj) { + Jim_Obj *matchObj = (match_type == JIM_DICTMATCH_KEYS) ? keyObj : valObj; + if (!Jim_StringMatchObj(interp, patternObj, matchObj, 0)) { + + continue; + } + } + if (return_types & JIM_DICTMATCH_KEYS) { + Jim_ListAppendElement(interp, listObjPtr, keyObj); + } + if (return_types & JIM_DICTMATCH_VALUES) { + Jim_ListAppendElement(interp, listObjPtr, valObj); + } + } + + Jim_SetResult(interp, listObjPtr); + return JIM_OK; +} + +int Jim_DictSize(Jim_Interp *interp, Jim_Obj *objPtr) +{ + if (SetDictFromAny(interp, objPtr) != JIM_OK) { + return -1; + } + return objPtr->internalRep.dictValue->len / 2; +} + +Jim_Obj *Jim_DictMerge(Jim_Interp *interp, int objc, Jim_Obj *const *objv) +{ + Jim_Obj *objPtr = Jim_NewDictObj(interp, NULL, 0); + int i; + + JimPanic((objc == 0, "Jim_DictMerge called with objc=0")); + + + + for (i = 0; i < objc; i++) { + Jim_Obj **table; + int tablelen; + int j; + + table = Jim_DictPairs(interp, objv[i], &tablelen); + if (tablelen && !table) { + Jim_FreeNewObj(interp, objPtr); + return NULL; + } + for (j = 0; j < tablelen; j += 2) { + DictAddElement(interp, objPtr, table[j], table[j + 1]); + } + } + return objPtr; +} + +int Jim_DictInfo(Jim_Interp *interp, Jim_Obj *objPtr) +{ + char buffer[100]; + Jim_Obj *output; + Jim_Dict *dict; + + if (SetDictFromAny(interp, objPtr) != JIM_OK) { + return JIM_ERR; + } + + dict = objPtr->internalRep.dictValue; + + + snprintf(buffer, sizeof(buffer), "%d entries in table, %d buckets", dict->len, dict->size); + output = Jim_NewStringObj(interp, buffer, -1); + Jim_SetResult(interp, output); + return JIM_OK; +} + +static int Jim_EvalEnsemble(Jim_Interp *interp, const char *basecmd, const char *subcmd, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *prefixObj = Jim_NewStringObj(interp, basecmd, -1); + + Jim_AppendString(interp, prefixObj, " ", 1); + Jim_AppendString(interp, prefixObj, subcmd, -1); + + return Jim_EvalObjPrefix(interp, prefixObj, argc, argv); +} + +static int JimDictWith(Jim_Interp *interp, Jim_Obj *dictVarName, Jim_Obj *const *keyv, int keyc, Jim_Obj *scriptObj) +{ + int i; + Jim_Obj *objPtr; + Jim_Obj *dictObj; + Jim_Obj **dictValues; + int len; + int ret = JIM_OK; + + + dictObj = Jim_GetVariable(interp, dictVarName, JIM_ERRMSG); + if (dictObj == NULL || Jim_DictKeysVector(interp, dictObj, keyv, keyc, &objPtr, JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + + dictValues = Jim_DictPairs(interp, objPtr, &len); + if (len && dictValues == NULL) { + return JIM_ERR; + } + for (i = 0; i < len; i += 2) { + if (Jim_SetVariable(interp, dictValues[i], dictValues[i + 1]) == JIM_ERR) { + return JIM_ERR; + } + } + + + if (Jim_Length(scriptObj)) { + ret = Jim_EvalObj(interp, scriptObj); + + + if (ret == JIM_OK && Jim_GetVariable(interp, dictVarName, 0) != NULL) { + + Jim_Obj **newkeyv = Jim_Alloc(sizeof(*newkeyv) * (keyc + 1)); + for (i = 0; i < keyc; i++) { + newkeyv[i] = keyv[i]; + } + + for (i = 0; i < len; i += 2) { + + if (Jim_StringCompareObj(interp, dictVarName, dictValues[i], 0) != 0) { + + objPtr = Jim_GetVariable(interp, dictValues[i], 0); + newkeyv[keyc] = dictValues[i]; + Jim_SetDictKeysVector(interp, dictVarName, newkeyv, keyc + 1, objPtr, JIM_NORESULT); + } + } + Jim_Free(newkeyv); + } + } + + return ret; +} + + +static int Jim_DictCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + int types = JIM_DICTMATCH_KEYS; + + enum { + OPT_CREATE, + OPT_GET, + OPT_GETDEF, + OPT_GETWITHDEFAULT, + OPT_SET, + OPT_UNSET, + 
OPT_EXISTS, + OPT_KEYS, + OPT_SIZE, + OPT_INFO, + OPT_MERGE, + OPT_WITH, + OPT_APPEND, + OPT_LAPPEND, + OPT_INCR, + OPT_REMOVE, + OPT_VALUES, + OPT_FOR, + OPT_REPLACE, + OPT_UPDATE, + OPT_COUNT + }; + static const jim_subcmd_type cmds[OPT_COUNT + 1] = { + JIM_DEF_SUBCMD("create", "?key value ...?", 0, -2), + JIM_DEF_SUBCMD("get", "dictionary ?key ...?", 1, -1), + JIM_DEF_SUBCMD_HIDDEN("getdef", "dictionary ?key ...? key default", 3, -1), + JIM_DEF_SUBCMD("getwithdefault", "dictionary ?key ...? key default", 3, -1), + JIM_DEF_SUBCMD("set", "varName key ?key ...? value", 3, -1), + JIM_DEF_SUBCMD("unset", "varName key ?key ...?", 2, -1), + JIM_DEF_SUBCMD("exists", "dictionary key ?key ...?", 2, -1), + JIM_DEF_SUBCMD("keys", "dictionary ?pattern?", 1, 2), + JIM_DEF_SUBCMD("size", "dictionary", 1, 1), + JIM_DEF_SUBCMD("info", "dictionary", 1, 1), + JIM_DEF_SUBCMD("merge", "?...?", 0, -1), + JIM_DEF_SUBCMD("with", "dictVar ?key ...? script", 2, -1), + JIM_DEF_SUBCMD("append", "varName key ?value ...?", 2, -1), + JIM_DEF_SUBCMD("lappend", "varName key ?value ...?", 2, -1), + JIM_DEF_SUBCMD("incr", "varName key ?increment?", 2, 3), + JIM_DEF_SUBCMD("remove", "dictionary ?key ...?", 1, -1), + JIM_DEF_SUBCMD("values", "dictionary ?pattern?", 1, 2), + JIM_DEF_SUBCMD("for", "vars dictionary script", 3, 3), + JIM_DEF_SUBCMD("replace", "dictionary ?key value ...?", 1, -1), + JIM_DEF_SUBCMD("update", "varName ?arg ...? script", 2, -1), + { NULL } + }; + const jim_subcmd_type *ct = Jim_ParseSubCmd(interp, cmds, argc, argv); + if (!ct) { + return JIM_ERR; + } + if (ct->function) { + + return ct->function(interp, argc, argv); + } + + + switch (ct - cmds) { + case OPT_GET: + if (Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, + JIM_ERRMSG) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + + case OPT_GETDEF: + case OPT_GETWITHDEFAULT:{ + int rc = Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 4, &objPtr, JIM_ERRMSG); + if (rc == -1) { + + return JIM_ERR; + } + if (rc == JIM_ERR) { + Jim_SetResult(interp, argv[argc - 1]); + } + else { + Jim_SetResult(interp, objPtr); + } + return JIM_OK; + } + + case OPT_SET: + return Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 4, argv[argc - 1], JIM_ERRMSG); + + case OPT_EXISTS:{ + int rc = Jim_DictKeysVector(interp, argv[2], argv + 3, argc - 3, &objPtr, JIM_NONE); + if (rc < 0) { + return JIM_ERR; + } + Jim_SetResultBool(interp, rc == JIM_OK); + return JIM_OK; + } + + case OPT_UNSET: + if (Jim_SetDictKeysVector(interp, argv[2], argv + 3, argc - 3, NULL, JIM_NONE) != JIM_OK) { + return JIM_ERR; + } + return JIM_OK; + + case OPT_VALUES: + types = JIM_DICTMATCH_VALUES; + + case OPT_KEYS: + return Jim_DictMatchTypes(interp, argv[2], argc == 4 ? 
argv[3] : NULL, types, types); + + case OPT_SIZE: + if (Jim_DictSize(interp, argv[2]) < 0) { + return JIM_ERR; + } + Jim_SetResultInt(interp, Jim_DictSize(interp, argv[2])); + return JIM_OK; + + case OPT_MERGE: + if (argc == 2) { + return JIM_OK; + } + objPtr = Jim_DictMerge(interp, argc - 2, argv + 2); + if (objPtr == NULL) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; + + case OPT_CREATE: + objPtr = Jim_NewDictObj(interp, argv + 2, argc - 2); + Jim_SetResult(interp, objPtr); + return JIM_OK; + + case OPT_INFO: + return Jim_DictInfo(interp, argv[2]); + + case OPT_WITH: + return JimDictWith(interp, argv[2], argv + 3, argc - 4, argv[argc - 1]); + + case OPT_UPDATE: + if (argc < 6 || argc % 2) { + + argc = 2; + } + + default: + return Jim_EvalEnsemble(interp, "dict", Jim_String(argv[1]), argc - 2, argv + 2); + } +} + + +static int Jim_SubstCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + static const char * const options[] = { + "-nobackslashes", "-nocommands", "-novariables", NULL + }; + enum + { OPT_NOBACKSLASHES, OPT_NOCOMMANDS, OPT_NOVARIABLES }; + int i; + int flags = JIM_SUBST_FLAG; + Jim_Obj *objPtr; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "?options? string"); + return JIM_ERR; + } + for (i = 1; i < (argc - 1); i++) { + int option; + + if (Jim_GetEnum(interp, argv[i], options, &option, NULL, + JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + switch (option) { + case OPT_NOBACKSLASHES: + flags |= JIM_SUBST_NOESC; + break; + case OPT_NOCOMMANDS: + flags |= JIM_SUBST_NOCMD; + break; + case OPT_NOVARIABLES: + flags |= JIM_SUBST_NOVAR; + break; + } + } + if (Jim_SubstObj(interp, argv[argc - 1], &objPtr, flags) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + return JIM_OK; +} + +#ifdef jim_ext_namespace +static int JimIsGlobalNamespace(Jim_Obj *objPtr) +{ + int len; + const char *str = Jim_GetString(objPtr, &len); + return len >= 2 && str[0] == ':' && str[1] == ':'; +} +#endif + + +static int Jim_InfoCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + int mode = 0; + + + enum { + INFO_ALIAS, + INFO_ARGS, + INFO_BODY, + INFO_CHANNELS, + INFO_COMMANDS, + INFO_COMPLETE, + INFO_EXISTS, + INFO_FRAME, + INFO_GLOBALS, + INFO_HOSTNAME, + INFO_LEVEL, + INFO_LOCALS, + INFO_NAMEOFEXECUTABLE, + INFO_PATCHLEVEL, + INFO_PROCS, + INFO_REFERENCES, + INFO_RETURNCODES, + INFO_SCRIPT, + INFO_SOURCE, + INFO_STACKTRACE, + INFO_STATICS, + INFO_VARS, + INFO_VERSION, + INFO_COUNT + }; + static const jim_subcmd_type cmds[INFO_COUNT + 1] = { + JIM_DEF_SUBCMD("alias", "command", 1, 1), + JIM_DEF_SUBCMD("args", "procname", 1, 1), + JIM_DEF_SUBCMD("body", "procname", 1, 1), + JIM_DEF_SUBCMD("channels", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("commands", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("complete", "script ?missing?", 1, 2), + JIM_DEF_SUBCMD("exists", "varName", 1, 1), + JIM_DEF_SUBCMD("frame", "?levelNum?", 0, 1), + JIM_DEF_SUBCMD("globals", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("hostname", NULL, 0, 0), + JIM_DEF_SUBCMD("level", "?levelNum?", 0, 1), + JIM_DEF_SUBCMD("locals", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("nameofexecutable", NULL, 0, 0), + JIM_DEF_SUBCMD("patchlevel", NULL, 0, 0), + JIM_DEF_SUBCMD("procs", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("references", NULL, 0, 0), + JIM_DEF_SUBCMD("returncodes", "?code?", 0, 1), + JIM_DEF_SUBCMD("script", "?filename?", 0, 1), + JIM_DEF_SUBCMD("source", "source ?filename line?", 1, 3), + JIM_DEF_SUBCMD("stacktrace", NULL, 0, 0), + 
JIM_DEF_SUBCMD("statics", "procname", 1, 1), + JIM_DEF_SUBCMD("vars", "?pattern?", 0, 1), + JIM_DEF_SUBCMD("version", NULL, 0, 0), + { NULL } + }; + const jim_subcmd_type *ct; +#ifdef jim_ext_namespace + int nons = 0; + + if (argc > 2 && Jim_CompareStringImmediate(interp, argv[1], "-nons")) { + + argc--; + argv++; + nons = 1; + } +#endif + ct = Jim_ParseSubCmd(interp, cmds, argc, argv); + if (!ct) { + return JIM_ERR; + } + if (ct->function) { + + return ct->function(interp, argc, argv); + } + + int option = ct - cmds; + + switch (option) { + case INFO_EXISTS: + Jim_SetResultBool(interp, Jim_GetVariable(interp, argv[2], 0) != NULL); + return JIM_OK; + + case INFO_ALIAS:{ + Jim_Cmd *cmdPtr; + + if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) { + return JIM_ERR; + } + if (cmdPtr->isproc || cmdPtr->u.native.cmdProc != JimAliasCmd) { + Jim_SetResultFormatted(interp, "command \"%#s\" is not an alias", argv[2]); + return JIM_ERR; + } + Jim_SetResult(interp, (Jim_Obj *)cmdPtr->u.native.privData); + return JIM_OK; + } + + case INFO_CHANNELS: + mode++; +#ifndef jim_ext_aio + Jim_SetResultString(interp, "aio not enabled", -1); + return JIM_ERR; +#endif + + case INFO_PROCS: + mode++; + + case INFO_COMMANDS: + +#ifdef jim_ext_namespace + if (!nons) { + if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimIsGlobalNamespace(argv[2]))) { + return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); + } + } +#endif + Jim_SetResult(interp, JimCommandsList(interp, (argc == 3) ? argv[2] : NULL, mode)); + return JIM_OK; + + case INFO_VARS: + mode++; + + case INFO_LOCALS: + mode++; + + case INFO_GLOBALS: + +#ifdef jim_ext_namespace + if (!nons) { + if (Jim_Length(interp->framePtr->nsObj) || (argc == 3 && JimIsGlobalNamespace(argv[2]))) { + return Jim_EvalPrefix(interp, "namespace info", argc - 1, argv + 1); + } + } +#endif + Jim_SetResult(interp, JimVariablesList(interp, argc == 3 ? 
argv[2] : NULL, mode)); + return JIM_OK; + + case INFO_SCRIPT: + if (argc == 3) { + Jim_IncrRefCount(argv[2]); + Jim_DecrRefCount(interp, interp->currentFilenameObj); + interp->currentFilenameObj = argv[2]; + } + Jim_SetResult(interp, interp->currentFilenameObj); + return JIM_OK; + + case INFO_SOURCE:{ + Jim_Obj *resObjPtr; + Jim_Obj *fileNameObj; + + if (argc == 4) { + Jim_SubCmdArgError(interp, ct, argv[0]); + return JIM_ERR; + } + if (argc == 5) { + jim_wide line; + if (Jim_GetWide(interp, argv[4], &line) != JIM_OK) { + return JIM_ERR; + } + resObjPtr = Jim_NewStringObj(interp, Jim_String(argv[2]), Jim_Length(argv[2])); + Jim_SetSourceInfo(interp, resObjPtr, argv[3], line); + } + else { + int line; + fileNameObj = Jim_GetSourceInfo(interp, argv[2], &line); + resObjPtr = Jim_NewListObj(interp, NULL, 0); + Jim_ListAppendElement(interp, resObjPtr, fileNameObj); + Jim_ListAppendElement(interp, resObjPtr, Jim_NewIntObj(interp, line)); + } + Jim_SetResult(interp, resObjPtr); + return JIM_OK; + } + + case INFO_STACKTRACE: + Jim_SetResult(interp, interp->stackTrace); + return JIM_OK; + + case INFO_LEVEL: + if (argc == 2) { + Jim_SetResultInt(interp, interp->framePtr->level); + } + else { + if (JimInfoLevel(interp, argv[2], &objPtr) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + } + return JIM_OK; + + case INFO_FRAME: + if (argc == 2) { + Jim_SetResultInt(interp, interp->procLevel + 1); + } + else { + if (JimInfoFrame(interp, argv[2], &objPtr) != JIM_OK) { + return JIM_ERR; + } + Jim_SetResult(interp, objPtr); + } + return JIM_OK; + + case INFO_BODY: + case INFO_STATICS: + case INFO_ARGS:{ + Jim_Cmd *cmdPtr; + + if ((cmdPtr = Jim_GetCommand(interp, argv[2], JIM_ERRMSG)) == NULL) { + return JIM_ERR; + } + if (!cmdPtr->isproc) { + Jim_SetResultFormatted(interp, "command \"%#s\" is not a procedure", argv[2]); + return JIM_ERR; + } + switch (option) { +#ifdef JIM_NO_INTROSPECTION + default: + Jim_SetResultString(interp, "unsupported", -1); + return JIM_ERR; +#else + case INFO_BODY: + Jim_SetResult(interp, cmdPtr->u.proc.bodyObjPtr); + break; + case INFO_ARGS: + Jim_SetResult(interp, cmdPtr->u.proc.argListObjPtr); + break; +#endif + case INFO_STATICS: + if (cmdPtr->u.proc.staticVars) { + Jim_SetResult(interp, JimHashtablePatternMatch(interp, cmdPtr->u.proc.staticVars, + NULL, JimVariablesMatch, JIM_VARLIST_LOCALS | JIM_VARLIST_VALUES)); + } + break; + } + return JIM_OK; + } + + case INFO_VERSION: + case INFO_PATCHLEVEL:{ + char buf[(JIM_INTEGER_SPACE * 2) + 1]; + + sprintf(buf, "%d.%d", JIM_VERSION / 100, JIM_VERSION % 100); + Jim_SetResultString(interp, buf, -1); + return JIM_OK; + } + + case INFO_COMPLETE: { + char missing; + + Jim_SetResultBool(interp, Jim_ScriptIsComplete(interp, argv[2], &missing)); + if (missing != ' ' && argc == 4) { + Jim_SetVariable(interp, argv[3], Jim_NewStringObj(interp, &missing, 1)); + } + return JIM_OK; + } + + case INFO_HOSTNAME: + + return Jim_Eval(interp, "os.gethostname"); + + case INFO_NAMEOFEXECUTABLE: + + return Jim_Eval(interp, "{info nameofexecutable}"); + + case INFO_RETURNCODES: + if (argc == 2) { + int i; + Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); + + for (i = 0; jimReturnCodes[i]; i++) { + Jim_ListAppendElement(interp, listObjPtr, Jim_NewIntObj(interp, i)); + Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, + jimReturnCodes[i], -1)); + } + + Jim_SetResult(interp, listObjPtr); + } + else if (argc == 3) { + long code; + const char *name; + + if (Jim_GetLong(interp, argv[2], &code) != JIM_OK) { + return 
JIM_ERR; + } + name = Jim_ReturnCode(code); + if (*name == '?') { + Jim_SetResultInt(interp, code); + } + else { + Jim_SetResultString(interp, name, -1); + } + } + return JIM_OK; + case INFO_REFERENCES: +#ifdef JIM_REFERENCES + return JimInfoReferences(interp, argc, argv); +#else + Jim_SetResultString(interp, "not supported", -1); + return JIM_ERR; +#endif + default: + abort(); + } +} + + +static int Jim_ExistsCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + int result = 0; + + static const char * const options[] = { + "-command", "-proc", "-alias", "-var", NULL + }; + enum + { + OPT_COMMAND, OPT_PROC, OPT_ALIAS, OPT_VAR + }; + int option; + + if (argc == 2) { + option = OPT_VAR; + objPtr = argv[1]; + } + else if (argc == 3) { + if (Jim_GetEnum(interp, argv[1], options, &option, NULL, JIM_ERRMSG | JIM_ENUM_ABBREV) != JIM_OK) { + return JIM_ERR; + } + objPtr = argv[2]; + } + else { + Jim_WrongNumArgs(interp, 1, argv, "?option? name"); + return JIM_ERR; + } + + if (option == OPT_VAR) { + result = Jim_GetVariable(interp, objPtr, 0) != NULL; + } + else { + + Jim_Cmd *cmd = Jim_GetCommand(interp, objPtr, JIM_NONE); + + if (cmd) { + switch (option) { + case OPT_COMMAND: + result = 1; + break; + + case OPT_ALIAS: + result = cmd->isproc == 0 && cmd->u.native.cmdProc == JimAliasCmd; + break; + + case OPT_PROC: + result = cmd->isproc; + break; + } + } + } + Jim_SetResultBool(interp, result); + return JIM_OK; +} + + +static int Jim_SplitCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *str, *splitChars, *noMatchStart; + int splitLen, strLen; + Jim_Obj *resObjPtr; + int c; + int len; + + if (argc != 2 && argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "string ?splitChars?"); + return JIM_ERR; + } + + str = Jim_GetString(argv[1], &len); + if (len == 0) { + return JIM_OK; + } + strLen = Jim_Utf8Length(interp, argv[1]); + + + if (argc == 2) { + splitChars = " \n\t\r"; + splitLen = 4; + } + else { + splitChars = Jim_String(argv[2]); + splitLen = Jim_Utf8Length(interp, argv[2]); + } + + noMatchStart = str; + resObjPtr = Jim_NewListObj(interp, NULL, 0); + + + if (splitLen) { + Jim_Obj *objPtr; + while (strLen--) { + const char *sc = splitChars; + int scLen = splitLen; + int sl = utf8_tounicode(str, &c); + while (scLen--) { + int pc; + sc += utf8_tounicode(sc, &pc); + if (c == pc) { + objPtr = Jim_NewStringObj(interp, noMatchStart, (str - noMatchStart)); + Jim_ListAppendElement(interp, resObjPtr, objPtr); + noMatchStart = str + sl; + break; + } + } + str += sl; + } + objPtr = Jim_NewStringObj(interp, noMatchStart, (str - noMatchStart)); + Jim_ListAppendElement(interp, resObjPtr, objPtr); + } + else { + Jim_Obj **commonObj = NULL; +#define NUM_COMMON (128 - 9) + while (strLen--) { + int n = utf8_tounicode(str, &c); +#ifdef JIM_OPTIMIZATION + if (c >= 9 && c < 128) { + + c -= 9; + if (!commonObj) { + commonObj = Jim_Alloc(sizeof(*commonObj) * NUM_COMMON); + memset(commonObj, 0, sizeof(*commonObj) * NUM_COMMON); + } + if (!commonObj[c]) { + commonObj[c] = Jim_NewStringObj(interp, str, 1); + } + Jim_ListAppendElement(interp, resObjPtr, commonObj[c]); + str++; + continue; + } +#endif + Jim_ListAppendElement(interp, resObjPtr, Jim_NewStringObjUtf8(interp, str, 1)); + str += n; + } + Jim_Free(commonObj); + } + + Jim_SetResult(interp, resObjPtr); + return JIM_OK; +} + + +static int Jim_JoinCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *joinStr; + int joinStrLen; + + if (argc != 2 && argc != 3) { + 
Jim_WrongNumArgs(interp, 1, argv, "list ?joinString?"); + return JIM_ERR; + } + + if (argc == 2) { + joinStr = " "; + joinStrLen = 1; + } + else { + joinStr = Jim_GetString(argv[2], &joinStrLen); + } + Jim_SetResult(interp, Jim_ListJoin(interp, argv[1], joinStr, joinStrLen)); + return JIM_OK; +} + + +static int Jim_FormatCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + + if (argc < 2) { + Jim_WrongNumArgs(interp, 1, argv, "formatString ?arg arg ...?"); + return JIM_ERR; + } + objPtr = Jim_FormatString(interp, argv[1], argc - 2, argv + 2); + if (objPtr == NULL) + return JIM_ERR; + Jim_SetResult(interp, objPtr); + return JIM_OK; +} + + +static int Jim_ScanCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *listPtr, **outVec; + int outc, i; + + if (argc < 3) { + Jim_WrongNumArgs(interp, 1, argv, "string format ?varName varName ...?"); + return JIM_ERR; + } + if (argv[2]->typePtr != &scanFmtStringObjType) + SetScanFmtFromAny(interp, argv[2]); + if (FormatGetError(argv[2]) != 0) { + Jim_SetResultString(interp, FormatGetError(argv[2]), -1); + return JIM_ERR; + } + if (argc > 3) { + int maxPos = FormatGetMaxPos(argv[2]); + int count = FormatGetCnvCount(argv[2]); + + if (maxPos > argc - 3) { + Jim_SetResultString(interp, "\"%n$\" argument index out of range", -1); + return JIM_ERR; + } + else if (count > argc - 3) { + Jim_SetResultString(interp, "different numbers of variable names and " + "field specifiers", -1); + return JIM_ERR; + } + else if (count < argc - 3) { + Jim_SetResultString(interp, "variable is not assigned by any " + "conversion specifiers", -1); + return JIM_ERR; + } + } + listPtr = Jim_ScanString(interp, argv[1], argv[2], JIM_ERRMSG); + if (listPtr == 0) + return JIM_ERR; + if (argc > 3) { + int rc = JIM_OK; + int count = 0; + + if (listPtr != 0 && listPtr != (Jim_Obj *)EOF) { + int len = Jim_ListLength(interp, listPtr); + + if (len != 0) { + JimListGetElements(interp, listPtr, &outc, &outVec); + for (i = 0; i < outc; ++i) { + if (Jim_Length(outVec[i]) > 0) { + ++count; + if (Jim_SetVariable(interp, argv[3 + i], outVec[i]) != JIM_OK) { + rc = JIM_ERR; + } + } + } + } + Jim_FreeNewObj(interp, listPtr); + } + else { + count = -1; + } + if (rc == JIM_OK) { + Jim_SetResultInt(interp, count); + } + return rc; + } + else { + if (listPtr == (Jim_Obj *)EOF) { + Jim_SetResult(interp, Jim_NewListObj(interp, 0, 0)); + return JIM_OK; + } + Jim_SetResult(interp, listPtr); + } + return JIM_OK; +} + + +static int Jim_ErrorCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + if (argc != 2 && argc != 3) { + Jim_WrongNumArgs(interp, 1, argv, "message ?stacktrace?"); + return JIM_ERR; + } + Jim_SetResult(interp, argv[1]); + if (argc == 3) { + JimSetStackTrace(interp, argv[2]); + return JIM_ERR; + } + return JIM_ERR; +} + + +static int Jim_LrangeCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + + if (argc != 4) { + Jim_WrongNumArgs(interp, 1, argv, "list first last"); + return JIM_ERR; + } + if ((objPtr = Jim_ListRange(interp, argv[1], argv[2], argv[3])) == NULL) + return JIM_ERR; + Jim_SetResult(interp, objPtr); + return JIM_OK; +} + + +static int Jim_LrepeatCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *objPtr; + jim_wide count; + + if (argc < 2 || Jim_GetWideExpr(interp, argv[1], &count) != JIM_OK || count < 0) { + Jim_WrongNumArgs(interp, 1, argv, "count ?value ...?"); + return JIM_ERR; + } + if (count == 0 || argc == 2) { + 
Jim_SetEmptyResult(interp); + return JIM_OK; + } + + argc -= 2; + argv += 2; + + objPtr = Jim_NewListObj(interp, NULL, 0); + ListEnsureLength(objPtr, argc * count); + while (count--) { + ListInsertElements(objPtr, -1, argc, argv); + } + + Jim_SetResult(interp, objPtr); + return JIM_OK; +} + +char **Jim_GetEnviron(void) +{ +#if defined(HAVE__NSGETENVIRON) + return *_NSGetEnviron(); +#elif defined(_environ) + return _environ; +#else + #if !defined(NO_ENVIRON_EXTERN) + extern char **environ; + #endif + return environ; +#endif +} + +void Jim_SetEnviron(char **env) +{ +#if defined(HAVE__NSGETENVIRON) + *_NSGetEnviron() = env; +#elif defined(_environ) + _environ = env; +#else + #if !defined(NO_ENVIRON_EXTERN) + extern char **environ; + #endif + + environ = env; +#endif +} + + +static int Jim_EnvCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const char *key; + const char *val; + + if (argc == 1) { + char **e = Jim_GetEnviron(); + + int i; + Jim_Obj *listObjPtr = Jim_NewListObj(interp, NULL, 0); + + for (i = 0; e[i]; i++) { + const char *equals = strchr(e[i], '='); + + if (equals) { + Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, e[i], + equals - e[i])); + Jim_ListAppendElement(interp, listObjPtr, Jim_NewStringObj(interp, equals + 1, -1)); + } + } + + Jim_SetResult(interp, listObjPtr); + return JIM_OK; + } + + if (argc > 3) { + Jim_WrongNumArgs(interp, 1, argv, "varName ?default?"); + return JIM_ERR; + } + key = Jim_String(argv[1]); + val = getenv(key); + if (val == NULL) { + if (argc < 3) { + Jim_SetResultFormatted(interp, "environment variable \"%#s\" does not exist", argv[1]); + return JIM_ERR; + } + val = Jim_String(argv[2]); + } + Jim_SetResult(interp, Jim_NewStringObj(interp, val, -1)); + return JIM_OK; +} + + +static int Jim_SourceCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + int retval; + + if (argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "fileName"); + return JIM_ERR; + } + retval = Jim_EvalFile(interp, Jim_String(argv[1])); + if (retval == JIM_RETURN) + return JIM_OK; + return retval; +} + + +static int Jim_LreverseCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + Jim_Obj *revObjPtr, **ele; + int len; + + if (argc != 2) { + Jim_WrongNumArgs(interp, 1, argv, "list"); + return JIM_ERR; + } + JimListGetElements(interp, argv[1], &len, &ele); + revObjPtr = Jim_NewListObj(interp, NULL, 0); + ListEnsureLength(revObjPtr, len); + len--; + while (len >= 0) + ListAppendElement(revObjPtr, ele[len--]); + Jim_SetResult(interp, revObjPtr); + return JIM_OK; +} + +static int JimRangeLen(jim_wide start, jim_wide end, jim_wide step) +{ + jim_wide len; + + if (step == 0) + return -1; + if (start == end) + return 0; + else if (step > 0 && start > end) + return -1; + else if (step < 0 && end > start) + return -1; + len = end - start; + if (len < 0) + len = -len; + if (step < 0) + step = -step; + len = 1 + ((len - 1) / step); + if (len > INT_MAX) + len = INT_MAX; + return (int)((len < 0) ? -1 : len); +} + + +static int Jim_RangeCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + jim_wide start = 0, end, step = 1; + int len, i; + Jim_Obj *objPtr; + + if (argc < 2 || argc > 4) { + Jim_WrongNumArgs(interp, 1, argv, "?start? 
end ?step?");
+        return JIM_ERR;
+    }
+    if (argc == 2) {
+        if (Jim_GetWideExpr(interp, argv[1], &end) != JIM_OK)
+            return JIM_ERR;
+    }
+    else {
+        if (Jim_GetWideExpr(interp, argv[1], &start) != JIM_OK ||
+            Jim_GetWideExpr(interp, argv[2], &end) != JIM_OK)
+            return JIM_ERR;
+        if (argc == 4 && Jim_GetWideExpr(interp, argv[3], &step) != JIM_OK)
+            return JIM_ERR;
+    }
+    if ((len = JimRangeLen(start, end, step)) == -1) {
+        Jim_SetResultString(interp, "Invalid (infinite?) range specified", -1);
+        return JIM_ERR;
+    }
+    objPtr = Jim_NewListObj(interp, NULL, 0);
+    ListEnsureLength(objPtr, len);
+    for (i = 0; i < len; i++)
+        ListAppendElement(objPtr, Jim_NewIntObj(interp, start + i * step));
+    Jim_SetResult(interp, objPtr);
+    return JIM_OK;
+}
+
+
+static int Jim_RandCoreCommand(Jim_Interp *interp, int argc, Jim_Obj *const *argv)
+{
+    jim_wide min = 0, max = 0, len, maxMul;
+
+    if (argc < 1 || argc > 3) {
+        Jim_WrongNumArgs(interp, 1, argv, "?min? max");
+        return JIM_ERR;
+    }
+    if (argc == 1) {
+        max = JIM_WIDE_MAX;
+    } else if (argc == 2) {
+        if (Jim_GetWideExpr(interp, argv[1], &max) != JIM_OK)
+            return JIM_ERR;
+    } else if (argc == 3) {
+        if (Jim_GetWideExpr(interp, argv[1], &min) != JIM_OK ||
+            Jim_GetWideExpr(interp, argv[2], &max) != JIM_OK)
+            return JIM_ERR;
+    }
+    len = max-min;
+    if (len < 0) {
+        Jim_SetResultString(interp, "Invalid arguments (max < min)", -1);
+        return JIM_ERR;
+    }
+    maxMul = JIM_WIDE_MAX - (len ? (JIM_WIDE_MAX%len) : 0);
+    while (1) {
+        jim_wide r;
+
+        JimRandomBytes(interp, &r, sizeof(jim_wide));
+        if (r < 0 || r >= maxMul) continue;
+        r = (len == 0) ? 0 : r%len;
+        Jim_SetResultInt(interp, min+r);
+        return JIM_OK;
+    }
+}
+
+static const struct {
+    const char *name;
+    Jim_CmdProc *cmdProc;
+} Jim_CoreCommandsTable[] = {
+    {"alias", Jim_AliasCoreCommand},
+    {"set", Jim_SetCoreCommand},
+    {"unset", Jim_UnsetCoreCommand},
+    {"puts", Jim_PutsCoreCommand},
+    {"+", Jim_AddCoreCommand},
+    {"*", Jim_MulCoreCommand},
+    {"-", Jim_SubCoreCommand},
+    {"/", Jim_DivCoreCommand},
+    {"incr", Jim_IncrCoreCommand},
+    {"while", Jim_WhileCoreCommand},
+    {"loop", Jim_LoopCoreCommand},
+    {"for", Jim_ForCoreCommand},
+    {"foreach", Jim_ForeachCoreCommand},
+    {"lmap", Jim_LmapCoreCommand},
+    {"lassign", Jim_LassignCoreCommand},
+    {"if", Jim_IfCoreCommand},
+    {"switch", Jim_SwitchCoreCommand},
+    {"list", Jim_ListCoreCommand},
+    {"lindex", Jim_LindexCoreCommand},
+    {"lset", Jim_LsetCoreCommand},
+    {"lsearch", Jim_LsearchCoreCommand},
+    {"llength", Jim_LlengthCoreCommand},
+    {"lappend", Jim_LappendCoreCommand},
+    {"linsert", Jim_LinsertCoreCommand},
+    {"lreplace", Jim_LreplaceCoreCommand},
+    {"lsort", Jim_LsortCoreCommand},
+    {"append", Jim_AppendCoreCommand},
+    {"eval", Jim_EvalCoreCommand},
+    {"uplevel", Jim_UplevelCoreCommand},
+    {"expr", Jim_ExprCoreCommand},
+    {"break", Jim_BreakCoreCommand},
+    {"continue", Jim_ContinueCoreCommand},
+    {"proc", Jim_ProcCoreCommand},
+    {"xtrace", Jim_XtraceCoreCommand},
+    {"concat", Jim_ConcatCoreCommand},
+    {"return", Jim_ReturnCoreCommand},
+    {"upvar", Jim_UpvarCoreCommand},
+    {"global", Jim_GlobalCoreCommand},
+    {"string", Jim_StringCoreCommand},
+    {"time", Jim_TimeCoreCommand},
+    {"timerate", Jim_TimeRateCoreCommand},
+    {"exit", Jim_ExitCoreCommand},
+    {"catch", Jim_CatchCoreCommand},
+    {"try", Jim_TryCoreCommand},
+#ifdef JIM_REFERENCES
+    {"ref", Jim_RefCoreCommand},
+    {"getref", Jim_GetrefCoreCommand},
+    {"setref", Jim_SetrefCoreCommand},
+    {"finalize", Jim_FinalizeCoreCommand},
+    {"collect", Jim_CollectCoreCommand},
+#endif
+    {"rename", Jim_RenameCoreCommand},
+    {"dict", Jim_DictCoreCommand},
+    {"subst", Jim_SubstCoreCommand},
+    {"info", Jim_InfoCoreCommand},
+    {"exists", Jim_ExistsCoreCommand},
+    {"split", Jim_SplitCoreCommand},
+    {"join", Jim_JoinCoreCommand},
+    {"format", Jim_FormatCoreCommand},
+    {"scan", Jim_ScanCoreCommand},
+    {"error", Jim_ErrorCoreCommand},
+    {"lrange", Jim_LrangeCoreCommand},
+    {"lrepeat", Jim_LrepeatCoreCommand},
+    {"env", Jim_EnvCoreCommand},
+    {"source", Jim_SourceCoreCommand},
+    {"lreverse", Jim_LreverseCoreCommand},
+    {"range", Jim_RangeCoreCommand},
+    {"rand", Jim_RandCoreCommand},
+    {"tailcall", Jim_TailcallCoreCommand},
+    {"local", Jim_LocalCoreCommand},
+    {"upcall", Jim_UpcallCoreCommand},
+    {"apply", Jim_ApplyCoreCommand},
+    {"stacktrace", Jim_StacktraceCoreCommand},
+    {NULL, NULL},
+};
+
+void Jim_RegisterCoreCommands(Jim_Interp *interp)
+{
+    int i = 0;
+
+    while (Jim_CoreCommandsTable[i].name != NULL) {
+        Jim_CreateCommand(interp,
+            Jim_CoreCommandsTable[i].name, Jim_CoreCommandsTable[i].cmdProc, NULL, NULL);
+        i++;
+    }
+}
+
+void Jim_MakeErrorMessage(Jim_Interp *interp)
+{
+    Jim_Obj *argv[2];
+
+    argv[0] = Jim_NewStringObj(interp, "errorInfo", -1);
+    argv[1] = interp->result;
+
+    Jim_EvalObjVector(interp, 2, argv);
+}
+
+static char **JimSortStringTable(const char *const *tablePtr)
+{
+    int count;
+    char **tablePtrSorted;
+
+
+    for (count = 0; tablePtr[count]; count++) {
+    }
+
+
+    tablePtrSorted = Jim_Alloc(sizeof(char *) * (count + 1));
+    memcpy(tablePtrSorted, tablePtr, sizeof(char *) * count);
+    qsort(tablePtrSorted, count, sizeof(char *), qsortCompareStringPointers);
+    tablePtrSorted[count] = NULL;
+
+    return tablePtrSorted;
+}
+
+static void JimSetFailedEnumResult(Jim_Interp *interp, const char *arg, const char *badtype,
+    const char *prefix, const char *const *tablePtr, const char *name)
+{
+    char **tablePtrSorted;
+    int i;
+
+    if (name == NULL) {
+        name = "option";
+    }
+
+    Jim_SetResultFormatted(interp, "%s%s \"%s\": must be ", badtype, name, arg);
+    tablePtrSorted = JimSortStringTable(tablePtr);
+    for (i = 0; tablePtrSorted[i]; i++) {
+        if (tablePtrSorted[i + 1] == NULL && i > 0) {
+            Jim_AppendString(interp, Jim_GetResult(interp), "or ", -1);
+        }
+        Jim_AppendStrings(interp, Jim_GetResult(interp), prefix, tablePtrSorted[i], NULL);
+        if (tablePtrSorted[i + 1]) {
+            Jim_AppendString(interp, Jim_GetResult(interp), ", ", -1);
+        }
+    }
+    Jim_Free(tablePtrSorted);
+}
+
+
+int Jim_CheckShowCommands(Jim_Interp *interp, Jim_Obj *objPtr, const char *const *tablePtr)
+{
+    if (Jim_CompareStringImmediate(interp, objPtr, "-commands")) {
+        int i;
+        char **tablePtrSorted = JimSortStringTable(tablePtr);
+        Jim_SetResult(interp, Jim_NewListObj(interp, NULL, 0));
+        for (i = 0; tablePtrSorted[i]; i++) {
+            Jim_ListAppendElement(interp, Jim_GetResult(interp), Jim_NewStringObj(interp, tablePtrSorted[i], -1));
+        }
+        Jim_Free(tablePtrSorted);
+        return JIM_OK;
+    }
+    return JIM_ERR;
+}
+
+static const Jim_ObjType getEnumObjType = {
+    "get-enum",
+    NULL,
+    NULL,
+    NULL,
+    JIM_TYPE_REFERENCES
+};
+
+int Jim_GetEnum(Jim_Interp *interp, Jim_Obj *objPtr,
+    const char *const *tablePtr, int *indexPtr, const char *name, int flags)
+{
+    const char *bad = "bad ";
+    const char *const *entryPtr = NULL;
+    int i;
+    int match = -1;
+    int arglen;
+    const char *arg;
+
+    if (objPtr->typePtr == &getEnumObjType) {
+        if (objPtr->internalRep.ptrIntValue.ptr == tablePtr && objPtr->internalRep.ptrIntValue.int1 == flags) {
+            *indexPtr = objPtr->internalRep.ptrIntValue.int2;
+            return JIM_OK;
+        }
+    }
+
+    arg = Jim_GetString(objPtr, &arglen);
+
+    *indexPtr
= -1; + + for (entryPtr = tablePtr, i = 0; *entryPtr != NULL; entryPtr++, i++) { + if (Jim_CompareStringImmediate(interp, objPtr, *entryPtr)) { + + match = i; + goto found; + } + if (flags & JIM_ENUM_ABBREV) { + if (strncmp(arg, *entryPtr, arglen) == 0) { + if (*arg == '-' && arglen == 1) { + break; + } + if (match >= 0) { + bad = "ambiguous "; + goto ambiguous; + } + match = i; + } + } + } + + + if (match >= 0) { + found: + + Jim_FreeIntRep(interp, objPtr); + objPtr->typePtr = &getEnumObjType; + objPtr->internalRep.ptrIntValue.ptr = (void *)tablePtr; + objPtr->internalRep.ptrIntValue.int1 = flags; + objPtr->internalRep.ptrIntValue.int2 = match; + + *indexPtr = match; + return JIM_OK; + } + + ambiguous: + if (flags & JIM_ERRMSG) { + JimSetFailedEnumResult(interp, arg, bad, "", tablePtr, name); + } + return JIM_ERR; +} + +int Jim_FindByName(const char *name, const char * const array[], size_t len) +{ + int i; + + for (i = 0; i < (int)len; i++) { + if (array[i] && strcmp(array[i], name) == 0) { + return i; + } + } + return -1; +} + +int Jim_IsDict(Jim_Obj *objPtr) +{ + return objPtr->typePtr == &dictObjType; +} + +int Jim_IsList(Jim_Obj *objPtr) +{ + return objPtr->typePtr == &listObjType; +} + +void Jim_SetResultFormatted(Jim_Interp *interp, const char *format, ...) +{ + + int len = strlen(format); + int extra = 0; + int n = 0; + const char *params[5]; + int nobjparam = 0; + Jim_Obj *objparam[5]; + char *buf; + va_list args; + int i; + + va_start(args, format); + + for (i = 0; i < len && n < 5; i++) { + int l; + + if (strncmp(format + i, "%s", 2) == 0) { + params[n] = va_arg(args, char *); + + l = strlen(params[n]); + } + else if (strncmp(format + i, "%#s", 3) == 0) { + Jim_Obj *objPtr = va_arg(args, Jim_Obj *); + + params[n] = Jim_GetString(objPtr, &l); + objparam[nobjparam++] = objPtr; + Jim_IncrRefCount(objPtr); + } + else { + if (format[i] == '%') { + i++; + } + continue; + } + n++; + extra += l; + } + + len += extra; + buf = Jim_Alloc(len + 1); + len = snprintf(buf, len + 1, format, params[0], params[1], params[2], params[3], params[4]); + + va_end(args); + + Jim_SetResult(interp, Jim_NewStringObjNoAlloc(interp, buf, len)); + + for (i = 0; i < nobjparam; i++) { + Jim_DecrRefCount(interp, objparam[i]); + } +} + +int Jim_CheckAbiVersion(Jim_Interp *interp, int abi_version) +{ + if (abi_version != JIM_ABI_VERSION) { + Jim_SetResultString(interp, "ABI version mismatch", -1); + return JIM_ERR; + } + return JIM_OK; +} + + +#ifndef jim_ext_package +int Jim_PackageProvide(Jim_Interp *interp, const char *name, const char *ver, int flags) +{ + return JIM_OK; +} +#endif +#ifndef jim_ext_aio +int Jim_AioFilehandle(Jim_Interp *interp, Jim_Obj *fhObj) +{ + return -1; +} +#endif + + +#include +#include + + +static int subcmd_null(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + + return JIM_OK; +} + +static const jim_subcmd_type dummy_subcmd = { + "dummy", NULL, subcmd_null, 0, 0, JIM_MODFLAG_HIDDEN +}; + +static Jim_Obj *subcmd_cmd_list(Jim_Interp *interp, const jim_subcmd_type * ct, const char *sep) +{ + + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + Jim_Obj *sortCmd[2]; + + for (; ct->cmd; ct++) { + if (!(ct->flags & JIM_MODFLAG_HIDDEN)) { + Jim_ListAppendElement(interp, listObj, Jim_NewStringObj(interp, ct->cmd, -1)); + } + } + + + sortCmd[0] = Jim_NewStringObj(interp, "lsort", -1); + sortCmd[1] = listObj; + + if (Jim_EvalObjVector(interp, 2, sortCmd) == JIM_OK) { + return Jim_ListJoin(interp, Jim_GetResult(interp), sep, strlen(sep)); + } + + return Jim_GetResult(interp); +} + 
+static void bad_subcmd(Jim_Interp *interp, const jim_subcmd_type * command_table, const char *type, + Jim_Obj *cmd, Jim_Obj *subcmd) +{ + Jim_SetResultFormatted(interp, "%#s, %s command \"%#s\": should be %#s", cmd, type, + subcmd, subcmd_cmd_list(interp, command_table, ", ")); +} + +static void show_cmd_usage(Jim_Interp *interp, const jim_subcmd_type * command_table, int argc, + Jim_Obj *const *argv) +{ + Jim_SetResultFormatted(interp, "Usage: \"%#s command ... \", where command is one of: %#s", + argv[0], subcmd_cmd_list(interp, command_table, ", ")); +} + +static void add_cmd_usage(Jim_Interp *interp, const jim_subcmd_type * ct, Jim_Obj *cmd) +{ + if (cmd) { + Jim_AppendStrings(interp, Jim_GetResult(interp), Jim_String(cmd), " ", NULL); + } + Jim_AppendStrings(interp, Jim_GetResult(interp), ct->cmd, NULL); + if (ct->args && *ct->args) { + Jim_AppendStrings(interp, Jim_GetResult(interp), " ", ct->args, NULL); + } +} + +void Jim_SubCmdArgError(Jim_Interp *interp, const jim_subcmd_type * ct, Jim_Obj *subcmd) +{ + Jim_SetResultString(interp, "wrong # args: should be \"", -1); + add_cmd_usage(interp, ct, subcmd); + Jim_AppendStrings(interp, Jim_GetResult(interp), "\"", NULL); +} + +static const Jim_ObjType subcmdLookupObjType = { + "subcmd-lookup", + NULL, + NULL, + NULL, + JIM_TYPE_REFERENCES +}; + +const jim_subcmd_type *Jim_ParseSubCmd(Jim_Interp *interp, const jim_subcmd_type * command_table, + int argc, Jim_Obj *const *argv) +{ + const jim_subcmd_type *ct; + const jim_subcmd_type *partial = 0; + int cmdlen; + Jim_Obj *cmd; + const char *cmdstr; + int help = 0; + int argsok = 1; + + if (argc < 2) { + Jim_SetResultFormatted(interp, "wrong # args: should be \"%#s command ...\"\n" + "Use \"%#s -help ?command?\" for help", argv[0], argv[0]); + return 0; + } + + cmd = argv[1]; + + + if (cmd->typePtr == &subcmdLookupObjType) { + if (cmd->internalRep.ptrIntValue.ptr == command_table) { + ct = command_table + cmd->internalRep.ptrIntValue.int1; + goto found; + } + } + + + if (Jim_CompareStringImmediate(interp, cmd, "-help")) { + if (argc == 2) { + + show_cmd_usage(interp, command_table, argc, argv); + return &dummy_subcmd; + } + help = 1; + + + cmd = argv[2]; + } + + + if (Jim_CompareStringImmediate(interp, cmd, "-commands")) { + Jim_SetResult(interp, subcmd_cmd_list(interp, command_table, " ")); + return &dummy_subcmd; + } + + cmdstr = Jim_GetString(cmd, &cmdlen); + + for (ct = command_table; ct->cmd; ct++) { + if (Jim_CompareStringImmediate(interp, cmd, ct->cmd)) { + + break; + } + if (strncmp(cmdstr, ct->cmd, cmdlen) == 0) { + if (partial) { + + if (help) { + + show_cmd_usage(interp, command_table, argc, argv); + return &dummy_subcmd; + } + bad_subcmd(interp, command_table, "ambiguous", argv[0], argv[1 + help]); + return 0; + } + partial = ct; + } + continue; + } + + + if (partial && !ct->cmd) { + ct = partial; + } + + if (!ct->cmd) { + + if (help) { + + show_cmd_usage(interp, command_table, argc, argv); + return &dummy_subcmd; + } + bad_subcmd(interp, command_table, "unknown", argv[0], argv[1 + help]); + return 0; + } + + if (help) { + Jim_SetResultString(interp, "Usage: ", -1); + + add_cmd_usage(interp, ct, argv[0]); + return &dummy_subcmd; + } + + + Jim_FreeIntRep(interp, cmd); + cmd->typePtr = &subcmdLookupObjType; + cmd->internalRep.ptrIntValue.ptr = (void *)command_table; + cmd->internalRep.ptrIntValue.int1 = ct - command_table; + +found: + + + if (argc - 2 < ct->minargs) { + argsok = 0; + } + else if (ct->maxargs >= 0 && argc - 2 > ct->maxargs) { + argsok = 0; + } + else if 
(ct->maxargs < -1 && (argc - 2) % -ct->maxargs != 0) { + + argsok = 0; + } + if (!argsok) { + Jim_SetResultString(interp, "wrong # args: should be \"", -1); + + add_cmd_usage(interp, ct, argv[0]); + Jim_AppendStrings(interp, Jim_GetResult(interp), "\"", NULL); + + return 0; + } + + + return ct; +} + +int Jim_CallSubCmd(Jim_Interp *interp, const jim_subcmd_type * ct, int argc, Jim_Obj *const *argv) +{ + int ret = JIM_ERR; + + if (ct) { + if (ct->flags & JIM_MODFLAG_FULLARGV) { + ret = ct->function(interp, argc, argv); + } + else { + ret = ct->function(interp, argc - 2, argv + 2); + } + if (ret < 0) { + Jim_SubCmdArgError(interp, ct, argv[0]); + ret = JIM_ERR; + } + } + return ret; +} + +int Jim_SubCmdProc(Jim_Interp *interp, int argc, Jim_Obj *const *argv) +{ + const jim_subcmd_type *ct = + Jim_ParseSubCmd(interp, (const jim_subcmd_type *)Jim_CmdPrivData(interp), argc, argv); + + return Jim_CallSubCmd(interp, ct, argc, argv); +} + +#include +#include +#include +#include +#include + + +int utf8_fromunicode(char *p, unsigned uc) +{ + if (uc <= 0x7f) { + *p = uc; + return 1; + } + else if (uc <= 0x7ff) { + *p++ = 0xc0 | ((uc & 0x7c0) >> 6); + *p = 0x80 | (uc & 0x3f); + return 2; + } + else if (uc <= 0xffff) { + *p++ = 0xe0 | ((uc & 0xf000) >> 12); + *p++ = 0x80 | ((uc & 0xfc0) >> 6); + *p = 0x80 | (uc & 0x3f); + return 3; + } + + else { + *p++ = 0xf0 | ((uc & 0x1c0000) >> 18); + *p++ = 0x80 | ((uc & 0x3f000) >> 12); + *p++ = 0x80 | ((uc & 0xfc0) >> 6); + *p = 0x80 | (uc & 0x3f); + return 4; + } +} + +#include +#include +#include + + +#define JIM_INTEGER_SPACE 24 +#define MAX_FLOAT_WIDTH 320 + +Jim_Obj *Jim_FormatString(Jim_Interp *interp, Jim_Obj *fmtObjPtr, int objc, Jim_Obj *const *objv) +{ + const char *span, *format, *formatEnd, *msg; + int numBytes = 0, objIndex = 0, gotXpg = 0, gotSequential = 0; + static const char * const mixedXPG = + "cannot mix \"%\" and \"%n$\" conversion specifiers"; + static const char * const badIndex[2] = { + "not enough arguments for all format specifiers", + "\"%n$\" argument index out of range" + }; + int formatLen; + Jim_Obj *resultPtr; + + char *num_buffer = NULL; + int num_buffer_size = 0; + + span = format = Jim_GetString(fmtObjPtr, &formatLen); + formatEnd = format + formatLen; + resultPtr = Jim_NewEmptyStringObj(interp); + + while (format != formatEnd) { + char *end; + int gotMinus, sawFlag; + int gotPrecision, useShort; + long width, precision; + int newXpg; + int ch; + int step; + int doubleType; + char pad = ' '; + char spec[2*JIM_INTEGER_SPACE + 12]; + char *p; + + int formatted_chars; + int formatted_bytes; + const char *formatted_buf; + + step = utf8_tounicode(format, &ch); + format += step; + if (ch != '%') { + numBytes += step; + continue; + } + if (numBytes) { + Jim_AppendString(interp, resultPtr, span, numBytes); + numBytes = 0; + } + + + step = utf8_tounicode(format, &ch); + if (ch == '%') { + span = format; + numBytes = step; + format += step; + continue; + } + + + newXpg = 0; + if (isdigit(ch)) { + int position = strtoul(format, &end, 10); + if (*end == '$') { + newXpg = 1; + objIndex = position - 1; + format = end + 1; + step = utf8_tounicode(format, &ch); + } + } + if (newXpg) { + if (gotSequential) { + msg = mixedXPG; + goto errorMsg; + } + gotXpg = 1; + } else { + if (gotXpg) { + msg = mixedXPG; + goto errorMsg; + } + gotSequential = 1; + } + if ((objIndex < 0) || (objIndex >= objc)) { + msg = badIndex[gotXpg]; + goto errorMsg; + } + + p = spec; + *p++ = '%'; + + gotMinus = 0; + sawFlag = 1; + do { + switch (ch) { + case '-': + gotMinus 
= 1; + break; + case '0': + pad = ch; + break; + case ' ': + case '+': + case '#': + break; + default: + sawFlag = 0; + continue; + } + *p++ = ch; + format += step; + step = utf8_tounicode(format, &ch); + + } while (sawFlag && (p - spec <= 5)); + + + width = 0; + if (isdigit(ch)) { + width = strtoul(format, &end, 10); + format = end; + step = utf8_tounicode(format, &ch); + } else if (ch == '*') { + if (objIndex >= objc - 1) { + msg = badIndex[gotXpg]; + goto errorMsg; + } + if (Jim_GetLong(interp, objv[objIndex], &width) != JIM_OK) { + goto error; + } + if (width < 0) { + width = -width; + if (!gotMinus) { + *p++ = '-'; + gotMinus = 1; + } + } + objIndex++; + format += step; + step = utf8_tounicode(format, &ch); + } + + + gotPrecision = precision = 0; + if (ch == '.') { + gotPrecision = 1; + format += step; + step = utf8_tounicode(format, &ch); + } + if (isdigit(ch)) { + precision = strtoul(format, &end, 10); + format = end; + step = utf8_tounicode(format, &ch); + } else if (ch == '*') { + if (objIndex >= objc - 1) { + msg = badIndex[gotXpg]; + goto errorMsg; + } + if (Jim_GetLong(interp, objv[objIndex], &precision) != JIM_OK) { + goto error; + } + + + if (precision < 0) { + precision = 0; + } + objIndex++; + format += step; + step = utf8_tounicode(format, &ch); + } + + + useShort = 0; + if (ch == 'h') { + useShort = 1; + format += step; + step = utf8_tounicode(format, &ch); + } else if (ch == 'l') { + + format += step; + step = utf8_tounicode(format, &ch); + if (ch == 'l') { + format += step; + step = utf8_tounicode(format, &ch); + } + } + + format += step; + span = format; + + + if (ch == 'i') { + ch = 'd'; + } + + doubleType = 0; + + switch (ch) { + case '\0': + msg = "format string ended in middle of field specifier"; + goto errorMsg; + case 's': { + formatted_buf = Jim_GetString(objv[objIndex], &formatted_bytes); + formatted_chars = Jim_Utf8Length(interp, objv[objIndex]); + if (gotPrecision && (precision < formatted_chars)) { + + formatted_chars = precision; + formatted_bytes = utf8_index(formatted_buf, precision); + } + break; + } + case 'c': { + jim_wide code; + + if (Jim_GetWide(interp, objv[objIndex], &code) != JIM_OK) { + goto error; + } + + formatted_bytes = utf8_getchars(spec, code); + formatted_buf = spec; + formatted_chars = 1; + break; + } + case 'b': { + unsigned jim_wide w; + int length; + int i; + int j; + + if (Jim_GetWide(interp, objv[objIndex], (jim_wide *)&w) != JIM_OK) { + goto error; + } + length = sizeof(w) * 8; + + + + if (num_buffer_size < length + 1) { + num_buffer_size = length + 1; + num_buffer = Jim_Realloc(num_buffer, num_buffer_size); + } + + j = 0; + for (i = length; i > 0; ) { + i--; + if (w & ((unsigned jim_wide)1 << i)) { + num_buffer[j++] = '1'; + } + else if (j || i == 0) { + num_buffer[j++] = '0'; + } + } + num_buffer[j] = 0; + formatted_chars = formatted_bytes = j; + formatted_buf = num_buffer; + break; + } + + case 'e': + case 'E': + case 'f': + case 'g': + case 'G': + doubleType = 1; + + case 'd': + case 'u': + case 'o': + case 'x': + case 'X': { + jim_wide w; + double d; + int length; + + + if (width) { + p += sprintf(p, "%ld", width); + } + if (gotPrecision) { + p += sprintf(p, ".%ld", precision); + } + + + if (doubleType) { + if (Jim_GetDouble(interp, objv[objIndex], &d) != JIM_OK) { + goto error; + } + length = MAX_FLOAT_WIDTH; + } + else { + if (Jim_GetWide(interp, objv[objIndex], &w) != JIM_OK) { + goto error; + } + length = JIM_INTEGER_SPACE; + if (useShort) { + if (ch == 'd') { + w = (short)w; + } + else { + w = (unsigned short)w; + } + } 
+ *p++ = 'l'; +#ifdef HAVE_LONG_LONG + if (sizeof(long long) == sizeof(jim_wide)) { + *p++ = 'l'; + } +#endif + } + + *p++ = (char) ch; + *p = '\0'; + + + if (width > 10000 || length > 10000 || precision > 10000) { + Jim_SetResultString(interp, "format too long", -1); + goto error; + } + + + + if (width > length) { + length = width; + } + if (gotPrecision) { + length += precision; + } + + + if (num_buffer_size < length + 1) { + num_buffer_size = length + 1; + num_buffer = Jim_Realloc(num_buffer, num_buffer_size); + } + + if (doubleType) { + snprintf(num_buffer, length + 1, spec, d); + } + else { + formatted_bytes = snprintf(num_buffer, length + 1, spec, w); + } + formatted_chars = formatted_bytes = strlen(num_buffer); + formatted_buf = num_buffer; + break; + } + + default: { + + spec[0] = ch; + spec[1] = '\0'; + Jim_SetResultFormatted(interp, "bad field specifier \"%s\"", spec); + goto error; + } + } + + if (!gotMinus) { + while (formatted_chars < width) { + Jim_AppendString(interp, resultPtr, &pad, 1); + formatted_chars++; + } + } + + Jim_AppendString(interp, resultPtr, formatted_buf, formatted_bytes); + + while (formatted_chars < width) { + Jim_AppendString(interp, resultPtr, &pad, 1); + formatted_chars++; + } + + objIndex += gotSequential; + } + if (numBytes) { + Jim_AppendString(interp, resultPtr, span, numBytes); + } + + Jim_Free(num_buffer); + return resultPtr; + + errorMsg: + Jim_SetResultString(interp, msg, -1); + error: + Jim_FreeNewObj(interp, resultPtr); + Jim_Free(num_buffer); + return NULL; +} + + +#if defined(JIM_REGEXP) +#include +#include +#include +#include + + + +#define REG_MAX_PAREN 100 + + + +#define END 0 +#define BOL 1 +#define EOL 2 +#define ANY 3 +#define ANYOF 4 +#define ANYBUT 5 +#define BRANCH 6 +#define BACK 7 +#define EXACTLY 8 +#define NOTHING 9 +#define REP 10 +#define REPMIN 11 +#define REPX 12 +#define REPXMIN 13 +#define BOLX 14 +#define EOLX 15 +#define WORDA 16 +#define WORDZ 17 + +#define OPENNC 1000 +#define OPEN 1001 + + + + +#define CLOSENC 2000 +#define CLOSE 2001 +#define CLOSE_END (CLOSE+REG_MAX_PAREN) + +#define REG_MAGIC 0xFADED00D + + +#define OP(preg, p) (preg->program[p]) +#define NEXT(preg, p) (preg->program[p + 1]) +#define OPERAND(p) ((p) + 2) + + + + +#define FAIL(R,M) { (R)->err = (M); return (M); } +#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?' 
|| (c) == '{') +#define META "^$.[()|?{+*" + +#define HASWIDTH 1 +#define SIMPLE 2 +#define SPSTART 4 +#define WORST 0 + +#define MAX_REP_COUNT 1000000 + +static int reg(regex_t *preg, int paren, int *flagp ); +static int regpiece(regex_t *preg, int *flagp ); +static int regbranch(regex_t *preg, int *flagp ); +static int regatom(regex_t *preg, int *flagp ); +static int regnode(regex_t *preg, int op ); +static int regnext(regex_t *preg, int p ); +static void regc(regex_t *preg, int b ); +static int reginsert(regex_t *preg, int op, int size, int opnd ); +static void regtail(regex_t *preg, int p, int val); +static void regoptail(regex_t *preg, int p, int val ); +static int regopsize(regex_t *preg, int p ); + +static int reg_range_find(const int *string, int c); +static const char *str_find(const char *string, int c, int nocase); +static int prefix_cmp(const int *prog, int proglen, const char *string, int nocase); + + +#ifdef DEBUG +static int regnarrate = 0; +static void regdump(regex_t *preg); +static const char *regprop( int op ); +#endif + + +static int str_int_len(const int *seq) +{ + int n = 0; + while (*seq++) { + n++; + } + return n; +} + +int jim_regcomp(regex_t *preg, const char *exp, int cflags) +{ + int scan; + int longest; + unsigned len; + int flags; + +#ifdef DEBUG + fprintf(stderr, "Compiling: '%s'\n", exp); +#endif + memset(preg, 0, sizeof(*preg)); + + if (exp == NULL) + FAIL(preg, REG_ERR_NULL_ARGUMENT); + + + preg->cflags = cflags; + preg->regparse = exp; + + + preg->proglen = (strlen(exp) + 1) * 5; + preg->program = malloc(preg->proglen * sizeof(int)); + if (preg->program == NULL) + FAIL(preg, REG_ERR_NOMEM); + + regc(preg, REG_MAGIC); + if (reg(preg, 0, &flags) == 0) { + return preg->err; + } + + + if (preg->re_nsub >= REG_MAX_PAREN) + FAIL(preg,REG_ERR_TOO_BIG); + + + preg->regstart = 0; + preg->reganch = 0; + preg->regmust = 0; + preg->regmlen = 0; + scan = 1; + if (OP(preg, regnext(preg, scan)) == END) { + scan = OPERAND(scan); + + + if (OP(preg, scan) == EXACTLY) { + preg->regstart = preg->program[OPERAND(scan)]; + } + else if (OP(preg, scan) == BOL) + preg->reganch++; + + if (flags&SPSTART) { + longest = 0; + len = 0; + for (; scan != 0; scan = regnext(preg, scan)) { + if (OP(preg, scan) == EXACTLY) { + int plen = str_int_len(preg->program + OPERAND(scan)); + if (plen >= len) { + longest = OPERAND(scan); + len = plen; + } + } + } + preg->regmust = longest; + preg->regmlen = len; + } + } + +#ifdef DEBUG + regdump(preg); +#endif + + return 0; +} + +static int reg(regex_t *preg, int paren, int *flagp ) +{ + int ret; + int br; + int ender; + int parno = 0; + int flags; + + *flagp = HASWIDTH; + + + if (paren) { + if (preg->regparse[0] == '?' && preg->regparse[1] == ':') { + + preg->regparse += 2; + parno = -1; + } + else { + parno = ++preg->re_nsub; + } + ret = regnode(preg, OPEN+parno); + } else + ret = 0; + + + br = regbranch(preg, &flags); + if (br == 0) + return 0; + if (ret != 0) + regtail(preg, ret, br); + else + ret = br; + if (!(flags&HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags&SPSTART; + while (*preg->regparse == '|') { + preg->regparse++; + br = regbranch(preg, &flags); + if (br == 0) + return 0; + regtail(preg, ret, br); + if (!(flags&HASWIDTH)) + *flagp &= ~HASWIDTH; + *flagp |= flags&SPSTART; + } + + + ender = regnode(preg, (paren) ? 
CLOSE+parno : END); + regtail(preg, ret, ender); + + + for (br = ret; br != 0; br = regnext(preg, br)) + regoptail(preg, br, ender); + + + if (paren && *preg->regparse++ != ')') { + preg->err = REG_ERR_UNMATCHED_PAREN; + return 0; + } else if (!paren && *preg->regparse != '\0') { + if (*preg->regparse == ')') { + preg->err = REG_ERR_UNMATCHED_PAREN; + return 0; + } else { + preg->err = REG_ERR_JUNK_ON_END; + return 0; + } + } + + return(ret); +} + +static int regbranch(regex_t *preg, int *flagp ) +{ + int ret; + int chain; + int latest; + int flags; + + *flagp = WORST; + + ret = regnode(preg, BRANCH); + chain = 0; + while (*preg->regparse != '\0' && *preg->regparse != ')' && + *preg->regparse != '|') { + latest = regpiece(preg, &flags); + if (latest == 0) + return 0; + *flagp |= flags&HASWIDTH; + if (chain == 0) { + *flagp |= flags&SPSTART; + } + else { + regtail(preg, chain, latest); + } + chain = latest; + } + if (chain == 0) + (void) regnode(preg, NOTHING); + + return(ret); +} + +static int regpiece(regex_t *preg, int *flagp) +{ + int ret; + char op; + int next; + int flags; + int min; + int max; + + ret = regatom(preg, &flags); + if (ret == 0) + return 0; + + op = *preg->regparse; + if (!ISMULT(op)) { + *flagp = flags; + return(ret); + } + + if (!(flags&HASWIDTH) && op != '?') { + preg->err = REG_ERR_OPERAND_COULD_BE_EMPTY; + return 0; + } + + + if (op == '{') { + char *end; + + min = strtoul(preg->regparse + 1, &end, 10); + if (end == preg->regparse + 1) { + preg->err = REG_ERR_BAD_COUNT; + return 0; + } + if (*end == '}') { + max = min; + } + else if (*end == '\0') { + preg->err = REG_ERR_UNMATCHED_BRACES; + return 0; + } + else { + preg->regparse = end; + max = strtoul(preg->regparse + 1, &end, 10); + if (*end != '}') { + preg->err = REG_ERR_UNMATCHED_BRACES; + return 0; + } + } + if (end == preg->regparse + 1) { + max = MAX_REP_COUNT; + } + else if (max < min || max >= 100) { + preg->err = REG_ERR_BAD_COUNT; + return 0; + } + if (min >= 100) { + preg->err = REG_ERR_BAD_COUNT; + return 0; + } + + preg->regparse = strchr(preg->regparse, '}'); + } + else { + min = (op == '+'); + max = (op == '?' ? 1 : MAX_REP_COUNT); + } + + if (preg->regparse[1] == '?') { + preg->regparse++; + next = reginsert(preg, flags & SIMPLE ? REPMIN : REPXMIN, 5, ret); + } + else { + next = reginsert(preg, flags & SIMPLE ? REP: REPX, 5, ret); + } + preg->program[ret + 2] = max; + preg->program[ret + 3] = min; + preg->program[ret + 4] = 0; + + *flagp = (min) ? 
(WORST|HASWIDTH) : (WORST|SPSTART); + + if (!(flags & SIMPLE)) { + int back = regnode(preg, BACK); + regtail(preg, back, ret); + regtail(preg, next, back); + } + + preg->regparse++; + if (ISMULT(*preg->regparse)) { + preg->err = REG_ERR_NESTED_COUNT; + return 0; + } + + return ret; +} + +static void reg_addrange(regex_t *preg, int lower, int upper) +{ + if (lower > upper) { + reg_addrange(preg, upper, lower); + } + + regc(preg, upper - lower + 1); + regc(preg, lower); +} + +static void reg_addrange_str(regex_t *preg, const char *str) +{ + while (*str) { + reg_addrange(preg, *str, *str); + str++; + } +} + +static int reg_utf8_tounicode_case(const char *s, int *uc, int upper) +{ + int l = utf8_tounicode(s, uc); + if (upper) { + *uc = utf8_upper(*uc); + } + return l; +} + +static int hexdigitval(int c) +{ + if (c >= '0' && c <= '9') + return c - '0'; + if (c >= 'a' && c <= 'f') + return c - 'a' + 10; + if (c >= 'A' && c <= 'F') + return c - 'A' + 10; + return -1; +} + +static int parse_hex(const char *s, int n, int *uc) +{ + int val = 0; + int k; + + for (k = 0; k < n; k++) { + int c = hexdigitval(*s++); + if (c == -1) { + break; + } + val = (val << 4) | c; + } + if (k) { + *uc = val; + } + return k; +} + +static int reg_decode_escape(const char *s, int *ch) +{ + int n; + const char *s0 = s; + + *ch = *s++; + + switch (*ch) { + case 'b': *ch = '\b'; break; + case 'e': *ch = 27; break; + case 'f': *ch = '\f'; break; + case 'n': *ch = '\n'; break; + case 'r': *ch = '\r'; break; + case 't': *ch = '\t'; break; + case 'v': *ch = '\v'; break; + case 'u': + if (*s == '{') { + + n = parse_hex(s + 1, 6, ch); + if (n > 0 && s[n + 1] == '}' && *ch >= 0 && *ch <= 0x1fffff) { + s += n + 2; + } + else { + + *ch = 'u'; + } + } + else if ((n = parse_hex(s, 4, ch)) > 0) { + s += n; + } + break; + case 'U': + if ((n = parse_hex(s, 8, ch)) > 0) { + s += n; + } + break; + case 'x': + if ((n = parse_hex(s, 2, ch)) > 0) { + s += n; + } + break; + case '\0': + s--; + *ch = '\\'; + break; + } + return s - s0; +} + +static int regatom(regex_t *preg, int *flagp) +{ + int ret; + int flags; + int nocase = (preg->cflags & REG_ICASE); + + int ch; + int n = reg_utf8_tounicode_case(preg->regparse, &ch, nocase); + + *flagp = WORST; + + preg->regparse += n; + switch (ch) { + + case '^': + ret = regnode(preg, BOL); + break; + case '$': + ret = regnode(preg, EOL); + break; + case '.': + ret = regnode(preg, ANY); + *flagp |= HASWIDTH|SIMPLE; + break; + case '[': { + const char *pattern = preg->regparse; + + if (*pattern == '^') { + ret = regnode(preg, ANYBUT); + pattern++; + } else + ret = regnode(preg, ANYOF); + + + if (*pattern == ']' || *pattern == '-') { + reg_addrange(preg, *pattern, *pattern); + pattern++; + } + + while (*pattern != ']') { + + int start; + int end; + + enum { + CC_ALPHA, CC_ALNUM, CC_SPACE, CC_BLANK, CC_UPPER, CC_LOWER, + CC_DIGIT, CC_XDIGIT, CC_CNTRL, CC_GRAPH, CC_PRINT, CC_PUNCT, + CC_NUM + }; + int cc; + + if (!*pattern) { + preg->err = REG_ERR_UNMATCHED_BRACKET; + return 0; + } + + pattern += reg_utf8_tounicode_case(pattern, &start, nocase); + if (start == '\\') { + + switch (*pattern) { + case 's': + pattern++; + cc = CC_SPACE; + goto cc_switch; + case 'd': + pattern++; + cc = CC_DIGIT; + goto cc_switch; + case 'w': + pattern++; + reg_addrange(preg, '_', '_'); + cc = CC_ALNUM; + goto cc_switch; + } + pattern += reg_decode_escape(pattern, &start); + if (start == 0) { + preg->err = REG_ERR_NULL_CHAR; + return 0; + } + if (start == '\\' && *pattern == 0) { + preg->err = REG_ERR_INVALID_ESCAPE; + return 
0; + } + } + if (pattern[0] == '-' && pattern[1] && pattern[1] != ']') { + + pattern += utf8_tounicode(pattern, &end); + pattern += reg_utf8_tounicode_case(pattern, &end, nocase); + if (end == '\\') { + pattern += reg_decode_escape(pattern, &end); + if (end == 0) { + preg->err = REG_ERR_NULL_CHAR; + return 0; + } + if (end == '\\' && *pattern == 0) { + preg->err = REG_ERR_INVALID_ESCAPE; + return 0; + } + } + + reg_addrange(preg, start, end); + continue; + } + if (start == '[' && pattern[0] == ':') { + static const char *character_class[] = { + ":alpha:", ":alnum:", ":space:", ":blank:", ":upper:", ":lower:", + ":digit:", ":xdigit:", ":cntrl:", ":graph:", ":print:", ":punct:", + }; + + for (cc = 0; cc < CC_NUM; cc++) { + n = strlen(character_class[cc]); + if (strncmp(pattern, character_class[cc], n) == 0) { + if (pattern[n] != ']') { + preg->err = REG_ERR_UNMATCHED_BRACKET; + return 0; + } + + pattern += n + 1; + break; + } + } + if (cc != CC_NUM) { +cc_switch: + switch (cc) { + case CC_ALNUM: + reg_addrange(preg, '0', '9'); + + case CC_ALPHA: + if ((preg->cflags & REG_ICASE) == 0) { + reg_addrange(preg, 'a', 'z'); + } + reg_addrange(preg, 'A', 'Z'); + break; + case CC_SPACE: + reg_addrange_str(preg, " \t\r\n\f\v"); + break; + case CC_BLANK: + reg_addrange_str(preg, " \t"); + break; + case CC_UPPER: + reg_addrange(preg, 'A', 'Z'); + break; + case CC_LOWER: + reg_addrange(preg, 'a', 'z'); + break; + case CC_XDIGIT: + reg_addrange(preg, 'a', 'f'); + reg_addrange(preg, 'A', 'F'); + + case CC_DIGIT: + reg_addrange(preg, '0', '9'); + break; + case CC_CNTRL: + reg_addrange(preg, 0, 31); + reg_addrange(preg, 127, 127); + break; + case CC_PRINT: + reg_addrange(preg, ' ', '~'); + break; + case CC_GRAPH: + reg_addrange(preg, '!', '~'); + break; + case CC_PUNCT: + reg_addrange(preg, '!', '/'); + reg_addrange(preg, ':', '@'); + reg_addrange(preg, '[', '`'); + reg_addrange(preg, '{', '~'); + break; + } + continue; + } + } + + reg_addrange(preg, start, start); + } + regc(preg, '\0'); + + if (*pattern) { + pattern++; + } + preg->regparse = pattern; + + *flagp |= HASWIDTH|SIMPLE; + } + break; + case '(': + ret = reg(preg, 1, &flags); + if (ret == 0) + return 0; + *flagp |= flags&(HASWIDTH|SPSTART); + break; + case '\0': + case '|': + case ')': + preg->err = REG_ERR_INTERNAL; + return 0; + case '?': + case '+': + case '*': + case '{': + preg->err = REG_ERR_COUNT_FOLLOWS_NOTHING; + return 0; + case '\\': + ch = *preg->regparse++; + switch (ch) { + case '\0': + preg->err = REG_ERR_INVALID_ESCAPE; + return 0; + case 'A': + ret = regnode(preg, BOLX); + break; + case 'Z': + ret = regnode(preg, EOLX); + break; + case '<': + case 'm': + ret = regnode(preg, WORDA); + break; + case '>': + case 'M': + ret = regnode(preg, WORDZ); + break; + case 'd': + case 'D': + ret = regnode(preg, ch == 'd' ? ANYOF : ANYBUT); + reg_addrange(preg, '0', '9'); + regc(preg, '\0'); + *flagp |= HASWIDTH|SIMPLE; + break; + case 'w': + case 'W': + ret = regnode(preg, ch == 'w' ? ANYOF : ANYBUT); + if ((preg->cflags & REG_ICASE) == 0) { + reg_addrange(preg, 'a', 'z'); + } + reg_addrange(preg, 'A', 'Z'); + reg_addrange(preg, '0', '9'); + reg_addrange(preg, '_', '_'); + regc(preg, '\0'); + *flagp |= HASWIDTH|SIMPLE; + break; + case 's': + case 'S': + ret = regnode(preg, ch == 's' ? 
ANYOF : ANYBUT); + reg_addrange_str(preg," \t\r\n\f\v"); + regc(preg, '\0'); + *flagp |= HASWIDTH|SIMPLE; + break; + + default: + + + preg->regparse--; + goto de_fault; + } + break; + de_fault: + default: { + int added = 0; + + + preg->regparse -= n; + + ret = regnode(preg, EXACTLY); + + + + while (*preg->regparse && strchr(META, *preg->regparse) == NULL) { + n = reg_utf8_tounicode_case(preg->regparse, &ch, (preg->cflags & REG_ICASE)); + if (ch == '\\' && preg->regparse[n]) { + if (strchr("<>mMwWdDsSAZ", preg->regparse[n])) { + + break; + } + n += reg_decode_escape(preg->regparse + n, &ch); + if (ch == 0) { + preg->err = REG_ERR_NULL_CHAR; + return 0; + } + } + + + if (ISMULT(preg->regparse[n])) { + + if (added) { + + break; + } + + regc(preg, ch); + added++; + preg->regparse += n; + break; + } + + + regc(preg, ch); + added++; + preg->regparse += n; + } + regc(preg, '\0'); + + *flagp |= HASWIDTH; + if (added == 1) + *flagp |= SIMPLE; + break; + } + break; + } + + return(ret); +} + +static void reg_grow(regex_t *preg, int n) +{ + if (preg->p + n >= preg->proglen) { + preg->proglen = (preg->p + n) * 2; + preg->program = realloc(preg->program, preg->proglen * sizeof(int)); + } +} + + +static int regnode(regex_t *preg, int op) +{ + reg_grow(preg, 2); + + + preg->program[preg->p++] = op; + preg->program[preg->p++] = 0; + + + return preg->p - 2; +} + +static void regc(regex_t *preg, int b ) +{ + reg_grow(preg, 1); + preg->program[preg->p++] = b; +} + +static int reginsert(regex_t *preg, int op, int size, int opnd ) +{ + reg_grow(preg, size); + + + memmove(preg->program + opnd + size, preg->program + opnd, sizeof(int) * (preg->p - opnd)); + + memset(preg->program + opnd, 0, sizeof(int) * size); + + preg->program[opnd] = op; + + preg->p += size; + + return opnd + size; +} + +static void regtail(regex_t *preg, int p, int val) +{ + int scan; + int temp; + int offset; + + + scan = p; + for (;;) { + temp = regnext(preg, scan); + if (temp == 0) + break; + scan = temp; + } + + if (OP(preg, scan) == BACK) + offset = scan - val; + else + offset = val - scan; + + preg->program[scan + 1] = offset; +} + + +static void regoptail(regex_t *preg, int p, int val ) +{ + + if (p != 0 && OP(preg, p) == BRANCH) { + regtail(preg, OPERAND(p), val); + } +} + + +static int regtry(regex_t *preg, const char *string ); +static int regmatch(regex_t *preg, int prog); +static int regrepeat(regex_t *preg, int p, int max); + +int jim_regexec(regex_t *preg, const char *string, size_t nmatch, regmatch_t pmatch[], int eflags) +{ + const char *s; + int scan; + + + if (preg == NULL || preg->program == NULL || string == NULL) { + return REG_ERR_NULL_ARGUMENT; + } + + + if (*preg->program != REG_MAGIC) { + return REG_ERR_CORRUPTED; + } + +#ifdef DEBUG + fprintf(stderr, "regexec: %s\n", string); + regdump(preg); +#endif + + preg->eflags = eflags; + preg->pmatch = pmatch; + preg->nmatch = nmatch; + preg->start = string; + + + for (scan = OPERAND(1); scan != 0; scan += regopsize(preg, scan)) { + int op = OP(preg, scan); + if (op == END) + break; + if (op == REPX || op == REPXMIN) + preg->program[scan + 4] = 0; + } + + + if (preg->regmust != 0) { + s = string; + while ((s = str_find(s, preg->program[preg->regmust], preg->cflags & REG_ICASE)) != NULL) { + if (prefix_cmp(preg->program + preg->regmust, preg->regmlen, s, preg->cflags & REG_ICASE) >= 0) { + break; + } + s++; + } + if (s == NULL) + return REG_NOMATCH; + } + + + preg->regbol = string; + + + if (preg->reganch) { + if (eflags & REG_NOTBOL) { + + goto nextline; + } + while (1) { + 
if (regtry(preg, string)) { + return REG_NOERROR; + } + if (*string) { +nextline: + if (preg->cflags & REG_NEWLINE) { + + string = strchr(string, '\n'); + if (string) { + preg->regbol = ++string; + continue; + } + } + } + return REG_NOMATCH; + } + } + + + s = string; + if (preg->regstart != '\0') { + + while ((s = str_find(s, preg->regstart, preg->cflags & REG_ICASE)) != NULL) { + if (regtry(preg, s)) + return REG_NOERROR; + s++; + } + } + else + + while (1) { + if (regtry(preg, s)) + return REG_NOERROR; + if (*s == '\0') { + break; + } + else { + int c; + s += utf8_tounicode(s, &c); + } + } + + + return REG_NOMATCH; +} + + +static int regtry( regex_t *preg, const char *string ) +{ + int i; + + preg->reginput = string; + + for (i = 0; i < preg->nmatch; i++) { + preg->pmatch[i].rm_so = -1; + preg->pmatch[i].rm_eo = -1; + } + if (regmatch(preg, 1)) { + preg->pmatch[0].rm_so = string - preg->start; + preg->pmatch[0].rm_eo = preg->reginput - preg->start; + return(1); + } else + return(0); +} + +static int prefix_cmp(const int *prog, int proglen, const char *string, int nocase) +{ + const char *s = string; + while (proglen && *s) { + int ch; + int n = reg_utf8_tounicode_case(s, &ch, nocase); + if (ch != *prog) { + return -1; + } + prog++; + s += n; + proglen--; + } + if (proglen == 0) { + return s - string; + } + return -1; +} + +static int reg_range_find(const int *range, int c) +{ + while (*range) { + + if (c >= range[1] && c <= (range[0] + range[1] - 1)) { + return 1; + } + range += 2; + } + return 0; +} + +static const char *str_find(const char *string, int c, int nocase) +{ + if (nocase) { + + c = utf8_upper(c); + } + while (*string) { + int ch; + int n = reg_utf8_tounicode_case(string, &ch, nocase); + if (c == ch) { + return string; + } + string += n; + } + return NULL; +} + +static int reg_iseol(regex_t *preg, int ch) +{ + if (preg->cflags & REG_NEWLINE) { + return ch == '\0' || ch == '\n'; + } + else { + return ch == '\0'; + } +} + +static int regmatchsimplerepeat(regex_t *preg, int scan, int matchmin) +{ + int nextch = '\0'; + const char *save; + int no; + int c; + + int max = preg->program[scan + 2]; + int min = preg->program[scan + 3]; + int next = regnext(preg, scan); + + if (OP(preg, next) == EXACTLY) { + nextch = preg->program[OPERAND(next)]; + } + save = preg->reginput; + no = regrepeat(preg, scan + 5, max); + if (no < min) { + return 0; + } + if (matchmin) { + + max = no; + no = min; + } + + while (1) { + if (matchmin) { + if (no > max) { + break; + } + } + else { + if (no < min) { + break; + } + } + preg->reginput = save + utf8_index(save, no); + reg_utf8_tounicode_case(preg->reginput, &c, (preg->cflags & REG_ICASE)); + + if (reg_iseol(preg, nextch) || c == nextch) { + if (regmatch(preg, next)) { + return(1); + } + } + if (matchmin) { + + no++; + } + else { + + no--; + } + } + return(0); +} + +static int regmatchrepeat(regex_t *preg, int scan, int matchmin) +{ + int *scanpt = preg->program + scan; + + int max = scanpt[2]; + int min = scanpt[3]; + + + if (scanpt[4] < min) { + + scanpt[4]++; + if (regmatch(preg, scan + 5)) { + return 1; + } + scanpt[4]--; + return 0; + } + if (scanpt[4] > max) { + return 0; + } + + if (matchmin) { + + if (regmatch(preg, regnext(preg, scan))) { + return 1; + } + + scanpt[4]++; + if (regmatch(preg, scan + 5)) { + return 1; + } + scanpt[4]--; + return 0; + } + + if (scanpt[4] < max) { + scanpt[4]++; + if (regmatch(preg, scan + 5)) { + return 1; + } + scanpt[4]--; + } + + return regmatch(preg, regnext(preg, scan)); +} + + +static int regmatch(regex_t 
*preg, int prog) +{ + int scan; + int next; + const char *save; + + scan = prog; + +#ifdef DEBUG + if (scan != 0 && regnarrate) + fprintf(stderr, "%s(\n", regprop(scan)); +#endif + while (scan != 0) { + int n; + int c; +#ifdef DEBUG + if (regnarrate) { + fprintf(stderr, "%3d: %s...\n", scan, regprop(OP(preg, scan))); + } +#endif + next = regnext(preg, scan); + n = reg_utf8_tounicode_case(preg->reginput, &c, (preg->cflags & REG_ICASE)); + + switch (OP(preg, scan)) { + case BOLX: + if ((preg->eflags & REG_NOTBOL)) { + return(0); + } + + case BOL: + if (preg->reginput != preg->regbol) { + return(0); + } + break; + case EOLX: + if (c != 0) { + + return 0; + } + break; + case EOL: + if (!reg_iseol(preg, c)) { + return(0); + } + break; + case WORDA: + + if ((!isalnum(UCHAR(c))) && c != '_') + return(0); + + if (preg->reginput > preg->regbol && + (isalnum(UCHAR(preg->reginput[-1])) || preg->reginput[-1] == '_')) + return(0); + break; + case WORDZ: + + if (preg->reginput > preg->regbol) { + + if (reg_iseol(preg, c) || !(isalnum(UCHAR(c)) || c == '_')) { + c = preg->reginput[-1]; + + if (isalnum(UCHAR(c)) || c == '_') { + break; + } + } + } + + return(0); + + case ANY: + if (reg_iseol(preg, c)) + return 0; + preg->reginput += n; + break; + case EXACTLY: { + int opnd; + int len; + int slen; + + opnd = OPERAND(scan); + len = str_int_len(preg->program + opnd); + + slen = prefix_cmp(preg->program + opnd, len, preg->reginput, preg->cflags & REG_ICASE); + if (slen < 0) { + return(0); + } + preg->reginput += slen; + } + break; + case ANYOF: + if (reg_iseol(preg, c) || reg_range_find(preg->program + OPERAND(scan), c) == 0) { + return(0); + } + preg->reginput += n; + break; + case ANYBUT: + if (reg_iseol(preg, c) || reg_range_find(preg->program + OPERAND(scan), c) != 0) { + return(0); + } + preg->reginput += n; + break; + case NOTHING: + break; + case BACK: + break; + case BRANCH: + if (OP(preg, next) != BRANCH) + next = OPERAND(scan); + else { + do { + save = preg->reginput; + if (regmatch(preg, OPERAND(scan))) { + return(1); + } + preg->reginput = save; + scan = regnext(preg, scan); + } while (scan != 0 && OP(preg, scan) == BRANCH); + return(0); + + } + break; + case REP: + case REPMIN: + return regmatchsimplerepeat(preg, scan, OP(preg, scan) == REPMIN); + + case REPX: + case REPXMIN: + return regmatchrepeat(preg, scan, OP(preg, scan) == REPXMIN); + + case END: + return 1; + + case OPENNC: + case CLOSENC: + return regmatch(preg, next); + + default: + if (OP(preg, scan) >= OPEN+1 && OP(preg, scan) < CLOSE_END) { + save = preg->reginput; + if (regmatch(preg, next)) { + if (OP(preg, scan) < CLOSE) { + int no = OP(preg, scan) - OPEN; + if (no < preg->nmatch && preg->pmatch[no].rm_so == -1) { + preg->pmatch[no].rm_so = save - preg->start; + } + } + else { + int no = OP(preg, scan) - CLOSE; + if (no < preg->nmatch && preg->pmatch[no].rm_eo == -1) { + preg->pmatch[no].rm_eo = save - preg->start; + } + } + return(1); + } + + preg->reginput = save; + return(0); + } + return REG_ERR_INTERNAL; + } + + scan = next; + } + + return REG_ERR_INTERNAL; +} + +static int regrepeat(regex_t *preg, int p, int max) +{ + int count = 0; + const char *scan; + int opnd; + int ch; + int n; + + scan = preg->reginput; + opnd = OPERAND(p); + switch (OP(preg, p)) { + case ANY: + while (!reg_iseol(preg, *scan) && count < max) { + count++; + scan += utf8_charlen(*scan); + } + break; + case EXACTLY: + while (count < max) { + n = reg_utf8_tounicode_case(scan, &ch, preg->cflags & REG_ICASE); + if (preg->program[opnd] != ch) { + break; + } + 
count++; + scan += n; + } + break; + case ANYOF: + while (count < max) { + n = reg_utf8_tounicode_case(scan, &ch, preg->cflags & REG_ICASE); + if (reg_iseol(preg, ch) || reg_range_find(preg->program + opnd, ch) == 0) { + break; + } + count++; + scan += n; + } + break; + case ANYBUT: + while (count < max) { + n = reg_utf8_tounicode_case(scan, &ch, preg->cflags & REG_ICASE); + if (reg_iseol(preg, ch) || reg_range_find(preg->program + opnd, ch) != 0) { + break; + } + count++; + scan += n; + } + break; + default: + preg->err = REG_ERR_INTERNAL; + count = 0; + break; + } + preg->reginput = scan; + + return(count); +} + +static int regnext(regex_t *preg, int p ) +{ + int offset; + + offset = NEXT(preg, p); + + if (offset == 0) + return 0; + + if (OP(preg, p) == BACK) + return(p-offset); + else + return(p+offset); +} + +static int regopsize(regex_t *preg, int p ) +{ + + switch (OP(preg, p)) { + case REP: + case REPMIN: + case REPX: + case REPXMIN: + return 5; + + case ANYOF: + case ANYBUT: + case EXACTLY: { + int s = p + 2; + while (preg->program[s++]) { + } + return s - p; + } + } + return 2; +} + + +size_t jim_regerror(int errcode, const regex_t *preg, char *errbuf, size_t errbuf_size) +{ + static const char *error_strings[] = { + "success", + "no match", + "bad pattern", + "null argument", + "unknown error", + "too big", + "out of memory", + "too many ()", + "parentheses () not balanced", + "braces {} not balanced", + "invalid repetition count(s)", + "extra characters", + "*+ of empty atom", + "nested count", + "internal error", + "count follows nothing", + "invalid escape \\ sequence", + "corrupted program", + "contains null char", + "brackets [] not balanced", + }; + const char *err; + + if (errcode < 0 || errcode >= REG_ERR_NUM) { + err = "Bad error code"; + } + else { + err = error_strings[errcode]; + } + + return snprintf(errbuf, errbuf_size, "%s", err); +} + +void jim_regfree(regex_t *preg) +{ + free(preg->program); +} + +#endif +#include + +void Jim_SetResultErrno(Jim_Interp *interp, const char *msg) +{ + Jim_SetResultFormatted(interp, "%s: %s", msg, strerror(Jim_Errno())); +} + +#if defined(_WIN32) || defined(WIN32) +#include + +int Jim_Errno(void) +{ + switch (GetLastError()) { + case ERROR_FILE_NOT_FOUND: return ENOENT; + case ERROR_PATH_NOT_FOUND: return ENOENT; + case ERROR_TOO_MANY_OPEN_FILES: return EMFILE; + case ERROR_ACCESS_DENIED: return EACCES; + case ERROR_INVALID_HANDLE: return EBADF; + case ERROR_BAD_ENVIRONMENT: return E2BIG; + case ERROR_BAD_FORMAT: return ENOEXEC; + case ERROR_INVALID_ACCESS: return EACCES; + case ERROR_INVALID_DRIVE: return ENOENT; + case ERROR_CURRENT_DIRECTORY: return EACCES; + case ERROR_NOT_SAME_DEVICE: return EXDEV; + case ERROR_NO_MORE_FILES: return ENOENT; + case ERROR_WRITE_PROTECT: return EROFS; + case ERROR_BAD_UNIT: return ENXIO; + case ERROR_NOT_READY: return EBUSY; + case ERROR_BAD_COMMAND: return EIO; + case ERROR_CRC: return EIO; + case ERROR_BAD_LENGTH: return EIO; + case ERROR_SEEK: return EIO; + case ERROR_WRITE_FAULT: return EIO; + case ERROR_READ_FAULT: return EIO; + case ERROR_GEN_FAILURE: return EIO; + case ERROR_SHARING_VIOLATION: return EACCES; + case ERROR_LOCK_VIOLATION: return EACCES; + case ERROR_SHARING_BUFFER_EXCEEDED: return ENFILE; + case ERROR_HANDLE_DISK_FULL: return ENOSPC; + case ERROR_NOT_SUPPORTED: return ENODEV; + case ERROR_REM_NOT_LIST: return EBUSY; + case ERROR_DUP_NAME: return EEXIST; + case ERROR_BAD_NETPATH: return ENOENT; + case ERROR_NETWORK_BUSY: return EBUSY; + case ERROR_DEV_NOT_EXIST: return ENODEV; 
+ case ERROR_TOO_MANY_CMDS: return EAGAIN; + case ERROR_ADAP_HDW_ERR: return EIO; + case ERROR_BAD_NET_RESP: return EIO; + case ERROR_UNEXP_NET_ERR: return EIO; + case ERROR_NETNAME_DELETED: return ENOENT; + case ERROR_NETWORK_ACCESS_DENIED: return EACCES; + case ERROR_BAD_DEV_TYPE: return ENODEV; + case ERROR_BAD_NET_NAME: return ENOENT; + case ERROR_TOO_MANY_NAMES: return ENFILE; + case ERROR_TOO_MANY_SESS: return EIO; + case ERROR_SHARING_PAUSED: return EAGAIN; + case ERROR_REDIR_PAUSED: return EAGAIN; + case ERROR_FILE_EXISTS: return EEXIST; + case ERROR_CANNOT_MAKE: return ENOSPC; + case ERROR_OUT_OF_STRUCTURES: return ENFILE; + case ERROR_ALREADY_ASSIGNED: return EEXIST; + case ERROR_INVALID_PASSWORD: return EPERM; + case ERROR_NET_WRITE_FAULT: return EIO; + case ERROR_NO_PROC_SLOTS: return EAGAIN; + case ERROR_DISK_CHANGE: return EXDEV; + case ERROR_BROKEN_PIPE: return EPIPE; + case ERROR_OPEN_FAILED: return ENOENT; + case ERROR_DISK_FULL: return ENOSPC; + case ERROR_NO_MORE_SEARCH_HANDLES: return EMFILE; + case ERROR_INVALID_TARGET_HANDLE: return EBADF; + case ERROR_INVALID_NAME: return ENOENT; + case ERROR_PROC_NOT_FOUND: return ESRCH; + case ERROR_WAIT_NO_CHILDREN: return ECHILD; + case ERROR_CHILD_NOT_COMPLETE: return ECHILD; + case ERROR_DIRECT_ACCESS_HANDLE: return EBADF; + case ERROR_SEEK_ON_DEVICE: return ESPIPE; + case ERROR_BUSY_DRIVE: return EAGAIN; + case ERROR_DIR_NOT_EMPTY: return EEXIST; + case ERROR_NOT_LOCKED: return EACCES; + case ERROR_BAD_PATHNAME: return ENOENT; + case ERROR_LOCK_FAILED: return EACCES; + case ERROR_ALREADY_EXISTS: return EEXIST; + case ERROR_FILENAME_EXCED_RANGE: return ENAMETOOLONG; + case ERROR_BAD_PIPE: return EPIPE; + case ERROR_PIPE_BUSY: return EAGAIN; + case ERROR_PIPE_NOT_CONNECTED: return EPIPE; + case ERROR_DIRECTORY: return ENOTDIR; + } + return EINVAL; +} + +long JimProcessPid(phandle_t pid) +{ + if (pid == INVALID_HANDLE_VALUE) { + return -1; + } + return GetProcessId(pid); +} + +phandle_t JimWaitPid(long pid, int *status, int nohang) +{ + if (pid > 0) { + HANDLE h = OpenProcess(PROCESS_QUERY_INFORMATION | SYNCHRONIZE, FALSE, pid); + if (h) { + long pid = waitpid(h, status, nohang); + CloseHandle(h); + if (pid > 0) { + return h; + } + } + } + return JIM_BAD_PHANDLE; +} + +long waitpid(phandle_t phandle, int *status, int nohang) +{ + long pid; + DWORD ret = WaitForSingleObject(phandle, nohang ? 0 : INFINITE); + if (ret == WAIT_TIMEOUT || ret == WAIT_FAILED) { + + return -1; + } + GetExitCodeProcess(phandle, &ret); + *status = ret; + + pid = GetProcessId(phandle); + CloseHandle(phandle); + return pid; +} + +int Jim_MakeTempFile(Jim_Interp *interp, const char *filename_template, int unlink_file) +{ + char name[MAX_PATH]; + HANDLE handle; + + if (!GetTempPath(MAX_PATH, name) || !GetTempFileName(name, filename_template ? filename_template : "JIM", 0, name)) { + return -1; + } + + handle = CreateFile(name, GENERIC_READ | GENERIC_WRITE, 0, NULL, + CREATE_ALWAYS, FILE_ATTRIBUTE_TEMPORARY | (unlink_file ? FILE_FLAG_DELETE_ON_CLOSE : 0), + NULL); + + if (handle == INVALID_HANDLE_VALUE) { + goto error; + } + + Jim_SetResultString(interp, name, -1); + return _open_osfhandle((intptr_t)handle, _O_RDWR | _O_TEXT); + + error: + Jim_SetResultErrno(interp, name); + DeleteFile(name); + return -1; +} + +int Jim_OpenForWrite(const char *filename, int append) +{ + if (strcmp(filename, "/dev/null") == 0) { + filename = "nul:"; + } + int fd = _open(filename, _O_WRONLY | _O_CREAT | _O_TEXT | (append ? 
_O_APPEND : _O_TRUNC), _S_IREAD | _S_IWRITE); + if (fd >= 0 && append) { + + _lseek(fd, 0L, SEEK_END); + } + return fd; +} + +int Jim_OpenForRead(const char *filename) +{ + if (strcmp(filename, "/dev/null") == 0) { + filename = "nul:"; + } + return _open(filename, _O_RDONLY | _O_TEXT, 0); +} + +#elif defined(HAVE_UNISTD_H) + + + +int Jim_MakeTempFile(Jim_Interp *interp, const char *filename_template, int unlink_file) +{ + int fd; + mode_t mask; + Jim_Obj *filenameObj; + + if (filename_template == NULL) { + const char *tmpdir = getenv("TMPDIR"); + if (tmpdir == NULL || *tmpdir == '\0' || access(tmpdir, W_OK) != 0) { + tmpdir = "/tmp/"; + } + filenameObj = Jim_NewStringObj(interp, tmpdir, -1); + if (tmpdir[0] && tmpdir[strlen(tmpdir) - 1] != '/') { + Jim_AppendString(interp, filenameObj, "/", 1); + } + Jim_AppendString(interp, filenameObj, "tcl.tmp.XXXXXX", -1); + } + else { + filenameObj = Jim_NewStringObj(interp, filename_template, -1); + } + + +#ifdef HAVE_UMASK + mask = umask(S_IXUSR | S_IRWXG | S_IRWXO); +#endif +#ifdef HAVE_MKSTEMP + fd = mkstemp(filenameObj->bytes); +#else + if (mktemp(filenameObj->bytes) == NULL) { + fd = -1; + } + else { + fd = open(filenameObj->bytes, O_RDWR | O_CREAT | O_TRUNC); + } +#endif +#ifdef HAVE_UMASK + umask(mask); +#endif + if (fd < 0) { + Jim_SetResultErrno(interp, Jim_String(filenameObj)); + Jim_FreeNewObj(interp, filenameObj); + return -1; + } + if (unlink_file) { + remove(Jim_String(filenameObj)); + } + + Jim_SetResult(interp, filenameObj); + return fd; +} + +int Jim_OpenForWrite(const char *filename, int append) +{ + return open(filename, O_WRONLY | O_CREAT | (append ? O_APPEND : O_TRUNC), 0666); +} + +int Jim_OpenForRead(const char *filename) +{ + return open(filename, O_RDONLY, 0); +} + +#endif + +#if defined(_WIN32) || defined(WIN32) +#ifndef STRICT +#define STRICT +#endif +#define WIN32_LEAN_AND_MEAN +#include + +#if defined(HAVE_DLOPEN_COMPAT) +void *dlopen(const char *path, int mode) +{ + JIM_NOTUSED(mode); + + return (void *)LoadLibraryA(path); +} + +int dlclose(void *handle) +{ + FreeLibrary((HANDLE)handle); + return 0; +} + +void *dlsym(void *handle, const char *symbol) +{ + return GetProcAddress((HMODULE)handle, symbol); +} + +char *dlerror(void) +{ + static char msg[121]; + FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM, NULL, GetLastError(), + LANG_NEUTRAL, msg, sizeof(msg) - 1, NULL); + return msg; +} +#endif + +#ifdef _MSC_VER + +#include + + +int gettimeofday(struct timeval *tv, void *unused) +{ + struct _timeb tb; + + _ftime(&tb); + tv->tv_sec = tb.time; + tv->tv_usec = tb.millitm * 1000; + + return 0; +} + + +DIR *opendir(const char *name) +{ + DIR *dir = 0; + + if (name && name[0]) { + size_t base_length = strlen(name); + const char *all = + strchr("/\\", name[base_length - 1]) ? 
"*" : "/*"; + + if ((dir = (DIR *) Jim_Alloc(sizeof *dir)) != 0 && + (dir->name = (char *)Jim_Alloc(base_length + strlen(all) + 1)) != 0) { + strcat(strcpy(dir->name, name), all); + + if ((dir->handle = (long)_findfirst(dir->name, &dir->info)) != -1) + dir->result.d_name = 0; + else { + Jim_Free(dir->name); + Jim_Free(dir); + dir = 0; + } + } + else { + Jim_Free(dir); + dir = 0; + errno = ENOMEM; + } + } + else { + errno = EINVAL; + } + return dir; +} + +int closedir(DIR * dir) +{ + int result = -1; + + if (dir) { + if (dir->handle != -1) + result = _findclose(dir->handle); + Jim_Free(dir->name); + Jim_Free(dir); + } + if (result == -1) + errno = EBADF; + return result; +} + +struct dirent *readdir(DIR * dir) +{ + struct dirent *result = 0; + + if (dir && dir->handle != -1) { + if (!dir->result.d_name || _findnext(dir->handle, &dir->info) != -1) { + result = &dir->result; + result->d_name = dir->info.name; + } + } + else { + errno = EBADF; + } + return result; +} +#endif +#endif +#include +#include + + + + + + +#ifndef SIGPIPE +#define SIGPIPE 13 +#endif +#ifndef SIGINT +#define SIGINT 2 +#endif + +const char *Jim_SignalId(int sig) +{ + static char buf[10]; + switch (sig) { + case SIGINT: return "SIGINT"; + case SIGPIPE: return "SIGPIPE"; + + } + snprintf(buf, sizeof(buf), "%d", sig); + return buf; +} +#ifndef JIM_BOOTSTRAP_LIB_ONLY +#include +#include +#include + + +#ifdef USE_LINENOISE +#ifdef HAVE_UNISTD_H + #include +#endif +#ifdef HAVE_SYS_STAT_H + #include +#endif +#include "linenoise.h" +#else +#define MAX_LINE_LEN 512 +#endif + +#ifdef USE_LINENOISE +struct JimCompletionInfo { + Jim_Interp *interp; + Jim_Obj *completion_command; + Jim_Obj *hints_command; + +}; + +static struct JimCompletionInfo *JimGetCompletionInfo(Jim_Interp *interp); +static void JimCompletionCallback(const char *prefix, linenoiseCompletions *comp, void *userdata); +static const char completion_callback_assoc_key[] = "interactive-completion"; +static char *JimHintsCallback(const char *prefix, int *color, int *bold, void *userdata); +static void JimFreeHintsCallback(void *hint, void *userdata); +#endif + +char *Jim_HistoryGetline(Jim_Interp *interp, const char *prompt) +{ +#ifdef USE_LINENOISE + struct JimCompletionInfo *compinfo = JimGetCompletionInfo(interp); + char *result; + Jim_Obj *objPtr; + long mlmode = 0; + if (compinfo->completion_command) { + linenoiseSetCompletionCallback(JimCompletionCallback, compinfo); + } + if (compinfo->hints_command) { + linenoiseSetHintsCallback(JimHintsCallback, compinfo); + linenoiseSetFreeHintsCallback(JimFreeHintsCallback); + } + objPtr = Jim_GetVariableStr(interp, "history::multiline", JIM_NONE); + if (objPtr && Jim_GetLong(interp, objPtr, &mlmode) == JIM_NONE) { + linenoiseSetMultiLine(mlmode); + } + + result = linenoise(prompt); + + linenoiseSetCompletionCallback(NULL, NULL); + linenoiseSetHintsCallback(NULL, NULL); + linenoiseSetFreeHintsCallback(NULL); + return result; +#else + int len; + char *line = Jim_Alloc(MAX_LINE_LEN); + + fputs(prompt, stdout); + fflush(stdout); + + if (fgets(line, MAX_LINE_LEN, stdin) == NULL) { + Jim_Free(line); + return NULL; + } + len = strlen(line); + if (len && line[len - 1] == '\n') { + line[len - 1] = '\0'; + } + return line; +#endif +} + +void Jim_HistoryLoad(const char *filename) +{ +#ifdef USE_LINENOISE + linenoiseHistoryLoad(filename); +#endif +} + +void Jim_HistoryAdd(const char *line) +{ +#ifdef USE_LINENOISE + linenoiseHistoryAdd(line); +#endif +} + +void Jim_HistorySave(const char *filename) +{ +#ifdef USE_LINENOISE +#ifdef 
HAVE_UMASK + mode_t mask; + + mask = umask(S_IXUSR | S_IRWXG | S_IRWXO); +#endif + linenoiseHistorySave(filename); +#ifdef HAVE_UMASK + umask(mask); +#endif +#endif +} + +void Jim_HistoryShow(void) +{ +#ifdef USE_LINENOISE + + int i; + int len; + char **history = linenoiseHistory(&len); + for (i = 0; i < len; i++) { + printf("%4d %s\n", i + 1, history[i]); + } +#endif +} + +void Jim_HistorySetMaxLen(int length) +{ +#ifdef USE_LINENOISE + linenoiseHistorySetMaxLen(length); +#endif +} + +int Jim_HistoryGetMaxLen(void) +{ +#ifdef USE_LINENOISE + return linenoiseHistoryGetMaxLen(); +#endif + return 0; +} + +#ifdef USE_LINENOISE +static void JimCompletionCallback(const char *prefix, linenoiseCompletions *comp, void *userdata) +{ + struct JimCompletionInfo *info = (struct JimCompletionInfo *)userdata; + Jim_Obj *objv[2]; + int ret; + + objv[0] = info->completion_command; + objv[1] = Jim_NewStringObj(info->interp, prefix, -1); + + ret = Jim_EvalObjVector(info->interp, 2, objv); + + + if (ret == JIM_OK) { + int i; + Jim_Obj *listObj = Jim_GetResult(info->interp); + int len = Jim_ListLength(info->interp, listObj); + for (i = 0; i < len; i++) { + linenoiseAddCompletion(comp, Jim_String(Jim_ListGetIndex(info->interp, listObj, i))); + } + } +} + +static char *JimHintsCallback(const char *prefix, int *color, int *bold, void *userdata) +{ + struct JimCompletionInfo *info = (struct JimCompletionInfo *)userdata; + Jim_Obj *objv[2]; + int ret; + char *result = NULL; + + objv[0] = info->hints_command; + objv[1] = Jim_NewStringObj(info->interp, prefix, -1); + + ret = Jim_EvalObjVector(info->interp, 2, objv); + + + if (ret == JIM_OK) { + Jim_Obj *listObj = Jim_GetResult(info->interp); + Jim_IncrRefCount(listObj); + + int len = Jim_ListLength(info->interp, listObj); + if (len >= 1) { + long x; + result = Jim_StrDup(Jim_String(Jim_ListGetIndex(info->interp, listObj, 0))); + if (len >= 2 && Jim_GetLong(info->interp, Jim_ListGetIndex(info->interp, listObj, 1), &x) == JIM_OK) { + *color = x; + } + if (len >= 3 && Jim_GetLong(info->interp, Jim_ListGetIndex(info->interp, listObj, 2), &x) == JIM_OK) { + *bold = x; + } + } + Jim_DecrRefCount(info->interp, listObj); + } + return result; +} + +static void JimFreeHintsCallback(void *hint, void *userdata) +{ + Jim_Free(hint); +} + +static void JimHistoryFreeCompletion(Jim_Interp *interp, void *data) +{ + struct JimCompletionInfo *compinfo = data; + + if (compinfo->completion_command) { + Jim_DecrRefCount(interp, compinfo->completion_command); + } + if (compinfo->hints_command) { + Jim_DecrRefCount(interp, compinfo->hints_command); + } + + Jim_Free(compinfo); +} + +static struct JimCompletionInfo *JimGetCompletionInfo(Jim_Interp *interp) +{ + struct JimCompletionInfo *compinfo = Jim_GetAssocData(interp, completion_callback_assoc_key); + if (compinfo == NULL) { + compinfo = Jim_Alloc(sizeof(*compinfo)); + compinfo->interp = interp; + compinfo->completion_command = NULL; + compinfo->hints_command = NULL; + Jim_SetAssocData(interp, completion_callback_assoc_key, JimHistoryFreeCompletion, compinfo); + } + return compinfo; +} +#endif + +void Jim_HistorySetCompletion(Jim_Interp *interp, Jim_Obj *completionCommandObj) +{ +#ifdef USE_LINENOISE + struct JimCompletionInfo *compinfo = JimGetCompletionInfo(interp); + + if (completionCommandObj) { + + Jim_IncrRefCount(completionCommandObj); + } + if (compinfo->completion_command) { + Jim_DecrRefCount(interp, compinfo->completion_command); + } + compinfo->completion_command = completionCommandObj; +#endif +} + +void 
Jim_HistorySetHints(Jim_Interp *interp, Jim_Obj *hintsCommandObj) +{ +#ifdef USE_LINENOISE + struct JimCompletionInfo *compinfo = JimGetCompletionInfo(interp); + + if (hintsCommandObj) { + + Jim_IncrRefCount(hintsCommandObj); + } + if (compinfo->hints_command) { + Jim_DecrRefCount(interp, compinfo->hints_command); + } + compinfo->hints_command = hintsCommandObj; +#endif +} + +int Jim_InteractivePrompt(Jim_Interp *interp) +{ + int retcode = JIM_OK; + char *history_file = NULL; +#ifdef USE_LINENOISE + const char *home; + + home = getenv("HOME"); + if (home && isatty(STDIN_FILENO)) { + int history_len = strlen(home) + sizeof("/.jim_history"); + history_file = Jim_Alloc(history_len); + snprintf(history_file, history_len, "%s/.jim_history", home); + Jim_HistoryLoad(history_file); + } + + Jim_HistorySetCompletion(interp, Jim_NewStringObj(interp, "tcl::autocomplete", -1)); + Jim_HistorySetHints(interp, Jim_NewStringObj(interp, "tcl::stdhint", -1)); +#endif + + printf("Welcome to Jim version %d.%d\n", + JIM_VERSION / 100, JIM_VERSION % 100); + Jim_SetVariableStrWithStr(interp, JIM_INTERACTIVE, "1"); + + while (1) { + Jim_Obj *scriptObjPtr; + const char *result; + int reslen; + char prompt[20]; + + if (retcode != JIM_OK) { + const char *retcodestr = Jim_ReturnCode(retcode); + + if (*retcodestr == '?') { + snprintf(prompt, sizeof(prompt) - 3, "[%d] . ", retcode); + } + else { + snprintf(prompt, sizeof(prompt) - 3, "[%s] . ", retcodestr); + } + } + else { + strcpy(prompt, ". "); + } + + scriptObjPtr = Jim_NewStringObj(interp, "", 0); + Jim_IncrRefCount(scriptObjPtr); + while (1) { + char state; + char *line; + + line = Jim_HistoryGetline(interp, prompt); + if (line == NULL) { + if (errno == EINTR) { + continue; + } + Jim_DecrRefCount(interp, scriptObjPtr); + retcode = JIM_OK; + goto out; + } + if (Jim_Length(scriptObjPtr) != 0) { + + Jim_AppendString(interp, scriptObjPtr, "\n", 1); + } + Jim_AppendString(interp, scriptObjPtr, line, -1); + Jim_Free(line); + if (Jim_ScriptIsComplete(interp, scriptObjPtr, &state)) + break; + + snprintf(prompt, sizeof(prompt), "%c> ", state); + } +#ifdef USE_LINENOISE + if (strcmp(Jim_String(scriptObjPtr), "h") == 0) { + + Jim_HistoryShow(); + Jim_DecrRefCount(interp, scriptObjPtr); + continue; + } + + Jim_HistoryAdd(Jim_String(scriptObjPtr)); + if (history_file) { + Jim_HistorySave(history_file); + } +#endif + retcode = Jim_EvalObj(interp, scriptObjPtr); + Jim_DecrRefCount(interp, scriptObjPtr); + + if (retcode == JIM_EXIT) { + break; + } + if (retcode == JIM_ERR) { + Jim_MakeErrorMessage(interp); + } + result = Jim_GetString(Jim_GetResult(interp), &reslen); + if (reslen) { + if (fwrite(result, reslen, 1, stdout) == 0) { + + } + putchar('\n'); + } + } + out: + Jim_Free(history_file); + + return retcode; +} + +#include +#include +#include + + + +extern int Jim_initjimshInit(Jim_Interp *interp); + +static void JimSetArgv(Jim_Interp *interp, int argc, char *const argv[]) +{ + int n; + Jim_Obj *listObj = Jim_NewListObj(interp, NULL, 0); + + + for (n = 0; n < argc; n++) { + Jim_Obj *obj = Jim_NewStringObj(interp, argv[n], -1); + + Jim_ListAppendElement(interp, listObj, obj); + } + + Jim_SetVariableStr(interp, "argv", listObj); + Jim_SetVariableStr(interp, "argc", Jim_NewIntObj(interp, argc)); +} + +static void JimPrintErrorMessage(Jim_Interp *interp) +{ + Jim_MakeErrorMessage(interp); + fprintf(stderr, "%s\n", Jim_String(Jim_GetResult(interp))); +} + +void usage(const char* executable_name) +{ + printf("jimsh version %d.%d\n", JIM_VERSION / 100, JIM_VERSION % 100); + 
printf("Usage: %s\n", executable_name); + printf("or : %s [options] [filename]\n", executable_name); + printf("\n"); + printf("Without options: Interactive mode\n"); + printf("\n"); + printf("Options:\n"); + printf(" --version : prints the version string\n"); + printf(" --help : prints this text\n"); + printf(" -e CMD : executes command CMD\n"); + printf(" NOTE: all subsequent options will be passed as arguments to the command\n"); + printf(" [filename|-] : executes the script contained in the named file, or from stdin if \"-\"\n"); + printf(" NOTE: all subsequent options will be passed to the script\n\n"); +} + +int main(int argc, char *const argv[]) +{ + int retcode; + Jim_Interp *interp; + char *const orig_argv0 = argv[0]; + + + if (argc > 1 && strcmp(argv[1], "--version") == 0) { + printf("%d.%d\n", JIM_VERSION / 100, JIM_VERSION % 100); + return 0; + } + else if (argc > 1 && strcmp(argv[1], "--help") == 0) { + usage(argv[0]); + return 0; + } + + + interp = Jim_CreateInterp(); + Jim_RegisterCoreCommands(interp); + + + if (Jim_InitStaticExtensions(interp) != JIM_OK) { + JimPrintErrorMessage(interp); + } + + Jim_SetVariableStrWithStr(interp, "jim::argv0", orig_argv0); + Jim_SetVariableStrWithStr(interp, JIM_INTERACTIVE, argc == 1 ? "1" : "0"); +#ifdef USE_LINENOISE + Jim_SetVariableStrWithStr(interp, "jim::lineedit", "1"); +#else + Jim_SetVariableStrWithStr(interp, "jim::lineedit", "0"); +#endif + retcode = Jim_initjimshInit(interp); + + if (argc == 1) { + + if (retcode == JIM_ERR) { + JimPrintErrorMessage(interp); + } + if (retcode != JIM_EXIT) { + JimSetArgv(interp, 0, NULL); + if (!isatty(STDIN_FILENO)) { + + goto eval_stdin; + } + retcode = Jim_InteractivePrompt(interp); + } + } + else { + + if (argc > 2 && strcmp(argv[1], "-e") == 0) { + + JimSetArgv(interp, argc - 3, argv + 3); + retcode = Jim_Eval(interp, argv[2]); + if (retcode != JIM_ERR) { + int len; + const char *msg = Jim_GetString(Jim_GetResult(interp), &len); + if (fwrite(msg, len, 1, stdout) == 0) { + + } + putchar('\n'); + } + } + else { + Jim_SetVariableStr(interp, "argv0", Jim_NewStringObj(interp, argv[1], -1)); + JimSetArgv(interp, argc - 2, argv + 2); + if (strcmp(argv[1], "-") == 0) { +eval_stdin: + retcode = Jim_Eval(interp, "eval [info source [stdin read] stdin 1]"); + } else { + retcode = Jim_EvalFile(interp, argv[1]); + } + } + if (retcode == JIM_ERR) { + JimPrintErrorMessage(interp); + } + } + if (retcode == JIM_EXIT) { + retcode = Jim_GetExitCode(interp); + } + else if (retcode == JIM_ERR) { + retcode = 1; + } + else { + retcode = 0; + } + Jim_FreeInterp(interp); + return retcode; +} +#endif diff --git a/autosetup/pkg-config.tcl b/autosetup/pkg-config.tcl new file mode 100644 index 0000000000..9ce7111f55 --- /dev/null +++ b/autosetup/pkg-config.tcl @@ -0,0 +1,168 @@ +# Copyright (c) 2016 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# The 'pkg-config' module allows package information to be found via 'pkg-config'. +# +# If not cross-compiling, the package path should be determined automatically +# by 'pkg-config'. +# If cross-compiling, the default package path is the compiler sysroot. +# If the C compiler doesn't support '-print-sysroot', the path can be supplied +# by the '--sysroot' option or by defining 'SYSROOT'. +# +# 'PKG_CONFIG' may be set to use an alternative to 'pkg-config'. + +use cc + +options { + sysroot:dir => "Override compiler sysroot for pkg-config search path" +} + +# @pkg-config-init ?required? 
+# +# Initialises the 'pkg-config' system. Unless '$required' is set to 0, +# it is a fatal error if a usable 'pkg-config' is not found . +# +# This command will normally be called automatically as required, +# but it may be invoked explicitly if lack of 'pkg-config' is acceptable. +# +# Returns 1 if ok, or 0 if 'pkg-config' not found/usable (only if '$required' is 0). +# +proc pkg-config-init {{required 1}} { + if {[is-defined HAVE_PKG_CONFIG]} { + return [get-define HAVE_PKG_CONFIG] + } + set found 0 + + define PKG_CONFIG [get-env PKG_CONFIG pkg-config] + msg-checking "Checking for pkg-config..." + + if {[catch {exec [get-define PKG_CONFIG] --version} version]} { + msg-result "[get-define PKG_CONFIG] (not found)" + if {$required} { + user-error "No usable pkg-config" + } + } else { + msg-result $version + define PKG_CONFIG_VERSION $version + + set found 1 + + if {[opt-str sysroot o]} { + define SYSROOT [file-normalize $o] + msg-result "Using specified sysroot [get-define SYSROOT]" + } elseif {[get-define build] ne [get-define host]} { + if {[catch {exec-with-stderr {*}[get-define CC] -print-sysroot} result errinfo] == 0} { + # Use the compiler sysroot, if there is one + define SYSROOT $result + msg-result "Found compiler sysroot $result" + } else { + configlog "[get-define CC] -print-sysroot: $result" + set msg "pkg-config: Cross compiling, but no compiler sysroot and no --sysroot supplied" + if {$required} { + user-error $msg + } else { + msg-result $msg + } + set found 0 + } + } + if {[is-defined SYSROOT]} { + set sysroot [get-define SYSROOT] + + # XXX: It's possible that these should be set only when invoking pkg-config + global env + set env(PKG_CONFIG_DIR) "" + # Supposedly setting PKG_CONFIG_LIBDIR means that PKG_CONFIG_PATH is ignored, + # but it doesn't seem to work that way in practice + set env(PKG_CONFIG_PATH) "" + # Do we need to try /usr/local as well or instead? + set env(PKG_CONFIG_LIBDIR) $sysroot/usr/lib/pkgconfig:$sysroot/usr/share/pkgconfig + set env(PKG_CONFIG_SYSROOT_DIR) $sysroot + } + } + define HAVE_PKG_CONFIG $found + return $found +} + +# @pkg-config module ?requirements? +# +# Use 'pkg-config' to find the given module meeting the given requirements. +# e.g. +# +## pkg-config pango >= 1.37.0 +# +# If found, returns 1 and sets 'HAVE_PKG_PANGO' to 1 along with: +# +## PKG_PANGO_VERSION to the found version +## PKG_PANGO_LIBS to the required libs (--libs-only-l) +## PKG_PANGO_LDFLAGS to the required linker flags (--libs-only-L) +## PKG_PANGO_CFLAGS to the required compiler flags (--cflags) +# +# If not found, returns 0. +# +proc pkg-config {module args} { + set ok [pkg-config-init] + + msg-checking "Checking for $module $args..." 
+ + if {!$ok} { + msg-result "no pkg-config" + return 0 + } + + set pkgconfig [get-define PKG_CONFIG] + + set ret [catch {exec $pkgconfig --modversion "$module $args"} version] + configlog "$pkgconfig --modversion $module $args: $version" + if {$ret} { + msg-result "not found" + return 0 + } + # Sometimes --modversion succeeds but because of dependencies it isn't usable + # This seems to show up with --cflags + set ret [catch {exec $pkgconfig --cflags $module} cflags] + if {$ret} { + msg-result "unusable ($version - see config.log)" + configlog "$pkgconfig --cflags $module" + configlog $cflags + return 0 + } + msg-result $version + set prefix [feature-define-name $module PKG_] + define HAVE_${prefix} + define ${prefix}_VERSION $version + define ${prefix}_CFLAGS $cflags + define ${prefix}_LIBS [exec $pkgconfig --libs-only-l $module] + define ${prefix}_LDFLAGS [exec $pkgconfig --libs-only-L $module] + return 1 +} + +# @pkg-config-get module setting +# +# Convenience access to the results of 'pkg-config'. +# +# For example, '[pkg-config-get pango CFLAGS]' returns +# the value of 'PKG_PANGO_CFLAGS', or '""' if not defined. +proc pkg-config-get {module name} { + set prefix [feature-define-name $module PKG_] + get-define ${prefix}_${name} "" +} + +# @pkg-config-get-var module variable +# +# Return the value of the given variable from the given pkg-config module. +# The module must already have been successfully detected with pkg-config. +# e.g. +# +## if {[pkg-config harfbuzz >= 2.5]} { +## define harfbuzz_libdir [pkg-config-get-var harfbuzz libdir] +## } +# +# Returns the empty string if the variable isn't defined. +proc pkg-config-get-var {module variable} { + set pkgconfig [get-define PKG_CONFIG] + set prefix [feature-define-name $module HAVE_PKG_] + exec $pkgconfig $module --variable $variable +} diff --git a/autosetup/proj.tcl b/autosetup/proj.tcl new file mode 100644 index 0000000000..86f4df44e2 --- /dev/null +++ b/autosetup/proj.tcl @@ -0,0 +1,2549 @@ +######################################################################## +# 2024 September 25 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# * May you do good and not evil. +# * May you find forgiveness for yourself and forgive others. +# * May you share freely, never taking more than you give. +# + +# +# ----- @module proj.tcl ----- +# @section Project-agnostic Helper APIs +# + +# +# Routines for Steve Bennett's autosetup which are common to trees +# managed in and around the umbrella of the SQLite project. +# +# The intent is that these routines be relatively generic, independent +# of a given project. +# +# For practical purposes, the copy of this file hosted in the SQLite +# project is the "canonical" one: +# +# https://sqlite.org/src/file/autosetup/proj.tcl +# +# This file was initially derived from one used in the libfossil +# project, authored by the same person who ported it here, and this is +# noted here only as an indication that there are no licensing issues +# despite this code having a handful of near-twins running around a +# handful of third-party source trees. +# +# Design notes: +# +# - Symbols with _ separators are intended for internal use within +# this file, and are not part of the API which auto.def files should +# rely on. Symbols with - separators are public APIs. +# +# - By and large, autosetup prefers to update global state with the +# results of feature checks, e.g. 
whether the compiler supports flag +# --X. In this developer's opinion that (A) causes more confusion +# than it solves[^1] and (B) adds an unnecessary layer of "voodoo" +# between the autosetup user and its internals. This module, in +# contrast, instead injects the results of its own tests into +# well-defined variables and leaves the integration of those values +# to the caller's discretion. +# +# [1]: As an example: testing for the -rpath flag, using +# cc-check-flags, can break later checks which use +# [cc-check-function-in-lib ...] because the resulting -rpath flag +# implicitly becomes part of those tests. In the case of an rpath +# test, downstream tests may not like the $prefix/lib path added by +# the rpath test. To avoid such problems, we avoid (intentionally) +# updating global state via feature tests. +# + +# +# $proj__Config is an internal-use-only array for storing whatever generic +# internal stuff we need stored. +# +array set ::proj__Config [subst { + self-tests [get-env proj.self-tests 0] + verbose-assert [get-env proj.assert-verbose 0] + isatty [isatty? stdout] +}] + +# +# List of dot-in files to filter in the final stages of +# configuration. Some configuration steps may append to this. Each +# one in this list which exists will trigger the generation of a +# file with that same name, minus the ".in", in the build directory +# (which differ from the source dir in out-of-tree builds). +# +# See: proj-dot-ins-append and proj-dot-ins-process +# +set ::proj__Config(dot-in-files) [list] + +# +# @proj-warn msg +# +# Emits a warning message to stderr. All args are appended with a +# space between each. +# +proc proj-warn {args} { + show-notices + puts stderr [join [list "WARNING:" \[ [proj-scope 1] \]: {*}$args] " "] +} + + +# +# Internal impl of [proj-fatal] and [proj-error]. It must be called +# using tailcall. +# +proc proj__faterr {failMode args} { + show-notices + set lvl 1 + while {"-up" eq [lindex $args 0]} { + set args [lassign $args -] + incr lvl + } + if {$failMode} { + puts stderr [join [list "FATAL:" \[ [proj-scope $lvl] \]: {*}$args]] + exit 1 + } else { + error [join [list in \[ [proj-scope $lvl] \]: {*}$args]] + } +} + +# +# @proj-fatal ?-up...? msg... +# +# Emits an error message to stderr and exits with non-0. All args are +# appended with a space between each. +# +# The calling scope's name is used in the error message. To instead +# use the name of a call higher up in the stack, use -up once for each +# additional level. +# +proc proj-fatal {args} { + tailcall proj__faterr 1 {*}$args +} + +# +# @proj-error ?-up...? msg... +# +# Works like proj-fatal but uses [error] intead of [exit]. +# +proc proj-error {args} { + tailcall proj__faterr 0 {*}$args +} + +# +# @proj-assert script ?message? +# +# Kind of like a C assert: if uplevel of [list expr $script] is false, +# a fatal error is triggered. The error message, by default, includes +# the body of the failed assertion, but if $msg is set then that is +# used instead. +# +proc proj-assert {script {msg ""}} { + if {1 eq $::proj__Config(verbose-assert)} { + msg-result [proj-bold "asserting: $script"] + } + if {![uplevel 1 [list expr $script]]} { + if {"" eq $msg} { + set msg $script + } + tailcall proj__faterr 1 "Assertion failed:" $msg + } +} + +# +# @proj-bold str +# +# If this function believes that the current console might support +# ANSI escape sequences then this returns $str wrapped in a sequence +# to bold that text, else it returns $str as-is. 
+# +proc proj-bold {args} { + if {$::autosetup(iswin) || !$::proj__Config(isatty)} { + return [join $args] + } + return "\033\[1m${args}\033\[0m" +} + +# +# @proj-indented-notice ?-error? ?-notice? msg +# +# Takes a multi-line message and emits it with consistent indentation. +# It does not perform any line-wrapping of its own. Which output +# routine it uses depends on its flags, defaulting to msg-result. +# For -error and -notice it uses user-notice. +# +# If the -notice flag it used then it emits using [user-notice], which +# means its rendering will (A) go to stderr and (B) be delayed until +# the next time autosetup goes to output a message. +# +# If the -error flag is provided then it renders the message +# immediately to stderr and then exits. +# +# If neither -notice nor -error are used, the message will be sent to +# stdout without delay. +# +proc proj-indented-notice {args} { + set fErr "" + set outFunc "msg-result" + while {[llength $args] > 1} { + switch -exact -- [lindex $args 0] { + -error { + set args [lassign $args fErr] + set outFunc "user-notice" + } + -notice { + set args [lassign $args -] + set outFunc "user-notice" + } + default { + break + } + } + } + set lines [split [join $args] \n] + foreach line $lines { + set line [string trimleft $line] + if {"" eq $line} { + $outFunc $line + } else { + $outFunc " $line" + } + } + if {"" ne $fErr} { + show-notices + exit 1 + } +} + +# +# @proj-is-cross-compiling +# +# Returns 1 if cross-compiling, else 0. +# +proc proj-is-cross-compiling {} { + expr {[get-define host] ne [get-define build]} +} + +# +# @proj-strip-hash-comments value +# +# Expects to receive string input, which it splits on newlines, strips +# out any lines which begin with any number of whitespace followed by +# a '#', and returns a value containing the [append]ed results of each +# remaining line with a \n between each. It does not strip out +# comments which appear after the first non-whitespace character. +# +proc proj-strip-hash-comments {val} { + set x {} + foreach line [split $val \n] { + if {![string match "#*" [string trimleft $line]]} { + append x $line \n + } + } + return $x +} + +# +# @proj-cflags-without-werror +# +# Fetches [define $var], strips out any -Werror entries, and returns +# the new value. This is intended for temporarily stripping -Werror +# from CFLAGS or CPPFLAGS within the scope of a [define-push] block. +# +proc proj-cflags-without-werror {{var CFLAGS}} { + set rv {} + foreach f [get-define $var ""] { + switch -exact -- $f { + -Werror {} + default { lappend rv $f } + } + } + join $rv " " +} + +# +# @proj-check-function-in-lib +# +# A proxy for cc-check-function-in-lib with the following differences: +# +# - Does not make any global changes to the LIBS define. +# +# - Strips out the -Werror flag from CFLAGS before running the test, +# as these feature tests will often fail if -Werror is used. +# +# Returns the result of cc-check-function-in-lib (i.e. true or false). +# The resulting linker flags are stored in the [define] named +# lib_${function}. +# +proc proj-check-function-in-lib {function libs {otherlibs {}}} { + set found 0 + define-push {LIBS CFLAGS} { + #puts "CFLAGS before=[get-define CFLAGS]" + define CFLAGS [proj-cflags-without-werror] + #puts "CFLAGS after =[get-define CFLAGS]" + set found [cc-check-function-in-lib $function $libs $otherlibs] + } + return $found +} + +# +# @proj-search-for-header-dir ?-dirs LIST? ?-subdirs LIST? 
header +# +# Searches for $header in a combination of dirs and subdirs, specified +# by the -dirs {LIST} and -subdirs {LIST} flags (each of which have +# sane defaults). Returns either the first matching dir or an empty +# string. The return value does not contain the filename part. +# +proc proj-search-for-header-dir {header args} { + set subdirs {include} + set dirs {/usr /usr/local /mingw} +# Debatable: +# if {![proj-is-cross-compiling]} { +# lappend dirs [get-define prefix] +# } + while {[llength $args]} { + switch -exact -- [lindex $args 0] { + -dirs { set args [lassign $args - dirs] } + -subdirs { set args [lassign $args - subdirs] } + default { + proj-error "Unhandled argument: $args" + } + } + } + foreach dir $dirs { + foreach sub $subdirs { + if {[file exists $dir/$sub/$header]} { + return "$dir/$sub" + } + } + } + return "" +} + +# +# @proj-find-executable-path ?-v? binaryName +# +# Works similarly to autosetup's [find-executable-path $binName] but: +# +# - If the first arg is -v, it's verbose about searching, else it's quiet. +# +# Returns the full path to the result or an empty string. +# +proc proj-find-executable-path {args} { + set binName $args + set verbose 0 + if {[lindex $args 0] eq "-v"} { + set verbose 1 + set args [lassign $args - binName] + msg-checking "Looking for $binName ... " + } + set check [find-executable-path $binName] + if {$verbose} { + if {"" eq $check} { + msg-result "not found" + } else { + msg-result $check + } + } + return $check +} + +# +# @proj-bin-define binName ?defName? +# +# Uses [proj-find-executable-path $binName] to (verbosely) search for +# a binary, sets a define (see below) to the result, and returns the +# result (an empty string if not found). +# +# The define'd name is: If $defName is not empty, it is used as-is. If +# $defName is empty then "BIN_X" is used, where X is the upper-case +# form of $binName with any '-' characters replaced with '_'. +# +proc proj-bin-define {binName {defName {}}} { + set check [proj-find-executable-path -v $binName] + if {"" eq $defName} { + set defName "BIN_[string toupper [string map {- _} $binName]]" + } + define $defName $check + return $check +} + +# +# @proj-first-bin-of bin... +# +# Looks for the first binary found of the names passed to this +# function. If a match is found, the full path to that binary is +# returned, else "" is returned. +# +# Despite using cc-path-progs to do the search, this function clears +# any define'd name that function stores for the result (because the +# caller has no sensible way of knowing which [define] name it has +# unless they pass only a single argument). +# +proc proj-first-bin-of {args} { + set rc "" + foreach b $args { + set u [string toupper $b] + # Note that cc-path-progs defines $u to "false" if it finds no + # match. + if {[cc-path-progs $b]} { + set rc [get-define $u] + } + undefine $u + if {"" ne $rc} break + } + return $rc +} + +# +# @proj-opt-was-provided key +# +# Returns 1 if the user specifically provided the given configure flag +# or if it was specifically set using proj-opt-set, else 0. This can +# be used to distinguish between options which have a default value +# and those which were explicitly provided by the user, even if the +# latter is done in a way which uses the default value. 
+# +# For example, with a configure flag defined like: +# +# { foo-bar:=baz => {its help text} } +# +# This function will, when passed foo-bar, return 1 only if the user +# passes --foo-bar to configure, even if that invocation would resolve +# to the default value of baz. If the user does not explicitly pass in +# --foo-bar (with or without a value) then this returns 0. +# +# Calling [proj-opt-set] is, for purposes of the above, equivalent to +# explicitly passing in the flag. +# +# Note: unlike most functions which deal with configure --flags, this +# one does not validate that $key refers to a pre-defined flag. i.e. +# it accepts arbitrary keys, even those not defined via an [options] +# call. [proj-opt-set] manipulates the internal list of flags, such +# that new options set via that function will cause this function to +# return true. (That's an unintended and unavoidable side-effect, not +# specifically a feature which should be made use of.) +# +proc proj-opt-was-provided {key} { + dict exists $::autosetup(optset) $key +} + +# +# @proj-opt-set flag ?val? +# +# Force-set autosetup option $flag to $val. The value can be fetched +# later with [opt-val], [opt-bool], and friends. +# +# Returns $val. +# +proc proj-opt-set {flag {val 1}} { + if {$flag ni $::autosetup(options)} { + # We have to add this to autosetup(options) or else future calls + # to [opt-bool $flag] will fail validation of $flag. + lappend ::autosetup(options) $flag + } + dict set ::autosetup(optset) $flag $val + return $val +} + +# +# @proj-opt-exists flag +# +# Returns 1 if the given flag has been defined as a legal configure +# option, else returns 0. Options set via proj-opt-set "exist" for +# this purpose even if they were not defined via autosetup's +# [options] function. +# +proc proj-opt-exists {flag} { + expr {$flag in $::autosetup(options)}; +} + +# +# @proj-val-truthy val +# +# Returns 1 if $val appears to be a truthy value, else returns +# 0. Truthy values are any of {1 on true yes enabled} +# +proc proj-val-truthy {val} { + expr {$val in {1 on true yes enabled}} +} + +# +# @proj-opt-truthy flag +# +# Returns 1 if [opt-val $flag] appears to be a truthy value or +# [opt-bool $flag] is true. See proj-val-truthy. +# +proc proj-opt-truthy {flag} { + if {[proj-val-truthy [opt-val $flag]]} { return 1 } + set rc 0 + catch { + # opt-bool will throw if $flag is not a known boolean flag + set rc [opt-bool $flag] + } + return $rc +} + +# +# @proj-if-opt-truthy boolFlag thenScript ?elseScript? +# +# If [proj-opt-truthy $flag] is true, eval $then, else eval $else. +# +proc proj-if-opt-truthy {boolFlag thenScript {elseScript {}}} { + if {[proj-opt-truthy $boolFlag]} { + uplevel 1 $thenScript + } else { + uplevel 1 $elseScript + } +} + +# +# @proj-define-for-opt flag def ?msg? ?iftrue? ?iffalse? +# +# If [proj-opt-truthy $flag] then [define $def $iftrue] else [define +# $def $iffalse]. If $msg is not empty, output [msg-checking $msg] and +# a [msg-results ...] which corresponds to the result. Returns 1 if +# the opt-truthy check passes, else 0. +# +proc proj-define-for-opt {flag def {msg ""} {iftrue 1} {iffalse 0}} { + if {"" ne $msg} { + msg-checking "$msg " + } + set rcMsg "" + set rc 0 + if {[proj-opt-truthy $flag]} { + define $def $iftrue + set rc 1 + } else { + define $def $iffalse + } + switch -- [proj-val-truthy [get-define $def]] { + 0 { set rcMsg no } + 1 { set rcMsg yes } + } + if {"" ne $msg} { + msg-result $rcMsg + } + return $rc +} + +# +# @proj-opt-define-bool ?-v? optName defName ?descr? 
+# +# Checks [proj-opt-truthy $optName] and calls [define $defName X] +# where X is 0 for false and 1 for true. $descr is an optional +# [msg-checking] argument which defaults to $defName. Returns X. +# +# If args[0] is -v then the boolean semantics are inverted: if +# the option is set, it gets define'd to 0, else 1. Returns the +# define'd value. +# +proc proj-opt-define-bool {args} { + set invert 0 + if {[lindex $args 0] eq "-v"} { + incr invert + lassign $args - optName defName descr + } else { + lassign $args optName defName descr + } + if {"" eq $descr} { + set descr $defName + } + #puts "optName=$optName defName=$defName descr=$descr" + set rc 0 + msg-checking "[join $descr] ... " + set rc [proj-opt-truthy $optName] + if {$invert} { + set rc [expr {!$rc}] + } + msg-result [string map {0 no 1 yes} $rc] + define $defName $rc + return $rc +} + +# +# @proj-check-module-loader +# +# Check for module-loading APIs (libdl/libltdl)... +# +# Looks for libltdl or dlopen(), the latter either in -ldl or built in +# to libc (as it is on some platforms). Returns 1 if found, else +# 0. Either way, it `define`'s: +# +# - HAVE_LIBLTDL to 1 or 0 if libltdl is found/not found +# - HAVE_LIBDL to 1 or 0 if dlopen() is found/not found +# - LDFLAGS_MODULE_LOADER one of ("-lltdl", "-ldl", or ""), noting +# that -ldl may legally be empty on some platforms even if +# HAVE_LIBDL is true (indicating that dlopen() is available without +# extra link flags). LDFLAGS_MODULE_LOADER also gets "-rdynamic" appended +# to it because otherwise trying to open DLLs will result in undefined +# symbol errors. +# +# Note that if it finds LIBLTDL it does not look for LIBDL, so will +# report only that is has LIBLTDL. +# +proc proj-check-module-loader {} { + msg-checking "Looking for module-loader APIs... " + if {99 ne [get-define LDFLAGS_MODULE_LOADER 99]} { + if {1 eq [get-define HAVE_LIBLTDL 0]} { + msg-result "(cached) libltdl" + return 1 + } elseif {1 eq [get-define HAVE_LIBDL 0]} { + msg-result "(cached) libdl" + return 1 + } + # else: wha??? + } + set HAVE_LIBLTDL 0 + set HAVE_LIBDL 0 + set LDFLAGS_MODULE_LOADER "" + set rc 0 + puts "" ;# cosmetic kludge for cc-check-XXX + if {[cc-check-includes ltdl.h] && [cc-check-function-in-lib lt_dlopen ltdl]} { + set HAVE_LIBLTDL 1 + set LDFLAGS_MODULE_LOADER "-lltdl -rdynamic" + msg-result " - Got libltdl." + set rc 1 + } elseif {[cc-with {-includes dlfcn.h} { + cctest -link 1 -declare "extern char* dlerror(void);" -code "dlerror();"}]} { + msg-result " - This system can use dlopen() without -ldl." + set HAVE_LIBDL 1 + set LDFLAGS_MODULE_LOADER "" + set rc 1 + } elseif {[cc-check-includes dlfcn.h]} { + set HAVE_LIBDL 1 + set rc 1 + if {[cc-check-function-in-lib dlopen dl]} { + msg-result " - dlopen() needs libdl." + set LDFLAGS_MODULE_LOADER "-ldl -rdynamic" + } else { + msg-result " - dlopen() not found in libdl. Assuming dlopen() is built-in." + set LDFLAGS_MODULE_LOADER "-rdynamic" + } + } + define HAVE_LIBLTDL $HAVE_LIBLTDL + define HAVE_LIBDL $HAVE_LIBDL + define LDFLAGS_MODULE_LOADER $LDFLAGS_MODULE_LOADER + return $rc +} + +# +# @proj-no-check-module-loader +# +# Sets all flags which would be set by proj-check-module-loader to +# empty/falsy values, as if those checks had failed to find a module +# loader. Intended to be called in place of that function when +# a module loader is explicitly not desired. +# +proc proj-no-check-module-loader {} { + define HAVE_LIBDL 0 + define HAVE_LIBLTDL 0 + define LDFLAGS_MODULE_LOADER "" +} + +# +# @proj-file-content ?-trim? 
filename +# +# Opens the given file, reads all of its content, and returns it. If +# the first arg is -trim, the contents of the file named by the second +# argument are trimmed before returning them. +# +proc proj-file-content {args} { + set trim 0 + set fname $args + if {"-trim" eq [lindex $args 0]} { + set trim 1 + lassign $args - fname + } + set fp [open $fname rb] + set rc [read $fp] + close $fp + if {$trim} { return [string trim $rc] } + return $rc +} + +# +# @proj-file-conent filename +# +# Returns the contents of the given file as an array of lines, with +# the EOL stripped from each input line. +# +proc proj-file-content-list {fname} { + set fp [open $fname rb] + set rc {} + while { [gets $fp line] >= 0 } { + lappend rc $line + } + close $fp + return $rc +} + +# +# @proj-file-write ?-ro? fname content +# +# Works like autosetup's [writefile] but explicitly uses binary mode +# to avoid EOL translation on Windows. If $fname already exists, it is +# overwritten, even if it's flagged as read-only. +# +proc proj-file-write {args} { + if {"-ro" eq [lindex $args 0]} { + lassign $args ro fname content + } else { + set ro "" + lassign $args fname content + } + file delete -force -- $fname; # in case it's read-only + set f [open $fname wb] + puts -nonewline $f $content + close $f + if {"" ne $ro} { + catch { + exec chmod -w $fname + #file attributes -w $fname; #jimtcl has no 'attributes' + } + } +} + +# +# @proj-check-compile-commands ?-assume-for-clang? ?configFlag? +# +# Checks the compiler for compile_commands.json support. If +# $configFlag is not empty then it is assumed to be the name of an +# autosetup boolean config which controls whether to run/skip this +# check. +# +# If -assume-for-clang is provided and $configFlag is not empty and CC +# matches *clang* and no --$configFlag was explicitly provided to the +# configure script then behave as if --$configFlag had been provided. +# To disable that assumption, either don't pass -assume-for-clang or +# pass --$configFlag=0 to the configure script. (The reason for this +# behavior is that clang supports compile-commands but some other +# compilers report false positives with these tests.) +# +# Returns 1 if supported, else 0, and defines HAVE_COMPILE_COMMANDS to +# that value. Defines MAKE_COMPILATION_DB to "yes" if supported, "no" +# if not. The use of MAKE_COMPILATION_DB is deprecated/discouraged: +# HAVE_COMPILE_COMMANDS is preferred. +# +# ACHTUNG: this test has a long history of false positive results +# because of compilers reacting differently to the -MJ flag. Because +# of this, it is recommended that this support be an opt-in feature, +# rather than an on-by-default default one. That is: in the +# configure script define the option as +# {--the-flag-name=0 => {Enable ....}} +# +proc proj-check-compile-commands {args} { + set i 0 + set configFlag {} + set fAssumeForClang 0 + set doAssume 0 + msg-checking "compile_commands.json support... " + if {"-assume-for-clang" eq [lindex $args 0]} { + lassign $args - configFlag + incr fAssumeForClang + } elseif {1 == [llength $args]} { + lassign $args configFlag + } else { + proj-error "Invalid arguments" + } + if {1 == $fAssumeForClang && "" ne $configFlag} { + if {[string match *clang* [get-define CC]] + && ![proj-opt-was-provided $configFlag] + && ![proj-opt-truthy $configFlag]} { + proj-indented-notice [subst -nocommands -nobackslashes { + CC appears to be clang, so assuming that --$configFlag is likely + to work. 
To disable this assumption use --$configFlag=0.}] + incr doAssume + } + } + if {!$doAssume && "" ne $configFlag && ![proj-opt-truthy $configFlag]} { + msg-result "check disabled. Use --${configFlag} to enable it." + define HAVE_COMPILE_COMMANDS 0 + define MAKE_COMPILATION_DB no + return 0 + } else { + if {[cctest -lang c -cflags {/dev/null -MJ} -source {}]} { + # This test reportedly incorrectly succeeds on one of + # Martin G.'s older systems. drh also reports a false + # positive on an unspecified older Mac system. + msg-result "compiler supports -MJ. Assuming it's useful for compile_commands.json" + define MAKE_COMPILATION_DB yes; # deprecated + define HAVE_COMPILE_COMMANDS 1 + return 1 + } else { + msg-result "compiler does not support compile_commands.json" + define MAKE_COMPILATION_DB no + define HAVE_COMPILE_COMMANDS 0 + return 0 + } + } +} + +# +# @proj-touch filename +# +# Runs the 'touch' external command on one or more files, ignoring any +# errors. +# +proc proj-touch {filename} { + catch { exec touch {*}$filename } +} + +# +# @proj-make-from-dot-in ?-touch? infile ?outfile? +# +# Uses [make-template] to create makefile(-like) file(s) $outfile from +# $infile but explicitly makes the output read-only, to avoid +# inadvertent editing (who, me?). +# +# If $outfile is empty then: +# +# - If $infile is a 2-element list, it is assumed to be an in/out pair, +# and $outfile is set from the 2nd entry in that list. Else... +# +# - $outfile is set to $infile stripped of its extension. +# +# If the first argument is -touch then the generated file is touched +# to update its timestamp. This can be used as a workaround for +# cases where (A) autosetup does not update the file because it was +# not really modified and (B) the file *really* needs to be updated to +# please the build process. +# +# Failures when running chmod or touch are silently ignored. +# +proc proj-make-from-dot-in {args} { + set fIn "" + set fOut "" + set touch 0 + if {[lindex $args 0] eq "-touch"} { + set touch 1 + lassign $args - fIn fOut + } else { + lassign $args fIn fOut + } + if {"" eq $fOut} { + if {[llength $fIn]>1} { + lassign $fIn fIn fOut + } else { + set fOut [file rootname $fIn] + } + } + #puts "filenames=$filename" + if {[file exists $fOut]} { + catch { exec chmod u+w $fOut } + } + #puts "making template: $fIn ==> $fOut" + #define-push {top_srcdir} { + #puts "--- $fIn $fOut top_srcdir=[get-define top_srcdir]" + make-template $fIn $fOut + #puts "--- $fIn $fOut top_srcdir=[get-define top_srcdir]" + # make-template modifies top_srcdir + #} + if {$touch} { + proj-touch $fOut + } + catch { + exec chmod -w $fOut + #file attributes -w $f; #jimtcl has no 'attributes' + } +} + +# +# @proj-check-profile-flag ?flagname? +# +# Checks for the boolean configure option named by $flagname. If set, +# it checks if $CC seems to refer to gcc. If it does (or appears to) +# then it defines CC_PROFILE_FLAG to "-pg" and returns 1, else it +# defines CC_PROFILE_FLAG to "" and returns 0. +# +# Note that the resulting flag must be added to both CFLAGS and +# LDFLAGS in order for binaries to be able to generate "gmon.out". In +# order to avoid potential problems with escaping, space-containing +# tokens, and interfering with autosetup's use of these vars, this +# routine does not directly modify CFLAGS or LDFLAGS. +# +proc proj-check-profile-flag {{flagname profile}} { + #puts "flagname=$flagname ?[proj-opt-truthy $flagname]?" 
+
+ if {[proj-opt-truthy $flagname]} {
+ set CC [get-define CC]
+ regsub {.*ccache *} $CC "" CC
+ # ^^^ if CC="ccache gcc" then [exec] treats "ccache gcc" as a
+ # single binary name and fails. So strip any leading ccache part
+ # for this purpose.
+ if { ![catch { exec $CC --version } msg]} {
+ if {[string first gcc $CC] != -1} {
+ define CC_PROFILE_FLAG "-pg"
+ return 1
+ }
+ }
+ }
+ define CC_PROFILE_FLAG ""
+ return 0
+}
+
+#
+# @proj-looks-like-windows ?key?
+#
+# Returns 1 if this appears to be a Windows environment (MinGW,
+# Cygwin, MSys), else returns 0. The optional argument is the name of
+# an autosetup define which contains platform name info, defaulting to
+# "host" (meaning, somewhat counterintuitively, the target system, not
+# the current host). The other legal value is "build" (the build
+# machine, i.e. the local host). If $key == "build" then some
+# additional checks may be performed which are not applicable when
+# $key == "host".
+#
+proc proj-looks-like-windows {{key host}} {
+ global autosetup
+ switch -glob -- [get-define $key] {
+ *-*-ming* - *-*-cygwin - *-*-msys - *windows* {
+ return 1
+ }
+ }
+ if {$key eq "build"} {
+ # These apply only to the local OS, not a cross-compilation target,
+ # as the above check potentially can.
+ if {$::autosetup(iswin)} { return 1 }
+ if {[find-an-executable cygpath] ne "" || $::tcl_platform(os) eq "Windows NT"} {
+ return 1
+ }
+ }
+ return 0
+}
+
+#
+# @proj-looks-like-mac ?key?
+#
+# Looks at either the 'host' (==compilation target platform) or
+# 'build' (==the being-built-on platform) define value and returns 1
+# if that value seems to indicate that it represents a Mac platform,
+# else returns 0.
+#
+proc proj-looks-like-mac {{key host}} {
+ switch -glob -- [get-define $key] {
+ *-*-darwin* {
+ # https://sqlite.org/forum/forumpost/7b218c3c9f207646
+ # There's at least one Linux out there which matches *apple*.
+ return 1
+ }
+ default {
+ return 0
+ }
+ }
+}
+
+#
+# @proj-exe-extension
+#
+# Checks autosetup's "host" and "build" defines to see if the build
+# host and target are Windows-esque (Cygwin, MinGW, MSys). If the
+# build environment is Windows-esque then BUILD_EXEEXT is [define]'d
+# to ".exe", else "". If the target, a.k.a. "host", is Windows-esque
+# then TARGET_EXEEXT is [define]'d to ".exe", else "".
+#
+proc proj-exe-extension {} {
+ set rH ""
+ set rB ""
+ if {[proj-looks-like-windows host]} {
+ set rH ".exe"
+ }
+ if {[proj-looks-like-windows build]} {
+ set rB ".exe"
+ }
+ define BUILD_EXEEXT $rB
+ define TARGET_EXEEXT $rH
+}
+
+#
+# @proj-dll-extension
+#
+# Works like proj-exe-extension except that it defines BUILD_DLLEXT
+# and TARGET_DLLEXT to one of (.so, .dll, .dylib).
+#
+# Trivia: for .dylib files, the linker needs the -dynamiclib flag
+# instead of -shared.
+#
+proc proj-dll-extension {} {
+ set inner {{key} {
+ if {[proj-looks-like-mac $key]} {
+ return ".dylib"
+ }
+ if {[proj-looks-like-windows $key]} {
+ return ".dll"
+ }
+ return ".so"
+ }}
+ define BUILD_DLLEXT [apply $inner build]
+ define TARGET_DLLEXT [apply $inner host]
+}
+
+#
+# @proj-lib-extension
+#
+# Static-library counterpart of proj-dll-extension. Defines
+# BUILD_LIBEXT and TARGET_LIBEXT to the conventional static library
+# extension for the build platform and the target platform,
+# respectively.
+#
+proc proj-lib-extension {} {
+ set inner {{key} {
+ switch -glob -- [get-define $key] {
+ *-*-ming* - *-*-cygwin - *-*-msys {
+ return ".a"
+ # ^^^ this was ".lib" until 2025-02-07. 
See
+ # https://sqlite.org/forum/forumpost/02db2d4240
+ }
+ default {
+ return ".a"
+ }
+ }
+ }}
+ define BUILD_LIBEXT [apply $inner build]
+ define TARGET_LIBEXT [apply $inner host]
+}
+
+#
+# @proj-file-extensions
+#
+# Calls all of the proj-*-extension functions.
+#
+proc proj-file-extensions {} {
+ proj-exe-extension
+ proj-dll-extension
+ proj-lib-extension
+}
+
+#
+# @proj-affirm-files-exist ?-v? filename...
+#
+# Expects a list of file names. If any one of them does not exist in
+# the filesystem, it fails fatally with an informative message.
+# Returns the last file name it checks. If the first argument is -v
+# then it emits msg-checking/msg-result messages for each file.
+#
+proc proj-affirm-files-exist {args} {
+ set rc ""
+ set verbose 0
+ if {[lindex $args 0] eq "-v"} {
+ set verbose 1
+ set args [lrange $args 1 end]
+ }
+ foreach f $args {
+ if {$verbose} { msg-checking "Looking for $f ... " }
+ if {![file exists $f]} {
+ user-error "not found: $f"
+ }
+ if {$verbose} { msg-result "" }
+ set rc $f
+ }
+ return $rc
+}
+
+#
+# @proj-check-emsdk
+#
+# Emscripten is used for doing in-tree builds of web-based WASM stuff,
+# as opposed to WASI-based WASM or WASM binaries we import from other
+# places. This is only set up for Unix-style OSes and is untested
+# anywhere but Linux. Requires that the --with-emsdk flag be
+# registered with autosetup.
+#
+# It looks for the SDK in the location specified by --with-emsdk.
+# Values of "" or "auto" mean to check for the environment var EMSDK
+# (which gets set by the emsdk_env.sh script from the SDK) or that
+# same var passed to configure.
+#
+# If the given directory is found, it expects to find emsdk_env.sh in
+# that directory, as well as the emcc compiler somewhere under there.
+#
+# If the --with-emsdk[=DIR] flag is explicitly provided and the SDK is
+# not found then a fatal error is generated, otherwise failure to find
+# the SDK is not fatal.
+#
+# Defines the following:
+#
+# - HAVE_EMSDK = 0 or 1 (this function's return value)
+# - EMSDK_HOME = "" or top dir of the emsdk
+# - EMSDK_ENV_SH = "" or $EMSDK_HOME/emsdk_env.sh
+# - BIN_EMCC = "" or $EMSDK_HOME/upstream/emscripten/emcc
+#
+# Returns 1 if EMSDK_ENV_SH is found, else 0. If EMSDK_HOME is not empty
+# but BIN_EMCC is empty then emcc was not found in the EMSDK_HOME, in
+# which case we have to rely on the fact that sourcing $EMSDK_ENV_SH
+# from a shell will add emcc to the $PATH.
+#
+proc proj-check-emsdk {} {
+ set emsdkHome [opt-val with-emsdk]
+ define EMSDK_HOME ""
+ define EMSDK_ENV_SH ""
+ define BIN_EMCC ""
+ set hadValue [llength $emsdkHome]
+ msg-checking "Emscripten SDK? "
+ if {$emsdkHome in {"" "auto"}} {
+ # Check the environment. $EMSDK gets set by sourcing emsdk_env.sh.
+ set emsdkHome [get-env EMSDK ""]
+ }
+ set rc 0
+ if {$emsdkHome ne ""} {
+ define EMSDK_HOME $emsdkHome
+ set emsdkEnv "$emsdkHome/emsdk_env.sh"
+ if {[file exists $emsdkEnv]} {
+ msg-result "$emsdkHome"
+ define EMSDK_ENV_SH $emsdkEnv
+ set rc 1
+ set emcc "$emsdkHome/upstream/emscripten/emcc"
+ if {[file exists $emcc]} {
+ define BIN_EMCC $emcc
+ }
+ } else {
+ msg-result "emsdk_env.sh not found in $emsdkHome"
+ }
+ } else {
+ msg-result "not found"
+ }
+ if {$hadValue && 0 == $rc} {
+ # Fail if it was explicitly requested but not found
+ proj-fatal "Cannot find the Emscripten SDK"
+ }
+ define HAVE_EMSDK $rc
+ return $rc
+}
+
+#
+# @proj-cc-check-Wl-flag ?flag ?args?? 
+# +# Checks whether the given linker flag (and optional arguments) can be +# passed from the compiler to the linker using one of these formats: +# +# - -Wl,flag[,arg1[,...argN]] +# - -Wl,flag -Wl,arg1 ...-Wl,argN +# +# If so, that flag string is returned, else an empty string is +# returned. +# +proc proj-cc-check-Wl-flag {args} { + cc-with {-link 1} { + # Try -Wl,flag,...args + set fli "-Wl" + foreach f $args { append fli ",$f" } + if {[cc-check-flags $fli]} { + return $fli + } + # Try -Wl,flag -Wl,arg1 ...-Wl,argN + set fli "" + foreach f $args { append fli "-Wl,$f " } + if {[cc-check-flags $fli]} { + return [string trim $fli] + } + return "" + } +} + +# +# @proj-check-rpath +# +# Tries various approaches to handling the -rpath link-time +# flag. Defines LDFLAGS_RPATH to that/those flag(s) or an empty +# string. Returns 1 if it finds an option, else 0. +# +# By default, the rpath is set to $prefix/lib. However, if either of +# --exec-prefix=... or --libdir=... are explicitly passed to +# configure then [get-define libdir] is used (noting that it derives +# from exec-prefix by default). +# +proc proj-check-rpath {} { + if {[proj-opt-was-provided libdir] + || [proj-opt-was-provided exec-prefix]} { + set lp "[get-define libdir]" + } else { + set lp "[get-define prefix]/lib" + } + # If we _don't_ use cc-with {} here (to avoid updating the global + # CFLAGS or LIBS or whatever it is that cc-check-flags updates) then + # downstream tests may fail because the resulting rpath gets + # implicitly injected into them. + cc-with {-link 1} { + if {[cc-check-flags "-rpath $lp"]} { + define LDFLAGS_RPATH "-rpath $lp" + } else { + set wl [proj-cc-check-Wl-flag -rpath $lp] + if {"" eq $wl} { + set wl [proj-cc-check-Wl-flag -R$lp] + } + if {"" eq $wl} { + # HP-UX: https://sqlite.org/forum/forumpost/d80ecdaddd + set wl [proj-cc-check-Wl-flag +b $lp] + } + define LDFLAGS_RPATH $wl + } + } + expr {"" ne [get-define LDFLAGS_RPATH]} +} + +# +# @proj-check-soname ?libname? +# +# Checks whether CC supports the -Wl,-soname,lib... flag. If so, it +# returns 1 and defines LDFLAGS_SONAME_PREFIX to the flag's prefix, to +# which the client would need to append "libwhatever.N". If not, it +# returns 0 and defines LDFLAGS_SONAME_PREFIX to an empty string. +# +# The libname argument is only for purposes of running the flag +# compatibility test, and is not included in the resulting +# LDFLAGS_SONAME_PREFIX. It is provided so that clients may +# potentially avoid some end-user confusion by using their own lib's +# name here (which shows up in the "checking..." output). +# +proc proj-check-soname {{libname "libfoo.so.0"}} { + cc-with {-link 1} { + if {[cc-check-flags "-Wl,-soname,${libname}"]} { + define LDFLAGS_SONAME_PREFIX "-Wl,-soname," + return 1 + } elseif {[cc-check-flags "-Wl,+h,${libname}"]} { + # HP-UX: https://sqlite.org/forum/forumpost/d80ecdaddd + define LDFLAGS_SONAME_PREFIX "-Wl,+h," + return 1 + } else { + define LDFLAGS_SONAME_PREFIX "" + return 0 + } + } +} + +# +# @proj-check-fsanitize ?list-of-opts? +# +# Checks whether CC supports -fsanitize=X, where X is each entry of +# the given list of flags. If any of those flags are supported, it +# returns the string "-fsanitize=X..." where X... is a comma-separated +# list of all flags from the original set which are supported. If none +# of the given options are supported then it returns an empty string. 
+# +# Example: +# +# set f [proj-check-fsanitize {address bounds-check just-testing}] +# +# Will, on many systems, resolve to "-fsanitize=address,bounds-check", +# but may also resolve to "-fsanitize=address". +# +proc proj-check-fsanitize {{opts {address bounds-strict}}} { + set sup {} + foreach opt $opts { + # -nooutput is used because -fsanitize=hwaddress will otherwise + # pass this test on x86_64, but then warn at build time that + # "hwaddress is not supported for this target". + cc-with {-nooutput 1} { + if {[cc-check-flags "-fsanitize=$opt"]} { + lappend sup $opt + } + } + } + if {[llength $sup] > 0} { + return "-fsanitize=[join $sup ,]" + } + return "" +} + +# +# Internal helper for proj-dump-defs-json. Expects to be passed a +# [define] name and the variadic $args which are passed to +# proj-dump-defs-json. If it finds a pattern match for the given +# $name in the various $args, it returns the type flag for that $name, +# e.g. "-str" or "-bare", else returns an empty string. +# +proc proj-defs-type_ {name spec} { + foreach {type patterns} $spec { + foreach pattern $patterns { + if {[string match $pattern $name]} { + return $type + } + } + } + return "" +} + +# +# Internal helper for proj-defs-format_: returns a JSON-ish quoted +# form of the given string-type values. It only performs the most +# basic of escaping. The input must not contain any control +# characters. +# +proc proj-quote-str_ {value} { + return \"[string map [list \\ \\\\ \" \\\"] $value]\" +} + +# +# An internal impl detail of proj-dump-defs-json. Requires a data +# type specifier, as used by make-config-header, and a value. Returns +# the formatted value or the value $::proj__Config(defs-skip) if the caller +# should skip emitting that value. +# +set ::proj__Config(defs-skip) "-proj-defs-format_ sentinel" +proc proj-defs-format_ {type value} { + switch -exact -- $type { + -bare { + # Just output the value unchanged + } + -none { + set value $::proj__Config(defs-skip) + } + -str { + set value [proj-quote-str_ $value] + } + -auto { + # Automatically determine the type + if {![string is integer -strict $value]} { + set value [proj-quote-str_ $value] + } + } + -array { + set ar {} + foreach v $value { + set v [proj-defs-format_ -auto $v] + if {$::proj__Config(defs-skip) ne $v} { + lappend ar $v + } + } + set value "\[ [join $ar {, }] \]" + } + "" { + set value $::proj__Config(defs-skip) + } + default { + proj-fatal "Unknown type in proj-dump-defs-json: $type" + } + } + return $value +} + +# +# @proj-dump-defs-json outfile ...flags +# +# This function works almost identically to autosetup's +# make-config-header but emits its output in JSON form. It is not a +# fully-functional JSON emitter, and will emit broken JSON for +# complicated outputs, but should be sufficient for purposes of +# emitting most configure vars (numbers and simple strings). +# +# In addition to the formatting flags supported by make-config-header, +# it also supports: +# +# -array {patterns...} +# +# Any defines matching the given patterns will be treated as a list of +# values, each of which will be formatted as if it were in an -auto {...} +# set, and the define will be emitted to JSON in the form: +# +# "ITS_NAME": [ "value1", ...valueN ] +# +# Achtung: if a given -array pattern contains values which themselves +# contains spaces... 
+# +# define-append foo {"-DFOO=bar baz" -DBAR="baz barre"} +# +# will lead to: +# +# ["-DFOO=bar baz", "-DBAR=\"baz", "barre\""] +# +# Neither is especially satisfactory (and the second is useless), and +# handling of such values is subject to change if any such values ever +# _really_ need to be processed by our source trees. +# +proc proj-dump-defs-json {file args} { + file mkdir [file dirname $file] + set lines {} + lappend args -bare {SIZEOF_* HAVE_DECL_*} -auto HAVE_* + foreach n [lsort [dict keys [all-defines]]] { + set type [proj-defs-type_ $n $args] + set value [proj-defs-format_ $type [get-define $n]] + if {$::proj__Config(defs-skip) ne $value} { + lappend lines "\"$n\": ${value}" + } + } + set buf {} + lappend buf [join $lines ",\n"] + write-if-changed $file $buf { + msg-result "Created $file" + } +} + +# +# @proj-xfer-option-aliases map +# +# Expects a list of pairs of configure flags which have been +# registered with autosetup, in this form: +# +# { alias1 => canonical1 +# aliasN => canonicalN ... } +# +# The names must not have their leading -- part and must be in the +# form which autosetup will expect for passing to [opt-val NAME] and +# friends. +# +# Comment lines are permitted in the input. +# +# For each pair of ALIAS and CANONICAL, if --ALIAS is provided but +# --CANONICAL is not, the value of the former is copied to the +# latter. If --ALIAS is not provided, this is a no-op. If both have +# explicitly been provided a fatal usage error is triggered. +# +# Motivation: autosetup enables "hidden aliases" in [options] lists, +# and elides the aliases from --help output but does no further +# handling of them. For example, when --alias is a hidden alias of +# --canonical and a user passes --alias=X, [opt-val canonical] returns +# no value. i.e. the script must check both [opt-val alias] and +# [opt-val canonical]. The intent here is that this function be +# passed such mappings immediately after [options] is called, to carry +# over any values from hidden aliases into their canonical names, such +# that [opt-value canonical] will return X if --alias=X is passed to +# configure. +# +# That said: autosetup's [opt-str] does support alias forms, but it +# requires that the caller know all possible aliases. It's simpler, in +# terms of options handling, if there's only a single canonical name +# which each down-stream call of [opt-...] has to know. +# +proc proj-xfer-options-aliases {mapping} { + foreach {hidden - canonical} [proj-strip-hash-comments $mapping] { + if {[proj-opt-was-provided $hidden]} { + if {[proj-opt-was-provided $canonical]} { + proj-fatal "both --$canonical and its alias --$hidden were used. Use only one or the other." + } else { + proj-opt-set $canonical [opt-val $hidden] + } + } + } +} + +# +# Arguable/debatable... +# +# When _not_ cross-compiling and CC_FOR_BUILD is _not_ explicitly +# specified, force CC_FOR_BUILD to be the same as CC, so that: +# +# ./configure CC=clang +# +# will use CC_FOR_BUILD=clang, instead of cc, for building in-tree +# tools. This is based off of an email discussion and is thought to +# be likely to cause less confusion than seeing 'cc' invocations +# when when the user passes CC=clang. +# +# Sidebar: if we do this before the cc package is installed, it gets +# reverted by that package. Ergo, the cc package init will tell the +# user "Build C compiler...cc" shortly before we tell them otherwise. 
+# +proc proj-redefine-cc-for-build {} { + if {![proj-is-cross-compiling] + && [get-define CC] ne [get-define CC_FOR_BUILD] + && "nope" eq [get-env CC_FOR_BUILD "nope"]} { + user-notice "Re-defining CC_FOR_BUILD to CC=[get-define CC]. To avoid this, explicitly pass CC_FOR_BUILD=..." + define CC_FOR_BUILD [get-define CC] + } +} + +# +# @proj-which-linenoise headerFile +# +# Attempts to determine whether the given linenoise header file is of +# the "antirez" or "msteveb" flavor. It returns 2 for msteveb, else 1 +# (it does not validate that the header otherwise contains the +# linenoise API). +# +proc proj-which-linenoise {dotH} { + set srcHeader [proj-file-content $dotH] + if {[string match *userdata* $srcHeader]} { + return 2 + } else { + return 1 + } +} + +# +# @proj-remap-autoconf-dir-vars +# +# "Re-map" the autoconf-conventional --XYZdir flags into something +# which is more easily overridable from a make invocation. +# +# Based off of notes in . +# +# Consider: +# +# $ ./configure --prefix=/foo +# $ make install prefix=/blah +# +# In that make invocation, $(libdir) would, at make-time, normally be +# hard-coded to /foo/lib, rather than /blah/lib. That happens because +# autosetup exports conventional $prefix-based values for the numerous +# autoconfig-compatible XYZdir vars at configure-time. What we would +# normally want, however, is that --libdir derives from the make-time +# $(prefix). The distinction between configure-time and make-time is +# the significant factor there. +# +# This function attempts to reconcile those vars in such a way that +# they will derive, at make-time, from $(prefix) in a conventional +# manner unless they are explicitly overridden at configure-time, in +# which case those overrides takes precedence. +# +# Each autoconf-relvant --XYZ flag which is explicitly passed to +# configure is exported as-is, as are those which default to some +# top-level system directory, e.g. /etc or /var. All which derive +# from either $prefix or $exec_prefix are exported in the form of a +# Makefile var reference, e.g. libdir=${exec_prefix}/lib. Ergo, if +# --exec-prefix=FOO is passed to configure, libdir will still derive, +# at make-time, from whatever exec_prefix is passed to make, and will +# use FOO if exec_prefix is not overridden at make-time. Without this +# post-processing, libdir would be cemented in as FOO/lib at +# configure-time, so could be tedious to override properly via a make +# invocation. +# +proc proj-remap-autoconf-dir-vars {} { + set prefix [get-define prefix] + set exec_prefix [get-define exec_prefix $prefix] + # The following var derefs must be formulated such that they are + # legal for use in (A) makefiles, (B) pkgconfig files, and (C) TCL's + # [subst] command. i.e. they must use the form ${X}. + foreach {flag makeVar makeDeref} { + exec-prefix exec_prefix ${prefix} + datadir datadir ${prefix}/share + mandir mandir ${datadir}/man + includedir includedir ${prefix}/include + bindir bindir ${exec_prefix}/bin + libdir libdir ${exec_prefix}/lib + sbindir sbindir ${exec_prefix}/sbin + sysconfdir sysconfdir /etc + sharedstatedir sharedstatedir ${prefix}/com + localstatedir localstatedir /var + runstatedir runstatedir /run + infodir infodir ${datadir}/info + libexecdir libexecdir ${exec_prefix}/libexec + } { + if {[proj-opt-was-provided $flag]} { + define $makeVar [join [opt-val $flag]] + } else { + define $makeVar [join $makeDeref] + } + # Maintenance reminder: the [join] call is to avoid {braces} + # around the output when someone passes in, + # e.g. 
--libdir=\${prefix}/foo/bar. Debian's SQLite package build + # script does that. + } +} + +# +# @proj-env-file flag ?default? +# +# If a file named .env-$flag exists, this function returns a +# trimmed copy of its contents, else it returns $dflt. The intended +# usage is that things like developer-specific CFLAGS preferences can +# be stored in .env-CFLAGS. +# +proc proj-env-file {flag {dflt ""}} { + set fn ".env-${flag}" + if {[file readable $fn]} { + return [proj-file-content -trim $fn] + } + return $dflt +} + +# +# @proj-get-env var ?default? +# +# Extracts the value of "environment" variable $var from the first of +# the following places where it's defined: +# +# - Passed to configure as $var=... +# - Exists as an environment variable +# - A file named .env-$var (see [proj-env-file]) +# +# If none of those are set, $dflt is returned. +# +proc proj-get-env {var {dflt ""}} { + get-env $var [proj-env-file $var $dflt] +} + +# +# @proj-scope ?lvl? +# +# Returns the name of the _calling_ proc from ($lvl + 1) levels up the +# call stack (where the caller's level will be 1 up from _this_ +# call). If $lvl would resolve to global scope "global scope" is +# returned and if it would be negative then a string indicating such +# is returned (as opposed to throwing an error). +# +proc proj-scope {{lvl 0}} { + #uplevel [expr {$lvl + 1}] {lindex [info level 0] 0} + set ilvl [info level] + set offset [expr {$ilvl - $lvl - 1}] + if { $offset < 0} { + return "invalid scope ($offset)" + } elseif { $offset == 0} { + return "global scope" + } else { + return [lindex [info level $offset] 0] + } +} + +# +# Deprecated name of [proj-scope]. +# +proc proj-current-scope {{lvl 0}} { + puts stderr \ + "Deprecated proj-current-scope called from [proj-scope 1]. Use proj-scope instead." + proj-scope [incr lvl] +} + +# +# Converts parts of tclConfig.sh to autosetup [define]s. +# +# Expects to be passed the name of a value tclConfig.sh or an empty +# string. It converts certain parts of that file's contents to +# [define]s (see the code for the whole list). If $tclConfigSh is an +# empty string then it [define]s the various vars as empty strings. +# +proc proj-tclConfig-sh-to-autosetup {tclConfigSh} { + set shBody {} + set tclVars { + TCL_INCLUDE_SPEC + TCL_LIBS + TCL_LIB_SPEC + TCL_STUB_LIB_SPEC + TCL_EXEC_PREFIX + TCL_PREFIX + TCL_VERSION + TCL_MAJOR_VERSION + TCL_MINOR_VERSION + TCL_PACKAGE_PATH + TCL_PATCH_LEVEL + TCL_SHLIB_SUFFIX + } + # Build a small shell script which proxies the $tclVars from + # $tclConfigSh into autosetup code... + lappend shBody "if test x = \"x${tclConfigSh}\"; then" + foreach v $tclVars { + lappend shBody "$v= ;" + } + lappend shBody "else . \"${tclConfigSh}\"; fi" + foreach v $tclVars { + lappend shBody "echo define $v {\$$v} ;" + } + lappend shBody "exit" + set shBody [join $shBody "\n"] + #puts "shBody=$shBody\n"; exit + eval [exec echo $shBody | sh] +} + +# +# @proj-tweak-default-env-dirs +# +# This function is not useful before [use system] is called to set up +# --prefix and friends. It should be called as soon after [use system] +# as feasible. +# +# For certain target environments, if --prefix is _not_ passed in by +# the user, set the prefix to an environment-specific default. For +# such environments its does [define prefix ...] and [proj-opt-set +# prefix ...], but it does not process vars derived from the prefix, +# e.g. exec-prefix. 
To do so it is generally necessary to also call +# proj-remap-autoconf-dir-vars late in the config process (immediately +# before ".in" files are filtered). +# +# Similar modifications may be made for --mandir. +# +# Returns >0 if it modifies the environment, else 0. +# +proc proj-tweak-default-env-dirs {} { + set rc 0 + switch -glob -- [get-define host] { + *-haiku { + if {![proj-opt-was-provided prefix]} { + set hdir /boot/home/config/non-packaged + proj-opt-set prefix $hdir + define prefix $hdir + incr rc + } + if {![proj-opt-was-provided mandir]} { + set hdir /boot/system/documentation/man + proj-opt-set mandir $hdir + define mandir $hdir + incr rc + } + } + } + return $rc +} + +# +# @proj-dot-ins-append file ?fileOut ?postProcessScript?? +# +# Queues up an autosetup [make-template]-style file to be processed +# at a later time using [proj-dot-ins-process]. +# +# $file is the input file. If $fileOut is empty then this function +# derives $fileOut from $file, stripping both its directory and +# extension parts. i.e. it defaults to writing the output to the +# current directory (typically $::autosetup(builddir)). +# +# If $postProcessScript is not empty then, during +# [proj-dot-ins-process], it will be eval'd immediately after +# processing the file. In the context of that script, the vars +# $dotInsIn and $dotInsOut will be set to the input and output file +# names. This can be used, for example, to make the output file +# executable or perform validation on its contents: +# +## proj-dot-ins-append my.sh.in my.sh { +## catch {exec chmod u+x $dotInsOut} +## } +# +# See [proj-dot-ins-process], [proj-dot-ins-list] +# +proc proj-dot-ins-append {fileIn args} { + set srcdir $::autosetup(srcdir) + switch -exact -- [llength $args] { + 0 { + lappend fileIn [file rootname [file tail $fileIn]] "" + } + 1 { + lappend fileIn [join $args] "" + } + 2 { + lappend fileIn {*}$args + } + default { + proj-fatal "Too many arguments: $fileIn $args" + } + } + #puts "******* [proj-scope]: adding [llength $fileIn]-length item: $fileIn" + lappend ::proj__Config(dot-in-files) $fileIn +} + +# +# @proj-dot-ins-list +# +# Returns the current list of [proj-dot-ins-append]'d files, noting +# that each entry is a 3-element list of (inputFileName, +# outputFileName, postProcessScript). +# +proc proj-dot-ins-list {} { + return $::proj__Config(dot-in-files) +} + +# +# @proj-dot-ins-process ?-touch? ?-validate? ?-clear? +# +# Each file which has previously been passed to [proj-dot-ins-append] +# is processed, with its passing its in-file out-file names to +# [proj-make-from-dot-in]. +# +# The intent is that a project accumulate any number of files to +# filter and delay their actual filtering until the last stage of the +# configure script, calling this function at that time. +# +# Optional flags: +# +# -touch: gets passed on to [proj-make-from-dot-in] +# +# -validate: after processing each file, before running the file's +# associated script, if any, it runs the file through +# proj-validate-no-unresolved-ats, erroring out if that does. +# +# -clear: after processing, empty the dot-ins list. This effectively +# makes proj-dot-ins-append available for re-use. 
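+#
+# A minimal usage sketch (the file names here are hypothetical): queue
+# up templates while configuring, then filter them all near the end of
+# the configure script:
+#
+## proj-dot-ins-append Makefile.in
+## proj-dot-ins-append mylib.pc.in mylib.pc
+## ...
+## proj-dot-ins-process -validate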
+
+proc proj-dot-ins-process {args} {
+ proj-parse-flags args flags {
+ -touch "" {return "-touch"}
+ -clear 0 {expr 1}
+ -validate 0 {expr 1}
+ }
+ #puts "args=$args"; parray flags
+ if {[llength $args] > 0} {
+ error "Invalid argument to [proj-scope]: $args"
+ }
+ foreach f $::proj__Config(dot-in-files) {
+ proj-assert {3==[llength $f]} \
+ "Expecting proj-dot-ins-list to be stored in 3-entry lists. Got: $f"
+ lassign $f fIn fOut fScript
+ #puts "DOING $fIn ==> $fOut"
+ proj-make-from-dot-in {*}$flags(-touch) $fIn $fOut
+ if {$flags(-validate)} {
+ proj-validate-no-unresolved-ats $fOut
+ }
+ if {"" ne $fScript} {
+ uplevel 1 [join [list set dotInsIn $fIn \; \
+ set dotInsOut $fOut \; \
+ eval \{${fScript}\} \; \
+ unset dotInsIn dotInsOut]]
+ }
+ }
+ if {$flags(-clear)} {
+ set ::proj__Config(dot-in-files) [list]
+ }
+}
+
+#
+# @proj-validate-no-unresolved-ats filenames...
+#
+# For each filename given to it, it validates that the file has no
+# unresolved @VAR@ references. If it finds any, it produces an error
+# with location information.
+#
+# Exception: if a filename matches the pattern {*[Mm]ake*} AND a given
+# line begins with a # (not including leading whitespace) then that
+# line is ignored for purposes of this validation. The intent is that
+# @VAR@ inside of makefile comments should not (necessarily) cause
+# validation to fail, as it's sometimes convenient to comment out
+# sections during development of a configure script and its
+# corresponding makefile(s).
+#
+proc proj-validate-no-unresolved-ats {args} {
+ foreach f $args {
+ set lnno 1
+ set isMake [string match {*[Mm]ake*} $f]
+ foreach line [proj-file-content-list $f] {
+ if {!$isMake || ![string match "#*" [string trimleft $line]]} {
+ if {[regexp {(@[A-Za-z0-9_\.]+@)} $line match]} {
+ error "Unresolved reference to $match at line $lnno of $f"
+ }
+ }
+ incr lnno
+ }
+ }
+}
+
+#
+# @proj-first-file-found tgtVar fileList
+#
+# Searches $fileList for an existing file. If one is found, its name
+# is assigned to tgtVar and 1 is returned, else tgtVar is set to ""
+# and 0 is returned.
+#
+proc proj-first-file-found {tgtVar fileList} {
+ upvar $tgtVar tgt
+ foreach f $fileList {
+ if {[file exists $f]} {
+ set tgt $f
+ return 1
+ }
+ }
+ set tgt ""
+ return 0
+}
+
+#
+# Defines $defName to contain makefile recipe commands for re-running
+# the configure script with its current set of $::argv flags. This
+# can be used to automatically reconfigure.
+#
+proc proj-setup-autoreconfig {defName} {
+ define $defName \
+ [join [list \
+ cd \"$::autosetup(builddir)\" \
+ && [get-define AUTOREMAKE "error - missing @AUTOREMAKE@"]]]
+}
+
+#
+# @proj-define-append defineName args...
+#
+# A proxy for Autosetup's [define-append]. Appends all non-empty $args
+# to [define-append $defineName].
+#
+proc proj-define-append {defineName args} {
+ foreach a $args {
+ if {"" ne $a} {
+ define-append $defineName {*}$a
+ }
+ }
+}
+
+#
+# @proj-define-amend ?-p|-prepend? ?-d|-define? defineName args...
+#
+# A proxy for Autosetup's [define-append].
+#
+# Appends all non-empty $args to the define named by $defineName. If
+# one of (-p | -prepend) is used it instead prepends them, in their
+# given order, to $defineName.
+#
+# If -define is used then each argument is assumed to be a [define]'d
+# flag and [get-define X ""] is used to fetch it.
+#
+# Re. linker flags: typically, -lXYZ flags need to be in "reverse"
+# order, with each -lY resolving symbols for -lX's to its left. 
This +# order is largely historical, and not relevant on all environments, +# but it is technically correct and still relevant on some +# environments. +# +# See: proj-append-to +# +proc proj-define-amend {args} { + set defName "" + set prepend 0 + set isdefs 0 + set xargs [list] + foreach arg $args { + switch -exact -- $arg { + "" {} + -p - -prepend { incr prepend } + -d - -define { incr isdefs } + default { + if {"" eq $defName} { + set defName $arg + } else { + lappend xargs $arg + } + } + } + } + if {"" eq $defName} { + proj-error "Missing defineName argument in call from [proj-scope 1]" + } + if {$isdefs} { + set args $xargs + set xargs [list] + foreach arg $args { + lappend xargs [get-define $arg ""] + } + set args $xargs + } +# puts "**** args=$args" +# puts "**** xargs=$xargs" + + set args $xargs + if {$prepend} { + lappend args {*}[get-define $defName ""] + define $defName [join $args]; # join to eliminate {} entries + } else { + proj-define-append $defName {*}$args + } +} + +# +# @proj-define-to-cflag ?-list? ?-quote? ?-zero-undef? defineName... +# +# Treat each argument as the name of a [define] and renders it like a +# CFLAGS value in one of the following forms: +# +# -D$name +# -D$name=integer (strict integer matches only) +# '-D$name=value' (without -quote) +# '-D$name="value"' (with -quote) +# +# It treats integers as numbers and everything else as a quoted +# string, noting that it does not handle strings which themselves +# contain quotes. +# +# The -zero-undef flag causes no -D to be emitted for integer values +# of 0. +# +# By default it returns the result as string of all -D... flags, +# but if passed the -list flag it will return a list of the +# individual CFLAGS. +# +proc proj-define-to-cflag {args} { + set rv {} + proj-parse-flags args flags { + -list 0 {expr 1} + -quote 0 {expr 1} + -zero-undef 0 {expr 1} + } + foreach d $args { + set v [get-define $d ""] + set li {} + if {"" eq $d} { + set v "-D${d}" + } elseif {[string is integer -strict $v]} { + if {!$flags(-zero-undef) || $v ne "0"} { + set v "-D${d}=$v" + } + } elseif {$flags(-quote)} { + set v "'-D${d}=\"$v\"'" + } else { + set v "'-D${d}=$v'" + } + lappend rv $v + } + expr {$flags(-list) ? $rv : [join $rv]} +} + + +if {0} { + # Turns out that autosetup's [options-add] essentially does exactly + # this... + + # A list of lists of Autosetup [options]-format --flags definitions. + # Append to this using [proj-options-add] and use + # [proj-options-combine] to merge them into a single list for passing + # to [options]. + # + set ::proj__Config(extra-options) {} + + # @proj-options-add list + # + # Adds a list of options to the pending --flag processing. It must be + # in the format used by Autosetup's [options] function. + # + # This will have no useful effect if called from after [options] + # is called. + # + # Use [proj-options-combine] to get a combined list of all added + # options. + # + # PS: when writing this i wasn't aware of autosetup's [options-add], + # works quite similarly. Only the timing is different. + proc proj-options-add {list} { + lappend ::proj__Config(extra-options) $list + } + + # @proj-options-combine list1 ?...listN? + # + # Expects each argument to be a list of options compatible with + # autosetup's [options] function. This function concatenates the + # contents of each list into a new top-level list, stripping the outer + # list part of each argument, and returning that list + # + # If passed no arguments, it uses the list generated by calls to + # [proj-options-add]. 
+ proc proj-options-combine {args} { + set rv [list] + if {0 == [llength $args]} { + set args $::proj__Config(extra-options) + } + foreach e $args { + lappend rv {*}$e + } + return $rv + } +}; # proj-options-* + +# Internal cache for use via proj-cache-*. +array set proj__Cache {} + +# +# @proj-cache-key arg {addLevel 0} +# +# Helper to generate cache keys for [proj-cache-*]. +# +# $addLevel should almost always be 0. +# +# Returns a cache key for the given argument: +# +# integer: relative call stack levels to get the scope name of for +# use as a key. [proj-scope [expr {1 + $arg + addLevel}]] is +# then used to generate the key. i.e. the default of 0 uses the +# calling scope's name as the key. +# +# Anything else: returned as-is +# +proc proj-cache-key {arg {addLevel 0}} { + if {[string is integer -strict $arg]} { + return [proj-scope [expr {$arg + $addLevel + 1}]] + } + return $arg +} + +# +# @proj-cache-set ?-key KEY? ?-level 0? value +# +# Sets a feature-check cache entry with the given key. +# +# See proj-cache-key for -key's and -level's semantics, noting that +# this function adds one to -level for purposes of that call. +proc proj-cache-set {args} { + proj-parse-flags args flags { + -key => 0 + -level => 0 + } + lassign $args val + set key [proj-cache-key $flags(-key) [expr {1 + $flags(-level)}]] + #puts "** fcheck set $key = $val" + set ::proj__Cache($key) $val +} + +# +# @proj-cache-remove ?key? ?addLevel? +# +# Removes an entry from the proj-cache. +proc proj-cache-remove {{key 0} {addLevel 0}} { + set key [proj-cache-key $key [expr {1 + $addLevel}]] + set rv "" + if {[info exists ::proj__Cache($key)]} { + set rv $::proj__Cache($key) + unset ::proj__Cache($key) + } + return $rv; +} + +# +# @proj-cache-check ?-key KEY? ?-level LEVEL? tgtVarName +# +# Checks for a feature-check cache entry with the given key. +# +# If the feature-check cache has a matching entry then this function +# assigns its value to tgtVar and returns 1, else it assigns tgtVar to +# "" and returns 0. +# +# See proj-cache-key for $key's and $addLevel's semantics, noting that +# this function adds one to $addLevel for purposes of that call. +proc proj-cache-check {args} { + proj-parse-flags args flags { + -key => 0 + -level => 0 + } + lassign $args tgtVar + upvar $tgtVar tgt + set rc 0 + set key [proj-cache-key $flags(-key) [expr {1 + $flags(-level)}]] + #puts "** fcheck get key=$key" + if {[info exists ::proj__Cache($key)]} { + set tgt $::proj__Cache($key) + incr rc + } else { + set tgt "" + } + return $rc +} + +# +# @proj-coalesce ...args +# +# Returns the first argument which is not empty (eq ""), or an empty +# string on no match. +proc proj-coalesce {args} { + foreach arg $args { + if {"" ne $arg} { + return $arg + } + } + return "" +} + +# +# @proj-parse-flags argvListName targetArrayName {prototype} +# +# A helper to parse flags from proc argument lists. +# +# The first argument is the name of a var holding the args to +# parse. It will be overwritten, possibly with a smaller list. +# +# The second argument is the name of an array variable to create in +# the caller's scope. +# +# The third argument, $prototype, is a description of how to handle +# the flags. Each entry in that list must be in one of the +# following forms: +# +# -flag defaultValue ?-literal|-call|-apply? +# script|number|incr|proc-name|{apply $aLambda} +# +# -flag* ...as above... +# +# -flag => defaultValue ?-call proc-name-and-args|-apply lambdaExpr? +# +# -flag* => ...as above... 
+
+# :PRAGMA
+#
+# The first two forms represent a basic flag with no associated
+# following argument. The third and fourth forms, called arg-consuming
+# flags, extract the value from the following argument in $argvName
+# (mnemonic: => points to the next argument). The :PRAGMA form
+# offers a way to configure certain aspects of this call.
+#
+# If $argv contains any given flag from $prototype, its default value
+# is overridden depending on several factors:
+#
+# - If the -literal flag is used, or the flag's script is a number,
+# the value is used verbatim.
+#
+# - Else if the -call flag is used, the argument must be a proc name
+# and any leading arguments, e.g. {apply $myLambda}. The proc is passed
+# the (flag, value) as arguments (non-consuming flags will get
+# passed the flag's current/starting value and consuming flags will
+# get the next argument). Its result becomes the result of the
+# flag.
+#
+# - Else if -apply X is used, it's effectively shorthand for -call
+# {apply X}. Its argument may either be a $lambdaRef or a {{f v}
+# {body}} construct.
+#
+# - Else if $script is one of the following values, it is treated as
+# the result of...
+#
+# - incr: increments the current value of the flag.
+#
+# - Else $script is eval'd to get its result value. That result
+# becomes the new flag value for $tgtArrayName(-flag). This
+# function intercepts [return $val] from eval'ing $script. Any
+# empty script will result in the flag having "" assigned to it.
+#
+# Unless the -flag has a trailing asterisk, e.g. -flag*, this function
+# assumes that each flag is unique, and using a flag more than once
+# causes an error to be triggered. The -flag* form works similarly
+# except that the flag may appear in $argv any number of times:
+#
+# - For non-arg-consuming flags, each invocation of -flag causes the
+# result of $script to overwrite the previous value. e.g.
+# {-flag* {x} {incr foo}} has a default value of x, but passing in
+# -flag twice would change it to the result of incrementing foo
+# twice. This form can be used to implement, e.g., increasing
+# verbosity levels by passing -verbose multiple times.
+#
+# - For arg-consuming flags, the given flag starts with value X, but
+# if the flag is provided in $argv, the default is cleared, then
+# each instance of -flag causes its value to be appended to the
+# result, so {-flag* => {a b c}} defaults to {a b c}, but passing
+# in -flag y -flag z would change it to {y z}, not {a b c y z}.
+#
+# By default, the args list is only inspected until the first argument
+# which is not described by $prototype. i.e. the first "non-flag" (not
+# counting values consumed for flags defined like -flag => default).
+# The :all-flags pragma (see below) can modify this behavior.
+#
+# If a "--" flag is encountered, no more arguments are inspected as
+# flags unless the :all-flags pragma (see below) is in effect. The
+# first instance of "--" is removed from the target result list but
+# all remaining instances of "--" are passed through.
+#
+# Any argvName entries not described in $prototype are considered to
+# be "non-flags" for purposes of this function, even if they
+# ostensibly look like flags.
+#
+# Returns the number of flags it processed in $argvName, not counting
+# "--". 
+# +# Example: +# +## set args [list -foo -bar {blah} -z 8 9 10 -theEnd] +## proj-parse-flags args flags { +## -foo 0 {expr 1} +## -bar => 0 +## -no-baz 1 {return 0} +## -z 0 2 +## } +# +# After that $flags would contain {-foo 1 -bar {blah} -no-baz 1 -z 2} +# and $args would be {8 9 10 -theEnd}. +# +# Pragmas: +# +# Passing :PRAGMAS to this function may modify how it works. The +# following pragmas are supported (note the leading ":"): +# +# :all-flags indicates that the whole input list should be scanned, +# not stopping at the first non-flag or "--". +# +proc proj-parse-flags {argvName tgtArrayName prototype} { + upvar $argvName argv + upvar $tgtArrayName outFlags + array set flags {}; # staging area + array set blob {}; # holds markers for various per-key state and options + set incrSkip 1; # 1 if we stop at the first non-flag, else 0 + # Parse $prototype for flag definitions... + set n [llength $prototype] + set checkProtoFlag { + #puts "**** checkProtoFlag #$i of $n k=$k fv=$fv" + switch -exact -- $fv { + -literal { + proj-assert {![info exists blob(${k}.consumes)]} + set blob(${k}.script) [list expr [lindex $prototype [incr i]]] + } + -apply { + set fv [lindex $prototype [incr i]] + if {2 == [llength $fv]} { + # Treat this as a lambda literal + set fv [list $fv] + } + lappend blob(${k}.call) "apply $fv" + } + -call { + # arg is either a proc name or {apply $aLambda} + set fv [lindex $prototype [incr i]] + lappend blob(${k}.call) $fv + } + default { + proj-assert {![info exists blob(${k}.consumes)]} + set blob(${k}.script) $fv + } + } + if {$i >= $n} { + proj-error -up "[proj-scope]: Missing argument for $k flag" + } + } + for {set i 0} {$i < $n} {incr i} { + set k [lindex $prototype $i] + #puts "**** #$i of $n k=$k" + + # Check for :PRAGMA... + switch -exact -- $k { + :all-flags { + set incrSkip 0 + continue + } + } + + proj-assert {[string match -* $k]} \ + "Invalid argument: $k" + + if {[string match {*\*} $k]} { + # Re-map -foo* to -foo and flag -foo as a repeatable flag + set k [string map {* ""} $k] + incr blob(${k}.multi) + } + + if {[info exists flags($k)]} { + proj-error -up "[proj-scope]: Duplicated prototype for flag $k" + } + + switch -exact -- [lindex $prototype [expr {$i + 1}]] { + => { + # -flag => DFLT ?-subflag arg? + incr i 2 + if {$i >= $n} { + proj-error -up "[proj-scope]: Missing argument for $k => flag" + } + incr blob(${k}.consumes) + set vi [lindex $prototype $i] + if {$vi in {-apply -call}} { + proj-error -up "[proj-scope]: Missing default value for $k flag" + } else { + set fv [lindex $prototype [expr {$i + 1}]] + if {$fv in {-apply -call}} { + incr i + eval $checkProtoFlag + } + } + } + default { + # -flag VALUE ?flag? SCRIPT + set vi [lindex $prototype [incr i]] + set fv [lindex $prototype [incr i]] + eval $checkProtoFlag + } + } + #puts "**** #$i of $n k=$k vi=$vi" + set flags($k) $vi + } + #puts "-- flags"; parray flags + #puts "-- blob"; parray blob + set rc 0 + set rv {}; # staging area for the target argv value + set skipMode 0 + set n [llength $argv] + # Now look for those flags in $argv... + for {set i 0} {$i < $n} {incr i} { + set arg [lindex $argv $i] + #puts "-- [proj-scope] arg=$arg" + if {$skipMode} { + lappend rv $arg + } elseif {"--" eq $arg} { + # "--" is the conventional way to end processing of args + if {[incr blob(--)] > 1} { + # Elide only the first one + lappend rv $arg + } + incr skipMode $incrSkip + } elseif {[info exists flags($arg)]} { + # A known flag... 
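+ # Work out how this flag produces its value: repeatable (-flag*)
+ # flags may appear multiple times, consuming (=>) flags take the
+ # next argument from $argv, and script/-call/-apply flags have
+ # their value computed below.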
+ set isMulti [info exists blob(${arg}.multi)] + incr blob(${arg}.seen) + if {1 < $blob(${arg}.seen) && !$isMulti} { + proj-error -up [proj-scope] "$arg flag was used multiple times" + } + set vMode 0; # 0=as-is, 1=eval, 2=call + set isConsuming [info exists blob(${arg}.consumes)] + if {$isConsuming} { + incr i + if {$i >= $n} { + proj-error -up [proj-scope] "is missing argument for $arg flag" + } + set vv [lindex $argv $i] + } elseif {[info exists blob(${arg}.script)]} { + set vMode 1 + set vv $blob(${arg}.script) + } else { + set vv $flags($arg) + } + + if {[info exists blob(${arg}.call)]} { + set vMode 2 + set vv [concat {*}$blob(${arg}.call) $arg $vv] + } elseif {$isConsuming} { + proj-assert {!$vMode} + # fall through + } elseif {"" eq $vv || [string is double -strict $vv]} { + set vMode 0 + } elseif {$vv in {incr}} { + set vMode 0 + switch -exact $vv { + incr { + set xx $flags($k); incr xx; set vv $xx; unset xx + } + default { + proj-error "Unhandled \$vv value $vv" + } + } + } else { + set vv [list eval $vv] + set vMode 1 + } + if {$vMode} { + set code [catch [list uplevel 1 $vv] vv xopt] + if {$code ni {0 2}} { + return {*}$xopt $vv + } + } + if {$isConsuming && $isMulti} { + if {1 == $blob(${arg}.seen)} { + # On the first hit, overwrite the default with a new list. + set flags($arg) [list $vv] + } else { + # On subsequent hits, append to the list. + lappend flags($arg) $vv + } + } else { + set flags($arg) $vv + } + incr rc + } else { + # Non-flag + incr skipMode $incrSkip + lappend rv $arg + } + } + set argv $rv + array set outFlags [array get flags] + #puts "-- rv=$rv argv=$argv flags="; parray flags + return $rc +}; # proj-parse-flags + +# +# Older (deprecated) name of proj-parse-flags. +# +proc proj-parse-simple-flags {args} { + tailcall proj-parse-flags {*}$args +} + +if {$::proj__Config(self-tests)} { + set __ova $::proj__Config(verbose-assert); + set ::proj__Config(verbose-assert) 1 + puts "Running [info script] self-tests..." + # proj-cache... + apply {{} { + #proj-warn "Test code for proj-cache" + proj-assert {![proj-cache-check -key here check]} + proj-assert {"here" eq [proj-cache-key here]} + proj-assert {"" eq $check} + proj-cache-set -key here thevalue + proj-assert {[proj-cache-check -key here check]} + proj-assert {"thevalue" eq $check} + + proj-assert {![proj-cache-check check]} + #puts "*** key = ([proj-cache-key 0])" + proj-assert {"" eq $check} + proj-cache-set abc + proj-assert {[proj-cache-check check]} + proj-assert {"abc" eq $check} + + #parray ::proj__Cache; + proj-assert {"" ne [proj-cache-remove]} + proj-assert {![proj-cache-check check]} + proj-assert {"" eq [proj-cache-remove]} + proj-assert {"" eq $check} + }} + + # proj-parse-flags ... 
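+ # The first test exercises the :all-flags pragma together with
+ # repeatable (-a*, -b*) and argument-consuming flags.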
+ apply {{} { + set foo 3 + set argv {-a "hi - world" -b -b -b -- -a {bye bye} -- -d -D c -a "" --} + proj-parse-flags argv flags { + :all-flags + -a* => "gets overwritten" + -b* 7 {incr foo} + -d 1 0 + -D 0 1 + } + + #puts "-- argv = $argv"; parray flags; + proj-assert {"-- c --" eq $argv} + proj-assert {$flags(-a) eq "{hi - world} {bye bye} {}"} + proj-assert {$foo == 6} + proj-assert {$flags(-b) eq $foo} + proj-assert {$flags(-d) == 0} + proj-assert {$flags(-D) == 1} + set foo 0 + foreach x $flags(-a) { + proj-assert {$x in {{hi - world} {bye bye} {}}} + incr foo + } + proj-assert {3 == $foo} + + set argv {-a {hi world} -b -maybe -- -a {bye bye} -- -b c --} + set foo 0 + proj-parse-flags argv flags { + -a => "aaa" + -b 0 {incr foo} + -maybe no -literal yes + } + #parray flags; puts "--- argv = $argv" + proj-assert {"-a {bye bye} -- -b c --" eq $argv} + proj-assert {$flags(-a) eq "hi world"} + proj-assert {1 == $flags(-b)} + proj-assert {"yes" eq $flags(-maybe)} + + set argv {-f -g -a aaa -M -M -M -L -H -A AAA a b c} + set foo 0 + set myLambda {{flag val} { + proj-assert {$flag in {-f -g -M}} + #puts "myLambda flag=$flag val=$val" + incr val + }} + proc myNonLambda {flag val} { + proj-assert {$flag in {-A -a}} + #puts "myNonLambda flag=$flag val=$val" + concat $val $val + } + proj-parse-flags argv flags { + -f 0 -call {apply $myLambda} + -g 2 -apply $myLambda + -h 3 -apply $myLambda + -H 30 33 + -a => aAAAa -apply {{f v} { + set v + }} + -A => AaaaA -call myNonLambda + -B => 17 -call myNonLambda + -M* 0 -apply $myLambda + -L "" -literal $myLambda + } + rename myNonLambda "" + #puts "--- argv = $argv"; parray flags + proj-assert {$flags(-f) == 1} + proj-assert {$flags(-g) == 3} + proj-assert {$flags(-h) == 3} + proj-assert {$flags(-H) == 33} + proj-assert {$flags(-a) == {aaa}} + proj-assert {$flags(-A) eq "AAA AAA"} + proj-assert {$flags(-B) == 17} + proj-assert {$flags(-M) == 3} + proj-assert {$flags(-L) eq $myLambda} + + set argv {-touch -validate} + proj-parse-flags argv flags { + -touch "" {return "-touch"} + -validate 0 1 + } + #puts "----- argv = $argv"; parray flags + proj-assert {$flags(-touch) eq "-touch"} + proj-assert {$flags(-validate) == 1} + proj-assert {$argv eq {}} + + set argv {-i -i -i} + proj-parse-flags argv flags { + -i* 0 incr + } + proj-assert {3 == $flags(-i)} + }} + set ::proj__Config(verbose-assert) $__ova + unset __ova + puts "Done running [info script] self-tests." +}; # proj- API self-tests diff --git a/autosetup/sqlite-config.tcl b/autosetup/sqlite-config.tcl new file mode 100644 index 0000000000..7c798b31a2 --- /dev/null +++ b/autosetup/sqlite-config.tcl @@ -0,0 +1,2237 @@ +# This file holds functions for autosetup which are specific to the +# sqlite build tree. They are in this file, instead of auto.def, so +# that they can be reused in the autoconf sub-tree. This file requires +# functions from the project-agnostic proj.tcl. + +if {[string first " " $autosetup(srcdir)] != -1} { + user-error "The pathname of the source tree\ + may not contain space characters" +} +if {[string first " " $autosetup(builddir)] != -1} { + user-error "The pathname of the build directory\ + may not contain space characters" +} + +use proj +# +# We want the package version info to be emitted early on, but doing +# so requires a bit of juggling. We have to [use system] for +# --prefix=... to work and to emit the Host/Build system info, but we +# don't want those to interfere with --help output. 
+define PACKAGE_VERSION [proj-file-content -trim $::autosetup(srcdir)/VERSION] +if {"--help" ni $::argv} { + msg-result "Configuring SQLite version [get-define PACKAGE_VERSION]" +} +use system ; # Will output "Host System" and "Build System" lines +if {"--help" ni $::argv} { + proj-tweak-default-env-dirs + msg-result "Source dir = $::autosetup(srcdir)" + msg-result "Build dir = $::autosetup(builddir)" + use cc cc-db cc-shared cc-lib pkg-config +} + +# +# Object for communicating certain config-time state across various +# auto.def-related pieces. +array set sqliteConfig [subst [proj-strip-hash-comments { + # + # Gets set by [sqlite-configure] (the main configure script driver). + build-mode unknown + # + # Gets set to 1 when using jimsh for code generation. May affect + # later decisions. + use-jim-for-codegen 0 + # + # Set to 1 when cross-compiling This value may be changed by certain + # build options, so it's important that config code which checks for + # cross-compilation uses this var instead of + # [proj-is-cross-compiling]. + is-cross-compiling [proj-is-cross-compiling] + # + # Pass msg-debug=1 to configure to enable obnoxiously loud output + # from [msg-debug]. + msg-debug-enabled 0 + # + # Output file for --dump-defines. Intended only for build debugging + # and not part of the public build interface. + dump-defines-txt ./config.defines.txt + # + # If not empty then --dump-defines will dump not only + # (dump-defines-txt) but also a JSON file named after this option's + # value. + dump-defines-json "" + + # + # The list of feature --flags which the --all flag implies. This + # requires special handling in a few places. + # + all-flag-enables {fts4 fts5 rtree geopoly session dbpage dbstat carray} + + # + # Default value for the --all flag. Can hypothetically be modified + # by non-canonical builds (it was added for a Tcl extension build + # mode which was eventually removed). + # + all-flag-default 0 +}]] + +######################################################################## +# Processes all configure --flags for this build, run build-specific +# config checks, then finalize the configure process. $buildMode must +# be one of (canonical, autoconf), and others may be added in the +# future. After bootstrapping, $configScript is eval'd in the caller's +# scope, then post-configuration finalization is run. $configScript is +# intended to hold configure code which is specific to the given +# $buildMode, with the caveat that _some_ build-specific code is +# encapsulated in the configuration finalization step. +# +# The intent is that all (or almost all) build-mode-specific +# configuration goes inside the $configScript argument to this +# function, and that an auto.def file contains only two commands: +# +# use sqlite-config +# sqlite-configure BUILD_NAME { build-specific configure script } +# +# There are snippets of build-mode-specific decision-making in +# [sqlite-configure-finalize], which gets run after $configScript. +proc sqlite-configure {buildMode configScript} { + proj-assert {$::sqliteConfig(build-mode) eq "unknown"} \ + "sqlite-configure must not be called more than once" + set allBuildModes {canonical autoconf} + if {$buildMode ni $allBuildModes} { + user-error "Invalid build mode: $buildMode. 
Expecting one of: $allBuildModes" + } + if {$::sqliteConfig(all-flag-default)} { + set allFlagHelp "Disable these extensions: $::sqliteConfig(all-flag-enables)" + } else { + set allFlagHelp "Enable these extensions: $::sqliteConfig(all-flag-enables)" + } + + set ::sqliteConfig(build-mode) $buildMode + ######################################################################## + # A gentle introduction to flags handling in autosetup + # + # Reference: https://msteveb.github.io/autosetup/developer/ + # + # All configure flags must be described in one or more calls to + # autosetup's [options] and [options-add] functions. The general + # syntax of the single argument to those functions is a list contain + # a mapping of flags to help text: + # + # FLAG => {Help text} + # + # Where FLAG can have any of the following formats: + # + # boolopt => "a boolean option which defaults to disabled" + # boolopt2=1 => "a boolean option which defaults to enabled" + # stringopt: => "an option which takes an argument, e.g. --stringopt=value" + # stringopt:DESCR => As for stringopt: with a description for the value + # stringopt2:=value => "an option where the argument is optional and defaults to 'value'" + # optalias booltopt3 => "a boolean with a hidden alias. --optalias is not shown in --help" + # + # Autosetup does no small amount of specialized handling for flags, + # especially booleans. Each bool-type --FLAG implicitly gets + # --enable-FLAG and --disable-FLAG forms. That can lead lead to some + # confusion when writing help text. For example: + # + # options { json=1 {Disable JSON functions} } + # + # The reason the help text says "disable" is because a boolean option + # which defaults to true is, in the --help text, rendered as: + # + # --disable-json Disable JSON functions + # + # Whereas a bool flag which defaults to false will instead render as: + # + # --enable-FLAG + # + # Non-boolean flags, in contrast, use the names specifically given to + # them in the [options] invocation. e.g. "with-tcl" is the --with-tcl + # flag. + # + # Fetching values for flags: + # + # booleans: use one of: + # - [opt-bool FLAG] is autosetup's built-in command for this, but we + # have some convenience variants: + # - [proj-opt-truthy FLAG] + # - [proj-opt-if-truthy FLAG {THEN} {ELSE}] + # + # Non-boolean (i.e. string) flags: + # - [opt-val FLAG ?default?] + # - [opt-str ...] - see the docs in ./autosetup/autosetup + # + # [proj-opt-was-provided] can be used to determine whether a flag was + # explicitly provided, which is often useful for distinguishing from + # the case of a default value. + ######################################################################## + set allFlags { + # Structure: a list of M {Z} pairs, where M is a descriptive + # option group name and Z is a list of X Y pairs. X is a list of + # $buildMode name(s) to which the Y flags apply, or {*} to apply + # to all builds. Y is a {block} in the form expected by + # autosetup's [options] and [options-add] command. Each block + # which is applicable to $buildMode is passed on to + # [options-add]. The order of each Y and sub-Y is retained, which + # is significant for rendering of --help. + # + # Maintenance note: [options] does not support comments in + # options, but we filter this object through + # [proj-strip-hash-comments] to remove them before passing them on + # to [options]. 
+ + # When writing {help text blocks}, be aware that: + # + # A) autosetup formats them differently if the {block} starts with + # a newline: it starts left-aligned, directly under the --flag, and + # the rest of the block is pasted verbatim rather than + # pretty-printed. + # + # B) Vars and commands are NOT expanded, but we use a [subst] call + # below which will replace (only) $var refs. + + # Options for how to build the library + build-modes { + {canonical autoconf} { + shared=1 => {Disable build of shared library} + static=1 => {Disable build of static library} + } + {canonical} { + amalgamation=1 => {Disable the amalgamation and instead build all files separately} + } + } + + # Library-level features and defaults + lib-features { + {*} { + threadsafe=1 => {Disable mutexing} + with-tempstore:=no => {Use an in-RAM database for temporary tables: never,no,yes,always} + load-extension=1 => {Disable loading of external extensions} + # ^^^ one of the downstream custom builds overrides the load-extension default to 0, which + # confuses the --help text generator. https://github.com/msteveb/autosetup/issues/77 + math=1 => {Disable math functions} + json=1 => {Disable JSON functions} + memsys5 => {Enable MEMSYS5} + memsys3 => {Enable MEMSYS3} + fts3 => {Enable the FTS3 extension} + fts4 => {Enable the FTS4 extension} + fts5 => {Enable the FTS5 extension} + update-limit => {Enable the UPDATE/DELETE LIMIT clause} + geopoly => {Enable the GEOPOLY extension} + rtree => {Enable the RTREE extension} + session => {Enable the SESSION extension} + dbpage => {Enable the sqlite3_dbpage extension} + dbstat => {Enable the sqlite3_dbstat extension} + carray=1 => {Disable the CARRAY extension} + all=$::sqliteConfig(all-flag-default) => {$allFlagHelp} + largefile=1 + => {This legacy flag has no effect on the library but may influence + the generated sqlite_cfg.h by adding #define HAVE_LFS} + } + {canonical} { + column-metadata => {Enable the column metadata APIs} + # ^^^ Affects how sqlite3.c is generated, so is not available in + # the autoconf build. + } + } + + # Options for TCL support + tcl { + {canonical} { + tcl=1 + => {Disable components which require TCL, including all tests. + This tree requires TCL for code generation but can use the in-tree + copy of autosetup/jimsh0.c for that. The SQLite TCL extension and the + test code require a canonical tclsh.} + with-tcl:DIR + => {Directory containing tclConfig.sh or a directory one level up from + that, from which we can derive a directory containing tclConfig.sh. + A dir name of "prefix" is equivalent to the directory specified by + the --prefix flag.} + with-tclsh:PATH + => {Full pathname of tclsh to use. It is used for (A) trying to find + tclConfig.sh and (B) all TCL-based code generation. Use --with-tcl + unless you have a specific need for this flag. Warning: if its + containing dir has multiple tclsh versions, it may select the + wrong tclConfig.sh!} + static-tclsqlite3=0 + => {Statically-link tclsqlite3. This only works if TCL support is + enabled and all requisite libraries are available in + static form. 
Note that glibc is unable to fully statically + link certain libraries required by tclsqlite3, so this won't + work on most Linux environments.} + } + } + + # Options for line-editing modes for the CLI shell + line-editing { + {canonical autoconf} { + readline=1 + => {Disable readline support} + # --with-readline-lib is a backwards-compatible alias for + # --with-readline-ldflags + with-readline-lib: + with-readline-ldflags:=auto + => {Readline LDFLAGS, e.g. -lreadline -lncurses} + # --with-readline-inc is a backwards-compatible alias for + # --with-readline-cflags. + with-readline-inc: + with-readline-cflags:=auto + => {Readline CFLAGS, e.g. -I/path/to/includes} + with-readline-header:PATH + => {Full path to readline.h, from which --with-readline-cflags will be derived} + with-linenoise:DIR + => {Source directory for linenoise.c and linenoise.h} + editline=0 + => {Enable BSD editline support} + } + } + + # Options for ICU: International Components for Unicode + icu { + {*} { + with-icu-ldflags:LDFLAGS + => {Enable SQLITE_ENABLE_ICU and add the given linker flags for the + ICU libraries. e.g. on Ubuntu systems, try '-licui18n -licuuc -licudata'.} + with-icu-cflags:CFLAGS + => {Apply extra CFLAGS/CPPFLAGS necessary for building with ICU. + e.g. -I/usr/local/include} + with-icu-config:=auto + => {Enable SQLITE_ENABLE_ICU. Value must be one of: auto, pkg-config, + /path/to/icu-config} + icu-collations=0 + => {Enable SQLITE_ENABLE_ICU_COLLATIONS. Requires --with-icu-ldflags=... + or --with-icu-config} + } + } + + # Options for exotic/alternative build modes + alternative-builds { + {canonical autoconf} { + with-wasi-sdk:=/opt/wasi-sdk + => {Top-most dir of the wasi-sdk for a WASI build} + } + + {*} { + # Note that --static-cli-shell has a completely different + # meaning from --static-shell in the autoconf build! + # --[disable-]static-shell is a legacy flag which we can't + # remove without breaking downstream builds. + static-cli-shell=0 + => {Statically-link the sqlite3 CLI shell. + This only works if the requisite libraries are all available in + static form.} + } + + {canonical} { + static-shells=0 + => {Shorthand for --static-cli-shell --static-tclsqlite3} + + with-emsdk:=auto + => {Top-most dir of the Emscripten SDK installation. + Needed only by ext/wasm. Default=EMSDK env var.} + + amalgamation-extra-src:FILES + => {Space-separated list of source files to append as-is to the resulting + sqlite3.c amalgamation file. May be provided multiple times.} + } + } + + # Options primarily for downstream packagers/package maintainers + packaging { + {autoconf} { + # --disable-static-shell: https://sqlite.org/forum/forumpost/cc219ee704 + # Note that this has a different meaning from --static-cli-shell in the + # canonical build! + static-shell=1 + => {Link the sqlite3 shell app against the DLL instead of embedding sqlite3.c} + } + {canonical autoconf} { + rpath=1 => {Disable use of the rpath linker flag} + # soname: https://sqlite.org/src/forumpost/5a3b44f510df8ded + soname:=legacy + => {SONAME for libsqlite3.so. "none", or not using this flag, sets no + soname. "legacy" sets it to its historical value of + libsqlite3.so.0. A value matching the glob "libsqlite3.*" sets + it to that literal value. Any other value is assumed to be a + suffix which gets applied to "libsqlite3.so.", + e.g. 
--soname=9.10 equates to "libsqlite3.so.9.10".} + # dll-basename: https://sqlite.org/forum/forumpost/828fdfe904 + dll-basename:=auto + => {Specifies the base name of the resulting DLL file. + If not provided, "libsqlite3" is usually assumed but on some platforms + a platform-dependent default is used. On some platforms this flag + gets automatically enabled if it is not provided. Use "default" to + explicitly disable platform-dependent activation on such systems.} + # out-implib: https://sqlite.org/forum/forumpost/0c7fc097b2 + out-implib:=auto + => {Enable use of --out-implib linker flag to generate an + "import library" for the DLL. The output's base name is + specified by this flag's value, with "auto" meaning to figure + out a name automatically. On some platforms this flag gets + automatically enabled if it is not provided. Use "none" to + explicitly disable this feature on such platforms.} + } + } + + # Options mostly for sqlite's own development + developer { + {*} { + # Note that using the --debug/--enable-debug flag here + # requires patching autosetup/autosetup to rename its builtin + # --debug to --autosetup-debug. See details in + # autosetup/README.md#patching. + with-debug=0 + debug=0 + => {Enable debug build flags. This option will impact performance by + as much as 4x, as it includes large numbers of assert()s in + performance-critical loops. Never use --debug for production + builds.} + scanstatus + => {Enable the SQLITE_ENABLE_STMT_SCANSTATUS feature flag} + } + {canonical} { + dev + => {Enable dev-mode build: automatically enables certain other flags} + test-status + => {Enable status of tests} + gcov=0 + => {Enable coverage testing using gcov} + linemacros + => {Enable #line macros in the amalgamation} + dynlink-tools + => {Dynamically link libsqlite3 to certain tools which normally statically embed it} + asan-fsanitize:=auto + => {Comma- or space-separated list of -fsanitize flags for use with the + fuzzcheck-asan tool. Only those which the compiler claims to support + will actually be used. May be provided multiple times.} + } + {*} { + dump-defines=0 + => {Dump autosetup defines to $::sqliteConfig(dump-defines-txt) + (for build debugging)} + } + } + }; # $allFlags + + set allFlags [proj-strip-hash-comments $allFlags] + # ^^^ lappend of [sqlite-custom-flags] introduces weirdness if + # we delay [proj-strip-hash-comments] until after that. + + ######################################################################## + # sqlite-custom.tcl is intended only for vendor-branch-specific + # customization. See autosetup/README.md#branch-customization for + # details. + if {[file exists $::autosetup(libdir)/sqlite-custom.tcl]} { + uplevel 1 {source $::autosetup(libdir)/sqlite-custom.tcl} + } + + if {[llength [info proc sqlite-custom-flags]] > 0} { + # sqlite-custom-flags is assumed to be imported via + # autosetup/sqlite-custom.tcl. 
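+    #
+    # For illustration only (a hypothetical vendor file), such a proc
+    # returns flag groups in the same {X Y} format as $allFlags above,
+    # or an empty string for none:
+    #
+    #   proc sqlite-custom-flags {} {
+    #     return {
+    #       {*} {
+    #         vendor-feature=0 => {Enable a hypothetical vendor feature}
+    #       }
+    #     }
+    #   }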
+ set scf [sqlite-custom-flags] + if {"" ne $scf} { + lappend allFlags sqlite-custom-flags $scf + } + } + + #lappend allFlags just-testing {{*} {soname:=duplicateEntry => {x}}} + + # Filter allFlags to create the set of [options] legal for this build + foreach {group XY} [subst -nobackslashes -nocommands $allFlags] { + foreach {X Y} $XY { + if { $buildMode in $X || "*" in $X } { + options-add $Y + } + } + } + + if {[catch {options {}} msg xopts]} { + # Workaround for + # where [options] behaves oddly on _some_ TCL builds when it's + # called from deeper than the global scope. + dict incr xopts -level + return {*}$xopts $msg + } + sqlite-configure-phase1 $buildMode + uplevel 1 $configScript + sqlite-configure-finalize +}; # sqlite-configure + +######################################################################## +# Runs "phase 1" of the configure process: after initial --flags +# handling but before sqlite-configure's $configScript argument is +# run. $buildMode must be the mode which was passed to +# [sqlite-configure]. +proc sqlite-configure-phase1 {buildMode} { + define PACKAGE_NAME sqlite + define PACKAGE_URL {https://sqlite.org} + define PACKAGE_BUGREPORT [get-define PACKAGE_URL]/forum + define PACKAGE_STRING "[get-define PACKAGE_NAME] [get-define PACKAGE_VERSION]" + proj-xfer-options-aliases { + # Carry values from hidden --flag aliases over to their canonical + # flag forms. This list must include only options which are common + # to all build modes supported by [sqlite-configure]. + with-readline-inc => with-readline-cflags + with-readline-lib => with-readline-ldflags + with-debug => debug + } + set ::sqliteConfig(msg-debug-enabled) [proj-val-truthy [get-env msg-debug 0]] + proc-debug "msg-debug is enabled" + proj-setup-autoreconfig SQLITE_AUTORECONFIG + proj-file-extensions + if {".exe" eq [get-define TARGET_EXEEXT]} { + define SQLITE_OS_UNIX 0 + define SQLITE_OS_WIN 1 + } else { + define SQLITE_OS_UNIX 1 + define SQLITE_OS_WIN 0 + } + sqlite-setup-default-cflags + define HAVE_LFS 0 + if {[opt-bool largefile]} { + # + # Insofar as we can determine HAVE_LFS has no effect on the + # library. Perhaps it did back in the early 2000's. The + # --enable/disable-largefile flag is retained because it's + # harmless, but it doesn't do anything useful. It does have + # visible side-effects, though: the generated sqlite_cfg.h may (or + # may not) define HAVE_LFS. + cc-check-lfs + } + set srcdir $::autosetup(srcdir) + proj-dot-ins-append $srcdir/Makefile.in + if {[file exists $srcdir/sqlite3.pc.in]} { + proj-dot-ins-append $srcdir/sqlite3.pc.in + } + sqlite-handle-hpux; # must be relatively early so that other config tests can work +}; # sqlite-configure-phase1 + +######################################################################## +# Performs late-stage config steps common to all supported +# $::sqliteConfig(build-mode) values. +proc sqlite-configure-finalize {} { + sqlite-handle-rpath + sqlite-handle-soname + sqlite-handle-threadsafe + sqlite-handle-tempstore + sqlite-handle-load-extension + sqlite-handle-math + sqlite-handle-icu + if {[proj-opt-exists readline]} { + sqlite-handle-line-editing + } + if {[proj-opt-exists shared]} { + proj-define-for-opt shared ENABLE_LIB_SHARED "Build shared library?" + } + if {[proj-opt-exists static]} { + if {![proj-define-for-opt static ENABLE_LIB_STATIC "Build static library?"]} { + # This notice really only applies to the canonical build... 
+ proj-indented-notice { + NOTICE: static lib build may be implicitly re-activated by + other components, e.g. some test apps. + } + } + } + sqlite-handle-env-quirks + sqlite-handle-common-feature-flags + sqlite-finalize-feature-flags + sqlite-process-dot-in-files; # do not [define] anything after this + sqlite-dump-defines +} + +######################################################################## +# Internal config-time debugging output routine. It generates no +# output unless msg-debug=1 is passed to the configure script. +proc msg-debug {msg} { + if {$::sqliteConfig(msg-debug-enabled)} { + puts stderr [proj-bold "** DEBUG: $msg"] + } +} +######################################################################## +# A [msg-debug] proxy which prepends the name of the current proc to +# the debug message. It is not legal to call this from the global +# scope. +proc proc-debug {msg} { + msg-debug "\[[proj-scope 1]\]: $msg" +} + +define OPT_FEATURE_FLAGS {} ; # -DSQLITE_OMIT/ENABLE flags. +define OPT_SHELL {} ; # Feature-related CFLAGS for the sqlite3 CLI app +######################################################################## +# Adds $args, if not empty, to OPT_FEATURE_FLAGS. If the first arg is +# -shell then it strips that arg and passes the remaining args the +# sqlite-add-shell-opt in addition to adding them to +# OPT_FEATURE_FLAGS. This is intended only for holding +# -DSQLITE_ENABLE/OMIT/... flags, but that is not enforced here. +proc sqlite-add-feature-flag {args} { + set shell "" + if {"-shell" eq [lindex $args 0]} { + set args [lassign $args shell] + } + if {"" ne $args} { + if {"" ne $shell} { + sqlite-add-shell-opt {*}$args + } + define-append OPT_FEATURE_FLAGS {*}$args + } +} + +######################################################################## +# Appends $args, if not empty, to OPT_SHELL. +proc sqlite-add-shell-opt {args} { + if {"" ne $args} { + define-append OPT_SHELL {*}$args + } +} + +######################################################################## +# Check for log(3) in libm and die with an error if it is not +# found. $featureName should be the feature name which requires that +# function (it's used only in error messages). defines LDFLAGS_MATH to +# the required linker flags (which may be empty even if the math APIs +# are found, depending on the OS). +proc sqlite-affirm-have-math {featureName} { + if {"" eq [get-define LDFLAGS_MATH ""]} { + if {![msg-quiet proj-check-function-in-lib log m]} { + user-error "Missing math APIs for $featureName" + } + set lfl [get-define lib_log ""] + undefine lib_log + if {"" ne $lfl} { + user-notice "Forcing requirement of $lfl for $featureName" + } + define LDFLAGS_MATH $lfl + } +} + +######################################################################## +# Run checks for required binaries, like ld and ar. In the canonical +# build this must come before [sqlite-handle-wasi-sdk]. +proc sqlite-check-common-bins {} { + cc-check-tools ld ar ; # must come before [sqlite-handle-wasi-sdk] + if {"" eq [proj-bin-define install]} { + proj-warn "Cannot find install binary, so 'make install' will not work." + define BIN_INSTALL false + } +} + +######################################################################## +# Run checks for system-level includes and libs which are common to +# both the canonical build and the "autoconf" bundle. +# +# For the canonical build this must come after +# [sqlite-handle-wasi-sdk], as that function may change the +# environment in ways which affect this. 
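+#
+# As a rough illustration of the mechanism (not an exhaustive list): a
+# successful [cc-check-functions usleep] probe results in
+# [define HAVE_USLEEP 1], and such HAVE_* defines later surface in the
+# generated sqlite_cfg.h (see [sqlite-process-dot-in-files]) as:
+#
+#   #define HAVE_USLEEP 1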
+proc sqlite-check-common-system-deps {} { + # Check for needed/wanted data types + cc-with {-includes stdint.h} \ + {cc-check-types int8_t int16_t int32_t int64_t intptr_t \ + uint8_t uint16_t uint32_t uint64_t uintptr_t} + + # Check for needed/wanted functions + cc-check-functions gmtime_r isnan localtime_r localtime_s \ + usleep utime pread pread64 pwrite pwrite64 + + apply {{} { + set ldrt "" + # Collapse funcs from librt into LDFLAGS_RT. + # Some systems (ex: SunOS) require -lrt in order to use nanosleep + foreach func {fdatasync nanosleep} { + if {[proj-check-function-in-lib $func rt]} { + set ldrt [get-define lib_${func} ""] + undefine lib_${func} + if {"" ne $ldrt} { + break + } + } + } + define LDFLAGS_RT $ldrt + }} + + # Check for needed/wanted headers + cc-check-includes \ + sys/types.h sys/stat.h dlfcn.h unistd.h \ + stdlib.h malloc.h memory.h \ + string.h strings.h \ + inttypes.h + + if {[cc-check-includes zlib.h] && [proj-check-function-in-lib deflate z]} { + # TODO? port over the more sophisticated zlib search from the fossil auto.def + define HAVE_ZLIB 1 + define LDFLAGS_ZLIB -lz + sqlite-add-shell-opt -DSQLITE_HAVE_ZLIB=1 + } else { + define HAVE_ZLIB 0 + define LDFLAGS_ZLIB "" + } +} + +######################################################################## +# Move -DSQLITE_OMIT... and -DSQLITE_ENABLE... flags from CFLAGS and +# CPPFLAGS to OPT_FEATURE_FLAGS and remove them from BUILD_CFLAGS. +proc sqlite-munge-cflags {} { + # Move CFLAGS and CPPFLAGS entries matching -DSQLITE_OMIT* and + # -DSQLITE_ENABLE* to OPT_FEATURE_FLAGS. This behavior is derived + # from the legacy build and was missing the 3.48.0 release (the + # initial Autosetup port). + # https://sqlite.org/forum/forumpost/9801e54665afd728 + # + # Handling of CPPFLAGS, as well as removing ENABLE/OMIT from + # CFLAGS/CPPFLAGS, was missing in the 3.49.0 release as well. + # + # If any configure flags for features are in conflict with + # CFLAGS/CPPFLAGS-specified feature flags, all bets are off. There + # are no guarantees about which one will take precedence. + foreach flagDef {CFLAGS CPPFLAGS} { + set tmp "" + foreach cf [get-define $flagDef ""] { + switch -glob -- $cf { + -DSQLITE_OMIT* - + -DSQLITE_ENABLE* { + sqlite-add-feature-flag $cf + } + default { + lappend tmp $cf + } + } + } + define $flagDef $tmp + } + + # Strip all SQLITE_ENABLE/OMIT flags from BUILD_CFLAGS, + # for compatibility with the legacy build. + set tmp "" + foreach cf [get-define BUILD_CFLAGS ""] { + switch -glob -- $cf { + -DSQLITE_OMIT* - + -DSQLITE_ENABLE* {} + default { + lappend tmp $cf + } + } + } + define BUILD_CFLAGS $tmp +} + +######################################################################### +# Set up the default CFLAGS and BUILD_CFLAGS values. +proc sqlite-setup-default-cflags {} { + ######################################################################## + # We differentiate between two C compilers: the one used for binaries + # which are to run on the build system (in autosetup it's called + # CC_FOR_BUILD and in Makefile.in it's $(B.cc)) and the one used for + # compiling binaries for the target system (CC a.k.a. $(T.cc)). + # Normally they're the same, but they will differ when + # cross-compiling. 
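+  #
+  # As an illustrative example, both sets of flags may be overridden
+  # from the environment at configure time:
+  #
+  #   CFLAGS="-O3" BUILD_CFLAGS="-O1 -g" ./configure
+  #
+  # Any -DSQLITE_ENABLE_*/-DSQLITE_OMIT_* entries found in CFLAGS or
+  # CPPFLAGS are subsequently relocated to OPT_FEATURE_FLAGS by
+  # [sqlite-munge-cflags] (see above).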
+ # + # When cross-compiling we default to not using the -g flag, based on a + # /chat discussion prompted by + # https://sqlite.org/forum/forumpost/9a67df63eda9925c + set defaultCFlags {-O2} + if {!$::sqliteConfig(is-cross-compiling)} { + lappend defaultCFlags -g + } + define CFLAGS [proj-get-env CFLAGS $defaultCFlags] + # BUILD_CFLAGS is the CFLAGS for CC_FOR_BUILD. + define BUILD_CFLAGS [proj-get-env BUILD_CFLAGS {-g}] + sqlite-munge-cflags +} + +######################################################################## +# Handle various SQLITE_ENABLE/OMIT_... feature flags. +proc sqlite-handle-common-feature-flags {} { + msg-result "Feature flags..." + if {![opt-bool all]} { + # Special handling for --disable-all + foreach flag $::sqliteConfig(all-flag-enables) { + if {![proj-opt-was-provided $flag]} { + proj-opt-set $flag 0 + } + } + } + foreach {boolFlag featureFlag ifSetEvalThis} [proj-strip-hash-comments { + all {} { + # The 'all' option must be first in this list. This impl makes + # an effort to only apply flags which the user did not already + # apply, so that combinations like (--all --disable-geopoly) + # will indeed disable geopoly. There are corner cases where + # flags which depend on each other will behave in non-intuitive + # ways: + # + # --all --disable-rtree + # + # Will NOT disable geopoly, though geopoly depends on rtree. + # The --geopoly flag, though, will automatically re-enable + # --rtree, so --disable-rtree won't actually disable anything in + # that case. + foreach k $::sqliteConfig(all-flag-enables) { + if {![proj-opt-was-provided $k]} { + proj-opt-set $k 1 + } + } + } + fts3 -DSQLITE_ENABLE_FTS3 {sqlite-affirm-have-math fts3} + fts4 -DSQLITE_ENABLE_FTS4 {sqlite-affirm-have-math fts4} + fts5 -DSQLITE_ENABLE_FTS5 {sqlite-affirm-have-math fts5} + geopoly -DSQLITE_ENABLE_GEOPOLY {proj-opt-set rtree} + rtree -DSQLITE_ENABLE_RTREE {} + session {-DSQLITE_ENABLE_SESSION -DSQLITE_ENABLE_PREUPDATE_HOOK} {} + update-limit -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT {} + memsys5 -DSQLITE_ENABLE_MEMSYS5 {} + memsys3 {} { + if {[opt-bool memsys5]} { + proj-warn "not enabling memsys3 because memsys5 is enabled." + expr 0 + } else { + sqlite-add-feature-flag -DSQLITE_ENABLE_MEMSYS3 + } + } + scanstatus -DSQLITE_ENABLE_STMT_SCANSTATUS {} + column-metadata -DSQLITE_ENABLE_COLUMN_METADATA {} + dbpage -DSQLITE_ENABLE_DBPAGE_VTAB {} + dbstat -DSQLITE_ENABLE_DBSTAT_VTAB {} + carray -DSQLITE_ENABLE_CARRAY {} + }] { + if {$boolFlag ni $::autosetup(options)} { + # Skip flags which are in the canonical build but not + # the autoconf bundle. + continue + } + proj-if-opt-truthy $boolFlag { + sqlite-add-feature-flag $featureFlag + if {0 != [eval $ifSetEvalThis] && "all" ne $boolFlag} { + msg-result " + $boolFlag" + } + } { + if {"all" ne $boolFlag} { + msg-result " - $boolFlag" + } + } + } + ######################################################################## + # Invert the above loop's logic for some SQLITE_OMIT_... cases. If + # config option $boolFlag is false, [sqlite-add-feature-flag + # $featureFlag], where $featureFlag is intended to be + # -DSQLITE_OMIT_... + foreach {boolFlag featureFlag} { + json -DSQLITE_OMIT_JSON + } { + if {[proj-opt-truthy $boolFlag]} { + msg-result " + $boolFlag" + } else { + sqlite-add-feature-flag $featureFlag + msg-result " - $boolFlag" + } + } +} + +######################################################################### +# Remove duplicates from the final feature flag sets and show them to +# the user. 
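+#
+# As a rough illustration: a configure invocation such as
+#
+#   ./configure --enable-fts5 --disable-json
+#
+# typically leaves OPT_FEATURE_FLAGS containing (among other entries)
+# -DSQLITE_ENABLE_FTS5 and -DSQLITE_OMIT_JSON, which this proc then
+# de-duplicates and reports via [msg-result].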
+proc sqlite-finalize-feature-flags {} { + set oFF [get-define OPT_FEATURE_FLAGS] + if {"" ne $oFF} { + define OPT_FEATURE_FLAGS [lsort -unique $oFF] + msg-result "Library feature flags: [get-define OPT_FEATURE_FLAGS]" + } + set oFF [get-define OPT_SHELL] + if {"" ne $oFF} { + define OPT_SHELL [lsort -unique $oFF] + msg-result "Shell options: [get-define OPT_SHELL]" + } + if {"" ne [set extraSrc [get-define AMALGAMATION_EXTRA_SRC ""]]} { + proj-assert {"canonical" eq $::sqliteConfig(build-mode)} + msg-result "Appending source files to amalgamation: $extraSrc" + } + if {[lsearch [get-define TARGET_DEBUG ""] -DSQLITE_DEBUG=1] > -1} { + msg-result "Note: this is a debug build, so performance will suffer." + } +} + +######################################################################## +# Checks for the --debug flag and [define]s TARGET_DEBUG based on +# that. TARGET_DEBUG is unused in the autoconf build but that is +# arguably a bug. +proc sqlite-handle-debug {} { + msg-checking "SQLITE_DEBUG build? " + proj-if-opt-truthy debug { + define TARGET_DEBUG {-g -DSQLITE_DEBUG=1 -O0 -Wall} + sqlite-add-feature-flag -DSQLITE_ENABLE_SELECTTRACE -DSQLITE_ENABLE_WHERETRACE + proj-opt-set memsys5 + msg-result yes + } { + define TARGET_DEBUG {-DNDEBUG} + msg-result no + } +} + +######################################################################## +# "soname" for libsqlite3.so. See discussion at: +# https://sqlite.org/src/forumpost/5a3b44f510df8ded +proc sqlite-handle-soname {} { + define LDFLAGS_LIBSQLITE3_SONAME "" + if {[proj-opt-was-provided soname]} { + set soname [join [opt-val soname] ""] + } else { + # Enabling soname breaks linking for the --dynlink-tools feature, + # and this project has no direct use for soname, so default to + # none. Package maintainers, on the other hand, like to have an + # soname. + set soname none + } + switch -exact -- $soname { + none - "" { return 0 } + legacy { set soname libsqlite3.so.0 } + default { + if {[string match libsqlite3.* $soname]} { + # use it as-is + } else { + # Assume it's a suffix + set soname "libsqlite3.so.${soname}" + } + } + } + proc-debug "soname=$soname" + if {[proj-check-soname $soname]} { + define LDFLAGS_LIBSQLITE3_SONAME [get-define LDFLAGS_SONAME_PREFIX]$soname + msg-result "Setting SONAME using: [get-define LDFLAGS_LIBSQLITE3_SONAME]" + } elseif {[proj-opt-was-provided soname]} { + # --soname was explicitly requested but not available, so fail fatally + proj-fatal "This environment does not support SONAME." + } else { + # --soname was not explicitly requested but not available, so just warn + msg-result "This environment does not support SONAME." + } +} + +######################################################################## +# If --enable-threadsafe is set, this adds -DSQLITE_THREADSAFE=1 to +# OPT_FEATURE_FLAGS and sets LDFLAGS_PTHREAD to the linker flags +# needed for linking pthread (possibly an empty string). If +# --enable-threadsafe is not set, adds -DSQLITE_THREADSAFE=0 to +# OPT_FEATURE_FLAGS and sets LDFLAGS_PTHREAD to an empty string. +proc sqlite-handle-threadsafe {} { + msg-checking "Support threadsafe operation? " + define LDFLAGS_PTHREAD "" + set enable 0 + proj-if-opt-truthy threadsafe { + msg-result "Checking for libs..." 
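+    # Illustrative note: when a check succeeds, [proj-check-function-in-lib]
+    # records any required -l flags in a lib_<function> define (here
+    # lib_pthread_create), which is harvested into LDFLAGS_PTHREAD below
+    # and may legitimately be an empty string.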
+ if {[proj-check-function-in-lib pthread_create pthread] + && [proj-check-function-in-lib pthread_mutexattr_init pthread]} { + set enable 1 + define LDFLAGS_PTHREAD [get-define lib_pthread_create] + undefine lib_pthread_create + undefine lib_pthread_mutexattr_init + } elseif {[proj-opt-was-provided threadsafe]} { + user-error "Missing required pthread libraries. Use --disable-threadsafe to disable this check." + } else { + msg-result "pthread support not detected" + } + # Recall that LDFLAGS_PTHREAD might be empty even if pthreads if + # found because it's in -lc on some platforms. + } { + msg-result "Disabled using --disable-threadsafe" + } + sqlite-add-feature-flag -DSQLITE_THREADSAFE=${enable} + return $enable +} + +######################################################################## +# Handles the --with-tempstore flag. +# +# The test fixture likes to set SQLITE_TEMP_STORE on its own, so do +# not set that feature flag unless it was explicitly provided to the +# configure script. +proc sqlite-handle-tempstore {} { + if {[proj-opt-was-provided with-tempstore]} { + set ts [opt-val with-tempstore no] + set tsn 1 + msg-checking "Use an in-RAM database for temporary tables? " + switch -exact -- $ts { + never { set tsn 0 } + no { set tsn 1 } + yes { set tsn 2 } + always { set tsn 3 } + default { + user-error "Invalid --with-tempstore value '$ts'. Use one of: never, no, yes, always" + } + } + msg-result $ts + sqlite-add-feature-flag -DSQLITE_TEMP_STORE=$tsn + } +} + +######################################################################## +# Check for the Emscripten SDK for building the web-based wasm +# components. The core lib and tools do not require this but ext/wasm +# does. Most of the work is done via [proj-check-emsdk], then this +# function adds the following defines: +# +# - EMCC_WRAPPER = "" or top-srcdir/tool/emcc.sh +# - BIN_WASM_OPT = "" or path to wasm-opt +# - BIN_WASM_STRIP = "" or path to wasm-strip +# +# Noting that: +# +# 1) Not finding the SDK is not fatal at this level, nor is failure to +# find one of the related binaries. +# +# 2) wasm-strip is part of the wabt package: +# +# https://github.com/WebAssembly/wabt +# +# and this project requires it for production-mode builds but not dev +# builds. +# +proc sqlite-handle-emsdk {} { + define EMCC_WRAPPER "" + define BIN_WASM_STRIP "" + define BIN_WASM_OPT "" + set srcdir $::autosetup(srcdir) + if {$srcdir ne $::autosetup(builddir)} { + # The EMSDK pieces require writing to the original source tree + # even when doing an out-of-tree build. The ext/wasm pieces do not + # support an out-of-tree build so we treat that case as if EMSDK + # were not found. + msg-result "Out-of tree build: not checking for EMSDK." + return + } + set emccSh $srcdir/tool/emcc.sh + set extWasmConfig $srcdir/ext/wasm/config.make + if {![get-define HAVE_WASI_SDK] && [proj-check-emsdk]} { + define EMCC_WRAPPER $emccSh + set emsdkHome [get-define EMSDK_HOME ""] + proj-assert {"" ne $emsdkHome} + #define EMCC_WRAPPER ""; # just for testing + proj-bin-define wasm-strip + proj-bin-define bash; # ext/wasm/GNUmakefile requires bash + if {[file-isexec $emsdkHome/upstream/bin/wasm-opt]} { + define BIN_WASM_OPT $emsdkHome/upstream/bin/wasm-opt + } else { + # Maybe there's a copy in the path? 
+ proj-bin-define wasm-opt BIN_WASM_OPT + } + proj-dot-ins-append $emccSh.in $emccSh { + catch {exec chmod u+x $dotInsOut} + } + proj-dot-ins-append $extWasmConfig.in $extWasmConfig + } else { + define EMCC_WRAPPER "" + file delete -force -- $emccSh $extWasmConfig + } +} + +######################################################################## +# Internal helper for [sqlite-check-line-editing]. Returns a list of +# potential locations under which readline.h might be found. +# +# On some environments this function may perform extra work to help +# sqlite-check-line-editing figure out how to find libreadline and +# friends. It will communicate those results via means other than the +# result value, e.g. by modifying configure --flags. +proc sqlite-get-readline-dir-list {} { + # Historical note: the dirs list, except for the inclusion of + # $prefix and some platform-specific dirs, originates from the + # legacy configure script. + set dirs [list [get-define prefix]] + switch -glob -- [get-define host] { + *-linux-android { + # Possibly termux + lappend dirs /data/data/com.termux/files/usr + } + *-mingw32 { + lappend dirs /mingw32 /mingw + } + *-mingw64 { + lappend dirs /mingw64 /mingw + } + *-haiku { + lappend dirs /boot/system/develop/headers + if {[opt-val with-readline-ldflags] in {auto ""}} { + # If the user did not supply their own --with-readline-ldflags + # value, hijack that flag to inject options which are known to + # work on Haiku OS installations. + if {"" ne [glob -nocomplain /boot/system/lib/libreadline*]} { + proj-opt-set with-readline-ldflags {-L/boot/system/lib -lreadline} + } + } + } + } + lappend dirs /usr /usr/local /usr/local/readline /usr/contrib + set rv {} + foreach d $dirs { + if {[file isdir $d]} {lappend rv $d} + } + #proc-debug "dirs=$rv" + return $rv +} + +######################################################################## +# sqlite-check-line-editing jumps through proverbial hoops to try to +# find a working line-editing library, setting: +# +# - HAVE_READLINE to 0 or 1 +# - HAVE_LINENOISE to 0, 1, or 2 +# - HAVE_EDITLINE to 0 or 1 +# +# Only one of ^^^ those will be set to non-0. +# +# - LDFLAGS_READLINE = linker flags or empty string +# +# - CFLAGS_READLINE = compilation flags for clients or empty string. +# +# Note that LDFLAGS_READLINE and CFLAGS_READLINE may refer to +# linenoise or editline, not necessarily libreadline. In some cases +# it will set HAVE_READLINE=1 when it's really using editline, for +# reasons described in this function's comments. +# +# Returns a string describing which line-editing approach to use, or +# "none" if no option is available. +# +# Order of checks: +# +# 1) --with-linenoise trumps all others and skips all of the +# complexities involved with the remaining options. +# +# 2) --editline trumps --readline +# +# 3) --disable-readline trumps --readline +# +# 4) Default to automatic search for optional readline +# +# 5) Try to find readline or editline. If it's not found AND the +# corresponding --FEATURE flag was explicitly given then fail +# fatally, else fail non-fatally. +proc sqlite-check-line-editing {} { + msg-result "Checking for line-editing capability..." + define HAVE_READLINE 0 + define HAVE_LINENOISE 0 + define HAVE_EDITLINE 0 + define LDFLAGS_READLINE "" + define CFLAGS_READLINE "" + set failIfNotFound 0 ; # Gets set to 1 for explicit --FEATURE requests + # so that we know whether to fail fatally or not + # if the library is not found. + set libsForReadline {readline edit} ; # -l names to check for readline(). 
+                                        # The libedit check changes this.
+  set editLibName "readline" ; # "readline" or "editline"
+  set editLibDef "HAVE_READLINE" ; # "HAVE_READLINE" or "HAVE_EDITLINE"
+  set dirLn [opt-val with-linenoise]
+  if {"" ne $dirLn} {
+    # Use linenoise from a copy of its sources (not a library)...
+    if {![file isdir $dirLn]} {
+      proj-fatal "--with-linenoise value is not a directory"
+    }
+    set lnH $dirLn/linenoise.h
+    if {![file exists $lnH] } {
+      proj-fatal "Cannot find linenoise.h in $dirLn"
+    }
+    set lnC ""
+    set lnCOpts {linenoise-ship.c linenoise.c}
+    foreach f $lnCOpts {
+      if {[file exists $dirLn/$f]} {
+        set lnC $dirLn/$f
+        break
+      }
+    }
+    if {"" eq $lnC} {
+      proj-fatal "Cannot find any of $lnCOpts in $dirLn"
+    }
+    set flavor ""
+    set lnVal [proj-which-linenoise $lnH]
+    switch -- $lnVal {
+      1 { set flavor "antirez" }
+      2 { set flavor "msteveb" }
+      default {
+        proj-fatal "Cannot determine the flavor of linenoise from $lnH"
+      }
+    }
+    define CFLAGS_READLINE "-I$dirLn $lnC"
+    define HAVE_LINENOISE $lnVal
+    sqlite-add-shell-opt -DHAVE_LINENOISE=$lnVal
+    if {$::sqliteConfig(use-jim-for-codegen) && 2 == $lnVal} {
+      define-append CFLAGS_JIMSH -DUSE_LINENOISE [get-define CFLAGS_READLINE]
+      user-notice "Adding linenoise support to jimsh."
+    }
+    return "linenoise ($flavor)"
+  } elseif {[opt-bool editline]} {
+    # libedit mimics libreadline and on some systems does not have its
+    # own header installed (instead, that of libreadline is used).
+    #
+    # shell.c historically expects HAVE_EDITLINE to be set for
+    # libedit, but it then expects to see <editline/readline.h>, which
+    # some systems don't actually have despite having libedit. If we
+    # end up finding <editline/readline.h> below, we will use
+    # -DHAVE_EDITLINE=1, else we will use -DHAVE_READLINE=1. In either
+    # case, we will link against libedit.
+    set failIfNotFound 1
+    set libsForReadline {edit}
+    set editLibName editline
+  } elseif {![opt-bool readline]} {
+    msg-result "Readline support explicitly disabled with --disable-readline"
+    return "none"
+  } elseif {[proj-opt-was-provided readline]} {
+    # If an explicit --[enable-]readline was used, fail if it's not
+    # found, else treat the feature as optional.
+    set failIfNotFound 1
+  }
+
+  # Transform with-readline-header=X to with-readline-cflags=-I...
+  set v [opt-val with-readline-header]
+  proj-opt-set with-readline-header ""
+  if {"" ne $v} {
+    if {"auto" eq $v} {
+      proj-opt-set with-readline-cflags auto
+    } else {
+      set v [file dirname $v]
+      if {[string match */readline $v]} {
+        # Special case: if the path includes .../readline/readline.h,
+        # set the -I to one dir up from that because our sources
+        # #include <readline/readline.h> or <editline/readline.h>.
+        set v [file dirname $v]
+      }
+      proj-opt-set with-readline-cflags "-I$v"
+    }
+  }
+
+  # Look for readline.h
+  set rlInc [opt-val with-readline-cflags auto]
+  if {"auto" eq $rlInc} {
+    set rlInc ""
+    if {$::sqliteConfig(is-cross-compiling)} {
+      # ^^^ this check is derived from the legacy configure script.
+      proj-warn "Skipping check for readline.h because we're cross-compiling."
+    } else {
+      set dirs [sqlite-get-readline-dir-list]
+      set subdirs [list \
+                     include/$editLibName \
+                     readline]
+      if {"editline" eq $editLibName} {
+        lappend subdirs include/readline
+        # ^^^ editline, on some systems, does not have its own header,
+        # and uses libreadline's header.
+      }
+      lappend subdirs include
+      set rlInc [proj-search-for-header-dir readline.h \
+                   -dirs $dirs -subdirs $subdirs]
+      #proc-debug "rlInc=$rlInc"
+      if {"" ne $rlInc} {
+        if {[string match */readline $rlInc]} {
+          set rlInc [file dirname $rlInc]; # CLI shell: #include <readline/readline.h>
+        } elseif {[string match */editline $rlInc]} {
+          set editLibDef HAVE_EDITLINE
+          set rlInc [file dirname $rlInc]; # CLI shell: #include <editline/readline.h>
+        }
+        set rlInc "-I${rlInc}"
+      }
+    }
+  } elseif {"" ne $rlInc && ![string match *-I* $rlInc]} {
+    proj-fatal "Argument to --with-readline-cflags is intended to be CFLAGS and contain -I..."
+  }
+
+  # If readline.h was found/specified, look for lib(readline|edit)...
+  #
+  # This is not quite straightforward because both libreadline and
+  # libedit typically require some other library which (according to
+  # legacy autotools-generated tests) provides tgetent(3). On some
+  # systems that's built into libreadline/edit, on some (most?) it's in
+  # lib[n]curses, and on some it's in libtermcap.
+  set rlLib ""
+  if {"" ne $rlInc} {
+    set rlLib [opt-val with-readline-ldflags]
+    #proc-debug "rlLib=$rlLib"
+    if {$rlLib in {auto ""}} {
+      set rlLib "" ; # make sure it's not "auto", as we may append to it below
+      set libTerm ""; # lib with tgetent(3)
+      if {[proj-check-function-in-lib tgetent [list $editLibName ncurses curses termcap]]} {
+        # ^^^ that libs list comes from the legacy configure script ^^^
+        set libTerm [get-define lib_tgetent]
+        undefine lib_tgetent
+      }
+      if {$editLibName eq $libTerm} {
+        # tgetent(3) was found in the editing library
+        set rlLib $libTerm
+      } elseif {[proj-check-function-in-lib readline $libsForReadline $libTerm]} {
+        # tgetent(3) was found in an external lib
+        set rlLib [get-define lib_readline]
+        lappend rlLib $libTerm
+        undefine lib_readline
+      }
+    }
+  }
+
+  # If we found a library, configure the build to use it...
+  if {"" ne $rlLib} {
+    if {"editline" eq $editLibName && "HAVE_READLINE" eq $editLibDef} {
+      # Alert the user that, despite outward appearances, we won't be
+      # linking to the GPL'd libreadline. Presumably that distinction is
+      # significant for those using --editline.
+      proj-indented-notice {
+        NOTE: the local libedit uses <readline/readline.h> so we
+        will compile with -DHAVE_READLINE=1 but will link with
+        libedit.
+      }
+    }
+    set rlLib [join $rlLib]
+    set rlInc [join $rlInc]
+    define LDFLAGS_READLINE $rlLib
+    define CFLAGS_READLINE $rlInc
+    proj-assert {$editLibDef in {HAVE_READLINE HAVE_EDITLINE}}
+    proj-assert {$editLibName in {readline editline}}
+    sqlite-add-shell-opt -D${editLibDef}=1
+    msg-result "Using $editLibName flags: $rlInc $rlLib"
+    # Check whether rl_completion_matches() has a signature we can use
+    # and disable that sub-feature if it doesn't.
+    if {![cctest -cflags "$rlInc -D${editLibDef}" -libs $rlLib -nooutput 1 \
+           -source {
+             #include <stdio.h>
+             #ifdef HAVE_EDITLINE
+             #include <editline/readline.h>
+             #else
+             #include <readline/readline.h>
+             #endif
+             static char * rcg(const char *z, int i){(void)z; (void)i; return 0;}
+             int main(void) {
+               char ** x = rl_completion_matches("one", rcg);
+               (void)x;
+               return 0;
+             }
+           }]} {
+      proj-warn "readline-style completion disabled due to rl_completion_matches() signature mismatch"
+      sqlite-add-shell-opt -DSQLITE_OMIT_READLINE_COMPLETION
+    }
+    return $editLibName
+  }
+
+  if {$failIfNotFound} {
+    proj-fatal "Explicit --$editLibName failed to find a matching library."
+  }
+  return "none"
+}; # sqlite-check-line-editing
+
+########################################################################
+# Runs sqlite-check-line-editing and adds a message around it.
In the +# canonical build this must not be called before +# sqlite-determine-codegen-tcl for reasons now lost to history (and +# might not still be applicable). +proc sqlite-handle-line-editing {} { + msg-result "Line-editing support for the sqlite3 shell: [sqlite-check-line-editing]" +} + + +######################################################################## +# ICU - International Components for Unicode +# +# Handles these flags: +# +# --with-icu-ldflags=LDFLAGS +# --with-icu-cflags=CFLAGS +# --with-icu-config[=auto | pkg-config | /path/to/icu-config] +# --enable-icu-collations +# +# --with-icu-config values: +# +# - auto: use the first one of (pkg-config, icu-config) found on the +# system. +# - pkg-config: use only pkg-config to determine flags +# - /path/to/icu-config: use that to determine flags +# +# If --with-icu-config is used and neither pkg-config nor icu-config +# are found, fail fatally. +# +# If both --with-icu-ldflags and --with-icu-config are provided, they +# are cumulative. If neither are provided, icu-collations is not +# honored and a warning is emitted if it is provided. +# +# Design note: though we could automatically enable ICU if the +# icu-config binary or (pkg-config icu-io) are found, we specifically +# do not. ICU is always an opt-in feature. +proc sqlite-handle-icu {} { + define LDFLAGS_ICU [join [opt-val with-icu-ldflags ""]] + define CFLAGS_ICU [join [opt-val with-icu-cflags ""]] + if {[proj-opt-was-provided with-icu-config]} { + msg-result "Checking for ICU support..." + set icuConfigBin [opt-val with-icu-config] + set tryIcuConfigBin 1; # set to 0 if we end up using pkg-config + if {$icuConfigBin in {auto pkg-config}} { + if {[pkg-config-init 0] && [pkg-config icu-io]} { + # Maintenance reminder: historical docs say to use both of + # (icu-io, icu-uc). icu-uc lacks a required lib and icu-io has + # all of them on tested OSes. + set tryIcuConfigBin 0 + define LDFLAGS_ICU [get-define PKG_ICU_IO_LDFLAGS] + define-append LDFLAGS_ICU [get-define PKG_ICU_IO_LIBS] + define CFLAGS_ICU [get-define PKG_ICU_IO_CFLAGS] + } elseif {"pkg-config" eq $icuConfigBin} { + proj-fatal "pkg-config cannot find package icu-io" + } else { + proj-assert {"auto" eq $icuConfigBin} + } + } + if {$tryIcuConfigBin} { + if {"auto" eq $icuConfigBin} { + set icuConfigBin [proj-first-bin-of \ + /usr/local/bin/icu-config \ + /usr/bin/icu-config] + if {"" eq $icuConfigBin} { + proj-indented-notice -error { + --with-icu-config=auto cannot find (pkg-config icu-io) or icu-config binary. + On Ubuntu-like systems try: + --with-icu-ldflags='-licui18n -licuuc -licudata' + } + } + } + if {[file-isexec $icuConfigBin]} { + set x [exec $icuConfigBin --ldflags] + if {"" eq $x} { + proj-indented-notice -error \ + [subst { + $icuConfigBin --ldflags returned no data. + On Ubuntu-like systems try: + --with-icu-ldflags='-licui18n -licuuc -licudata' + }] + } + define-append LDFLAGS_ICU $x + set x [exec $icuConfigBin --cppflags] + define-append CFLAGS_ICU $x + } else { + proj-fatal "--with-icu-config=$icuConfigBin does not refer to an executable" + } + } + } + set ldflags [define LDFLAGS_ICU [string trim [get-define LDFLAGS_ICU]]] + set cflags [define CFLAGS_ICU [string trim [get-define CFLAGS_ICU]]] + if {"" ne $ldflags} { + sqlite-add-feature-flag -shell -DSQLITE_ENABLE_ICU + msg-result "Enabling ICU support with flags: $ldflags $cflags" + if {[opt-bool icu-collations]} { + msg-result "Enabling ICU collations." 
+ sqlite-add-feature-flag -shell -DSQLITE_ENABLE_ICU_COLLATIONS + # Recall that shell.c builds with sqlite3.c except in the case + # of --disable-static-shell, a combination we do not + # specifically attempt to account for. + } + } elseif {[opt-bool icu-collations]} { + proj-warn "ignoring --enable-icu-collations because neither --with-icu-ldflags nor --with-icu-config provided any linker flags" + } else { + msg-result "ICU support is disabled." + } +}; # sqlite-handle-icu + + +######################################################################## +# Handles the --enable-load-extension flag. Returns 1 if the support +# is enabled, else 0. If support for that feature is not found, a +# fatal error is triggered if --enable-load-extension is explicitly +# provided, else a loud warning is instead emitted. If +# --disable-load-extension is used, no check is performed. +# +# Makes the following environment changes: +# +# - defines LDFLAGS_DLOPEN to any linker flags needed for this +# feature. It may legally be empty on (A) some systems where +# dlopen() is in libc and (B) certain Unix-esque Windows +# environments which identify as Windows for SQLite's purposes so +# use LoadLibrary(). +# +# - If the feature is not available, adds +# -DSQLITE_OMIT_LOAD_EXTENSION=1 to the feature flags list. +proc sqlite-handle-load-extension {} { + define LDFLAGS_DLOPEN "" + set found 0 + set suffix "" + proj-if-opt-truthy load-extension { + switch -glob -- [get-define host] { + *-*-mingw* - *windows* { + incr found + set suffix "Using LoadLibrary()" + } + default { + set found [proj-check-function-in-lib dlopen dl] + if {$found} { + set suffix [define LDFLAGS_DLOPEN [get-define lib_dlopen]] + undefine lib_dlopen + } else { + if {[proj-opt-was-provided load-extension]} { + # Explicit --enable-load-extension: fail if not found + proj-indented-notice -error { + --enable-load-extension was provided but dlopen() + not found. Use --disable-load-extension to bypass this + check. + } + } else { + # It was implicitly enabled: warn if not found + proj-indented-notice { + WARNING: dlopen() not found, so loadable module support will + be disabled. Use --disable-load-extension to bypass this + check. + } + } + } + } + } + } + if {$found} { + msg-result "Loadable extension support enabled. $suffix" + } else { + msg-result "Disabling loadable extension support. Use --enable-load-extension to enable them." + sqlite-add-feature-flag -DSQLITE_OMIT_LOAD_EXTENSION=1 + } + return $found +} + +######################################################################## +# Handles the --enable-math flag. +proc sqlite-handle-math {} { + proj-if-opt-truthy math { + if {![proj-check-function-in-lib ceil m]} { + user-error "Cannot find libm functions. Use --disable-math to bypass this." + } + define LDFLAGS_MATH [get-define lib_ceil] + undefine lib_ceil + sqlite-add-feature-flag -DSQLITE_ENABLE_MATH_FUNCTIONS -DSQLITE_ENABLE_PERCENTILE + msg-result "Enabling math SQL functions" + } { + define LDFLAGS_MATH "" + msg-result "Disabling math SQL functions" + } +} + +######################################################################## +# If this OS looks like a Mac, checks for the Mac-specific +# -current_version and -compatibility_version linker flags. Defines +# LDFLAGS_MAC_CVERSION to an empty string and returns 0 if they're not +# supported, else defines that to the linker flags and returns 1. 
+# +# We don't check this on non-Macs because this whole thing is a +# libtool compatibility kludge to account for a version stamp which +# libtool applied only on Mac platforms. +# +# Based on https://sqlite.org/forum/forumpost/9dfd5b8fd525a5d7. +proc sqlite-handle-mac-cversion {} { + define LDFLAGS_MAC_CVERSION "" + set rc 0 + if {[proj-looks-like-mac]} { + cc-with {-link 1} { + # These version numbers are historical libtool-defined values, not + # library-defined ones + if {[cc-check-flags "-Wl,-current_version,9.6.0"] + && [cc-check-flags "-Wl,-compatibility_version,9.0.0"]} { + define LDFLAGS_MAC_CVERSION "-Wl,-compatibility_version,9.0.0 -Wl,-current_version,9.6.0" + set rc 1 + } elseif {[cc-check-flags "-compatibility_version 9.0.0"] + && [cc-check-flags "-current_version 9.6.0"]} { + define LDFLAGS_MAC_CVERSION "-compatibility_version 9.0.0 -current_version 9.6.0" + set rc 1 + } + } + } + return $rc +} + +######################################################################## +# If this is a Mac platform, check for support for +# -Wl,-install_name,... and, if it's available, define +# LDFLAGS_MAC_INSTALL_NAME to a variant of that string which is +# intended to expand at make-time, else set LDFLAGS_MAC_INSTALL_NAME +# to an empty string. +# +# https://sqlite.org/forum/forumpost/5651662b8875ec0a +proc sqlite-handle-mac-install-name {} { + define LDFLAGS_MAC_INSTALL_NAME ""; # {-Wl,-install_name,"$(install-dir.lib)/$(libsqlite3.DLL)"} + set rc 0 + if {[proj-looks-like-mac]} { + cc-with {-link 1} { + if {[cc-check-flags "-Wl,-install_name,/usr/local/lib/libsqlite3.dylib"]} { + define LDFLAGS_MAC_INSTALL_NAME {-Wl,-install_name,"$(install-dir.lib)/$(libsqlite3.DLL)"} + set rc 1 + } + } + } + return $rc +} + +# +# Checks specific to HP-UX. +# +proc sqlite-handle-hpux {} { + switch -glob -- [get-define host] { + *hpux* { + if {[cc-check-flags "-Ae"]} { + define-append CFLAGS -Ae + } + } + } +} + +######################################################################## +# Handles the --dll-basename configure flag. [define]'s +# SQLITE_DLL_BASENAME to the DLL's preferred base name (minus +# extension). If --dll-basename is not provided (or programmatically +# set - see [sqlite-handle-env-quirks]) then this is always +# "libsqlite3", otherwise it may use a different value based on the +# value of [get-define host]. +proc sqlite-handle-dll-basename {} { + if {[proj-opt-was-provided dll-basename]} { + set dn [join [opt-val dll-basename] ""] + if {$dn in {none default}} { set dn libsqlite3 } + } else { + set dn libsqlite3 + } + if {$dn in {auto ""}} { + switch -glob -- [get-define host] { + *-*-cygwin { set dn cygsqlite3-0 } + *-*-ming* { set dn libsqlite3-0 } + *-*-msys { set dn msys-sqlite3-0 } + default { set dn libsqlite3 } + } + } + define SQLITE_DLL_BASENAME $dn +} + +######################################################################## +# [define]s LDFLAGS_OUT_IMPLIB to either an empty string or to a +# -Wl,... flag for the platform-specific --out-implib flag, which is +# used for building an "import library .dll.a" file on some platforms +# (e.g. msys2, mingw). SQLITE_OUT_IMPLIB is defined to the name of the +# import lib or an empty string. Returns 1 if supported, else 0. +# +# The name of the import library is [define]d in SQLITE_OUT_IMPLIB. 
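+#
+# As a rough illustration (assuming a mingw-style host where
+# TARGET_DLLEXT is ".dll"): --out-implib=auto typically results in
+#
+#   SQLITE_OUT_IMPLIB  = libsqlite3.dll.a
+#   LDFLAGS_OUT_IMPLIB = -Wl,--out-implib,libsqlite3.dll.a
+#
+# with the exact linker flag spelling determined by
+# [proj-cc-check-Wl-flag].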
+# +# If the configure flag --out-implib is not used (or programmatically +# set) then this simply sets the above-listed defines to empty strings +# (but see [sqlite-handle-env-quirks]). If that flag is used but the +# capability is not available, a fatal error is triggered. +# +# This feature is specifically opt-in because it's supported on far +# more platforms than actually need it and enabling it causes creation +# of libsqlite3.so.a files which are unnecessary in most environments. +# +# Added in response to: https://sqlite.org/forum/forumpost/0c7fc097b2 +# +# Platform notes: +# +# - cygwin sqlite packages historically install no .dll.a file. +# +# - msys2 and mingw sqlite packages historically install +# /usr/lib/libsqlite3.dll.a despite the DLL being in +# /usr/bin. +proc sqlite-handle-out-implib {} { + define LDFLAGS_OUT_IMPLIB "" + define SQLITE_OUT_IMPLIB "" + set rc 0 + if {[proj-opt-was-provided out-implib]} { + set olBaseName [join [opt-val out-implib] ""] + if {$olBaseName in {auto ""}} { + set olBaseName "libsqlite3" ;# [get-define SQLITE_DLL_BASENAME] + # Based on discussions with mingw/msys users, the import lib + # should always be called libsqlite3.dll.a even on platforms + # which rename libsqlite3.dll to something else. + } + if {$olBaseName ne "none"} { + cc-with {-link 1} { + set dll "${olBaseName}[get-define TARGET_DLLEXT]" + set flags [proj-cc-check-Wl-flag --out-implib ${dll}.a] + if {"" ne $flags} { + define LDFLAGS_OUT_IMPLIB $flags + define SQLITE_OUT_IMPLIB ${dll}.a + set rc 1 + } + } + if {!$rc} { + user-error "--out-implib is not supported on this platform" + } + } + } + return $rc +} + +######################################################################## +# If the given platform identifier (defaulting to [get-define host]) +# appears to be one of the Unix-on-Windows environments, returns a +# brief symbolic name for that environment, else returns an empty +# string. +# +# It does not distinguish between msys and msys2, returning msys for +# both. The build does not, as of this writing, specifically support +# msys v1. Similarly, this function returns "mingw" for both "mingw32" +# and "mingw64". +proc sqlite-env-is-unix-on-windows {{envTuple ""}} { + if {"" eq $envTuple} { + set envTuple [get-define host] + } + set name "" + switch -glob -- $envTuple { + *-*-cygwin { set name cygwin } + *-*-ming* { set name mingw } + *-*-msys { set name msys } + } + return $name +} + +######################################################################## +# Performs various tweaks to the build which are only relevant on +# certain platforms, e.g. Mac and "Unix on Windows" platforms (msys2, +# cygwin, ...). +# +# 1) DLL installation: +# +# [define]s SQLITE_DLL_INSTALL_RULES to a symbolic name suffix for a +# set of "make install" rules to use for installation of the DLL +# deliverable. The makefile is tasked with providing rules named +# install-dll-NAME which runs the installation for that set, as well +# as providing a rule named install-dll which resolves to +# install-dll-NAME (perhaps indirectly, depending on whether the DLL +# is (de)activated). +# +# The default value is "unix-generic". +# +# 2) --out-implib: +# +# On platforms where an "import library" is conventionally used but +# --out-implib was not explicitly used, automatically add that flag. +# This conventionally applies only to the "Unix on Windows" +# environments like msys and cygwin. 
+# +# 3) --dll-basename: +# +# On the same platforms addressed by --out-implib, if --dll-basename +# is not explicitly specified, --dll-basename=auto is implied. +proc sqlite-handle-env-quirks {} { + set instName unix-generic; # name of installation rules set + set autoDll 0; # true if --out-implib/--dll-basename should be implied + set host [get-define host] + switch -glob -- $host { + *apple* - + *darwin* { set instName darwin } + default { + set x [sqlite-env-is-unix-on-windows $host] + if {"" ne $x} { + set instName $x + set autoDll 1 + } + } + } + define SQLITE_DLL_INSTALL_RULES $instName + if {$autoDll} { + if {![proj-opt-was-provided out-implib]} { + # Imply --out-implib=auto + proj-indented-notice [subst -nocommands -nobackslashes { + NOTICE: auto-enabling --out-implib for environment [$host]. + Use --out-implib=none to disable this special case + or --out-implib=auto to squelch this notice. + }] + proj-opt-set out-implib auto + } + if {![proj-opt-was-provided dll-basename]} { + # Imply --dll-basename=auto + proj-indented-notice [subst -nocommands -nobackslashes { + NOTICE: auto-enabling --dll-basename for environment [$host]. + Use --dll-basename=default to disable this special case + or --dll-basename=auto to squelch this notice. + }] + proj-opt-set dll-basename auto + } + } + sqlite-handle-dll-basename + sqlite-handle-out-implib + sqlite-handle-mac-cversion + sqlite-handle-mac-install-name + if {[llength [info proc sqlite-custom-handle-flags]] > 0} { + # sqlite-custom-handle-flags is assumed to be imported via a + # client-specific import: autosetup/sqlite-custom.tcl. + sqlite-custom-handle-flags + } +} + +######################################################################## +# Perform some late-stage work and generate the configure-process +# output file(s). +proc sqlite-process-dot-in-files {} { + ######################################################################## + # "Re-export" the autoconf-conventional --XYZdir flags into something + # which is more easily overridable from a make invocation. See the docs + # for [proj-remap-autoconf-dir-vars] for the explanation of why. + # + # We do this late in the config process, immediately before we export + # the Makefile and other generated files, so that configure tests + # which make make use of the autotools-conventional flags + # (e.g. [proj-check-rpath]) may do so before we "mangle" them here. + proj-remap-autoconf-dir-vars + + proj-dot-ins-process -validate + make-config-header sqlite_cfg.h \ + -bare {SIZEOF_* HAVE_DECL_*} \ + -none {HAVE_CFLAG_* LDFLAGS_* SH_* SQLITE_AUTORECONFIG + TARGET_* USE_GCOV TCL_*} \ + -auto {HAVE_* PACKAGE_*} \ + -none * + proj-touch sqlite_cfg.h ; # help avoid frequent unnecessary @SQLITE_AUTORECONFIG@ +} + +######################################################################## +# Handle --with-wasi-sdk[=DIR] +# +# This must be run relatively early on because it may change the +# toolchain and disable a number of config options. However, in the +# canonical build this must come after [sqlite-check-common-bins]. +proc sqlite-handle-wasi-sdk {} { + set wasiSdkDir [opt-val with-wasi-sdk] ; # ??? [lindex [opt-val with-wasi-sdk] end] + define HAVE_WASI_SDK 0 + if {$wasiSdkDir eq ""} { + return 0 + } elseif {$::sqliteConfig(is-cross-compiling)} { + proj-fatal "Cannot combine --with-wasi-sdk with cross-compilation" + } + msg-result "Checking WASI SDK directory \[$wasiSdkDir]... 
" + proj-affirm-files-exist -v {*}[prefix "$wasiSdkDir/bin/" {clang wasm-ld ar}] + define HAVE_WASI_SDK 1 + define WASI_SDK_DIR $wasiSdkDir + # Disable numerous options which we know either can't work or are + # not useful in this build... + msg-result "Using wasi-sdk clang. Disabling CLI shell and modifying config flags:" + # Boolean (--enable-/--disable-) flags which must be switched off: + foreach opt { + dynlink-tools + editline + gcov + icu-collations + load-extension + readline + shared + tcl + threadsafe + } { + if {[proj-opt-exists $opt] && [opt-bool $opt]} { + # -^^^^ not all builds define all of these flags + msg-result " --disable-$opt" + proj-opt-set $opt 0 + } + } + # Non-boolean flags which need to be cleared: + foreach opt { + with-emsdk + with-icu-config + with-icu-ldflags + with-icu-cflags + with-linenoise + with-tcl + } { + if {[proj-opt-was-provided $opt]} { + msg-result " removing --$opt" + proj-opt-set $opt "" + } + } + # Remember that we now have a discrepancy between + # $::sqliteConfig(is-cross-compiling) and [proj-is-cross-compiling]. + set ::sqliteConfig(is-cross-compiling) 1 + + # + # Changing --host and --target have no effect here except to + # possibly cause confusion. Autosetup has finished processing them + # by this point. + # + # host_alias=wasm32-wasi + # target=wasm32-wasi + # + # Merely changing CC, LD, and AR to the wasi-sdk's is enough to get + # sqlite3.o building in WASM format. + # + define CC "${wasiSdkDir}/bin/clang" + define LD "${wasiSdkDir}/bin/wasm-ld" + define AR "${wasiSdkDir}/bin/ar" + #define STRIP "${wasiSdkDir}/bin/strip" + return 1 +}; # sqlite-handle-wasi-sdk + +######################################################################## +# TCL... +# +# sqlite-check-tcl performs most of the --with-tcl and --with-tclsh +# handling. Some related bits and pieces are performed before and +# after that function is called. +# +# Important [define]'d vars: +# +# - HAVE_TCL indicates whether we have a tclsh suitable for building +# the TCL SQLite extension and, by extension, the testing +# infrastructure. This must only be 1 for environments where +# tclConfig.sh can be found. +# +# - TCLSH_CMD is the path to the canonical tclsh or "". It never +# refers to jimtcl. +# +# - TCL_CONFIG_SH is the path to tclConfig.sh or "". +# +# - TCLLIBDIR is the dir to which libtclsqlite3 gets installed. +# +# - BTCLSH = the path to the tcl interpreter used for in-tree code +# generation. It may be jimtcl or the canonical tclsh but may not +# be empty - this tree requires TCL to generated numerous +# components. +# +# If --tcl or --with-tcl are provided but no TCL is found, this +# function fails fatally. If they are not explicitly provided then +# failure to find TCL is not fatal but a loud warning will be emitted. +# +proc sqlite-check-tcl {} { + define TCLSH_CMD false ; # Significant is that it exits with non-0 + define HAVE_TCL 0 ; # Will be enabled via --tcl or a successful search + define TCLLIBDIR "" ; # Installation dir for TCL extension lib + define TCL_CONFIG_SH ""; # full path to tclConfig.sh + + # Clear out all vars which would harvest from tclConfig.sh so that + # the late-config validation of @VARS@ works even if --disable-tcl + # is used. + proj-tclConfig-sh-to-autosetup "" + + file delete -force ".tclenv.sh"; # ensure no stale state from previous configures. + if {![opt-bool tcl]} { + proj-indented-notice { + NOTE: TCL is disabled via --disable-tcl. 
This means that none + of the TCL-based components will be built, including tests + and sqlite3_analyzer. + } + return + } + # TODO: document the steps this is taking. + set srcdir $::autosetup(srcdir) + msg-result "Checking for a suitable tcl... " + proj-assert [proj-opt-truthy tcl] + set use_tcl 1 + set with_tclsh [opt-val with-tclsh] + set with_tcl [opt-val with-tcl] + if {"prefix" eq $with_tcl} { + set with_tcl [get-define prefix] + } + proc-debug "use_tcl ${use_tcl}" + proc-debug "with_tclsh=${with_tclsh}" + proc-debug "with_tcl=$with_tcl" + if {"" eq $with_tclsh && "" eq $with_tcl} { + # If neither --with-tclsh nor --with-tcl are provided, try to find + # a workable tclsh. + set with_tclsh [proj-first-bin-of tclsh9.1 tclsh9.0 tclsh8.6 tclsh] + proc-debug "with_tclsh=${with_tclsh}" + } + + set doConfigLookup 1 ; # set to 0 to test the tclConfig.sh-not-found cases + if {"" ne $with_tclsh} { + # --with-tclsh was provided or found above. Validate it and use it + # to trump any value passed via --with-tcl=DIR. + if {![file-isexec $with_tclsh]} { + proj-fatal "TCL shell $with_tclsh is not executable" + } else { + define TCLSH_CMD $with_tclsh + #msg-result "Using tclsh: $with_tclsh" + } + if {$doConfigLookup && + [catch {exec $with_tclsh $::autosetup(libdir)/find_tclconfig.tcl} result] == 0} { + set with_tcl $result + } + if {"" ne $with_tcl && [file isdir $with_tcl]} { + msg-result "$with_tclsh recommends the tclConfig.sh from $with_tcl" + } else { + proj-warn "$with_tclsh is unable to recommend a tclConfig.sh" + set use_tcl 0 + } + } + set cfg "" + set tclSubdirs {tcl9.1 tcl9.0 tcl8.6 lib} + while {$use_tcl} { + if {"" ne $with_tcl} { + # Ensure that we can find tclConfig.sh under ${with_tcl}/... + if {$doConfigLookup} { + if {[file readable "${with_tcl}/tclConfig.sh"]} { + set cfg "${with_tcl}/tclConfig.sh" + } else { + foreach i $tclSubdirs { + if {[file readable "${with_tcl}/$i/tclConfig.sh"]} { + set cfg "${with_tcl}/$i/tclConfig.sh" + break + } + } + } + } + if {"" eq $cfg} { + proj-fatal "No tclConfig.sh found under ${with_tcl}" + } + } else { + # If we have not yet found a tclConfig.sh file, look in $libdir + # which is set automatically by autosetup or via the --prefix + # command-line option. See + # https://sqlite.org/forum/forumpost/e04e693439a22457 + set libdir [get-define libdir] + if {[file readable "${libdir}/tclConfig.sh"]} { + set cfg "${libdir}/tclConfig.sh" + } else { + foreach i $tclSubdirs { + if {[file readable "${libdir}/$i/tclConfig.sh"]} { + set cfg "${libdir}/$i/tclConfig.sh" + break + } + } + } + if {![file readable $cfg]} { + break + } + } + msg-result "Using tclConfig.sh: $cfg" + break + } + define TCL_CONFIG_SH $cfg + # Export a subset of tclConfig.sh to the current TCL-space. If $cfg + # is an empty string, this emits empty-string entries for the + # various options we're interested in. + proj-tclConfig-sh-to-autosetup $cfg + + if {"" eq $with_tclsh && $cfg ne ""} { + # We have tclConfig.sh but no tclsh. Attempt to locate a tclsh + # based on info from tclConfig.sh. 
+    set tclExecPrefix [get-define TCL_EXEC_PREFIX]
+    proj-assert {"" ne $tclExecPrefix}
+    set tryThese [list \
+                    $tclExecPrefix/bin/tclsh[get-define TCL_VERSION] \
+                    $tclExecPrefix/bin/tclsh ]
+    foreach trySh $tryThese {
+      if {[file-isexec $trySh]} {
+        set with_tclsh $trySh
+        break
+      }
+    }
+    if {![file-isexec $with_tclsh]} {
+      proj-warn "Cannot find a usable tclsh (tried: $tryThese)"
+    }
+  }
+  define TCLSH_CMD $with_tclsh
+  if {$use_tcl} {
+    # Set up the TCLLIBDIR
+    #
+    # 2024-10-28: calculation of TCLLIBDIR is now done via the shell
+    # in main.mk (search it for T.tcl.env.sh) so that
+    # static/hand-written makefiles which import main.mk do not have
+    # to define that before importing main.mk. Even so, we export
+    # TCLLIBDIR from here, which will cause the canonical makefile to
+    # use this one rather than to re-calculate it at make-time.
+    set tcllibdir [get-env TCLLIBDIR ""]
+    set sq3Ver [get-define PACKAGE_VERSION]
+    if {"" eq $tcllibdir} {
+      # Attempt to extract TCLLIBDIR from TCL's $auto_path
+      if {"" ne $with_tclsh &&
+          [catch {exec echo "puts stdout \$auto_path" | "$with_tclsh"} result] == 0} {
+        foreach i $result {
+          if {[file isdir $i]} {
+            set tcllibdir $i/sqlite${sq3Ver}
+            break
+          }
+        }
+      } else {
+        proj-warn "Cannot determine TCLLIBDIR."
+        # The makefile will fail fatally in this case if a target is
+        # invoked which requires TCLLIBDIR.
+      }
+    }
+    #if {"" ne $tcllibdir} { msg-result "TCLLIBDIR = ${tcllibdir}"; }
+    define TCLLIBDIR $tcllibdir
+  }; # find TCLLIBDIR
+
+  if {[file-isexec $with_tclsh]} {
+    msg-result "Using tclsh: $with_tclsh"
+    if {$cfg ne ""} {
+      define HAVE_TCL 1
+    } else {
+      proj-warn "Found tclsh but no tclConfig.sh."
+    }
+  }
+  show-notices
+  # If TCL is not found: if it was explicitly requested then fail
+  # fatally, else just emit a warning. If we can find the APIs needed
+  # to generate a working JimTCL then that will suffice for build-time
+  # TCL purposes (see: proc sqlite-determine-codegen-tcl).
+  if {![get-define HAVE_TCL] &&
+      ([proj-opt-was-provided tcl] || [proj-opt-was-provided with-tcl])} {
+    proj-fatal "TCL support was requested but no tclConfig.sh could be found."
+  }
+  if {"" eq $cfg} {
+    proj-assert {0 == [get-define HAVE_TCL]}
+    proj-indented-notice {
+      WARNING: Cannot find a usable tclConfig.sh file. Use
+      --with-tcl=DIR to specify a directory where tclConfig.sh can be
+      found. SQLite does not use TCL internally, but some optional
+      components require TCL, including tests and sqlite3_analyzer.
+    }
+  }
+}; # sqlite-check-tcl
+
+########################################################################
+# sqlite-determine-codegen-tcl checks which TCL to use as a code
+# generator. By default, prefer jimsh simply because we have it
+# in-tree (it's part of autosetup) unless --with-tclsh=X is used, in
+# which case prefer X.
+#
+# Returns the human-readable name of the TCL it selects. Fails fatally
+# if it cannot detect a TCL appropriate for code generation.
+#
+# Defines:
+#
+# - BTCLSH = the TCL shell used for code generation. It may set this
+#   to an unexpanded makefile var name.
+#
+# - CFLAGS_JIMSH = any flags needed for building a BTCLSH-compatible
+#   jimsh. The defaults may be passed on to configure as
+#   CFLAGS_JIMSH=...
+proc sqlite-determine-codegen-tcl {} {
+  msg-result "Checking for TCL to use for code generation... 
" + define CFLAGS_JIMSH [proj-get-env CFLAGS_JIMSH {-O1}] + set cgtcl [opt-val with-tclsh jimsh] + if {"jimsh" ne $cgtcl} { + # When --with-tclsh=X is used, use that for all TCL purposes, + # including in-tree code generation, per developer request. + define BTCLSH "\$(TCLSH_CMD)" + return $cgtcl + } + set flagsToRestore {CC CFLAGS AS_CFLAGS CPPFLAGS AS_CPPFLAGS LDFLAGS LINKFLAGS LIBS CROSS} + define-push $flagsToRestore { + # We have to swap CC to CC_FOR_BUILD for purposes of the various + # [cc-...] tests below. Recall that --with-wasi-sdk may have + # swapped out CC with one which is not appropriate for this block. + # Per consulation with autosetup's creator, doing this properly + # requires us to [define-push] the whole $flagsToRestore list + # (plus a few others which are not relevant in this tree). + # + # These will get set to their previous values at the end of this + # block. + foreach flag $flagsToRestore {define $flag ""} + define CC [get-define CC_FOR_BUILD] + # These headers are technically optional for JimTCL but necessary if + # we want to use it for code generation: + set sysh [cc-check-includes dirent.h sys/time.h] + # jimsh0.c hard-codes #define's for HAVE_DIRENT_H and + # HAVE_SYS_TIME_H on the platforms it supports, so we do not + # need to add -D... flags for those. We check for them here only + # so that we can avoid the situation that we later, at + # make-time, try to compile jimsh but it then fails due to + # missing headers (i.e. fail earlier rather than later). + if {$sysh && [cc-check-functions realpath]} { + define-append CFLAGS_JIMSH -DHAVE_REALPATH + define BTCLSH "\$(JIMSH)" + set ::sqliteConfig(use-jim-for-codegen) 1 + } elseif {$sysh && [cc-check-functions _fullpath]} { + # _fullpath() is a Windows API. It's not entirely clear + # whether we need to add {-DHAVE_SYS_TIME_H -DHAVE_DIRENT_H} + # to CFLAGS_JIMSH in this case. On MinGW32 we definitely do + # not want to because it already hard-codes them. On _MSC_VER + # builds it does not. + define-append CFLAGS_JIMSH -DHAVE__FULLPATH + define BTCLSH "\$(JIMSH)" + set ::sqliteConfig(use-jim-for-codegen) 1 + } elseif {[file-isexec [get-define TCLSH_CMD]]} { + set cgtcl [get-define TCLSH_CMD] + define BTCLSH "\$(TCLSH_CMD)" + } else { + # One last-ditch effort to find TCLSH_CMD: use info from + # tclConfig.sh to try to find a tclsh + if {"" eq [get-define TCLSH_CMD]} { + set tpre [get-define TCL_EXEC_PREFIX] + if {"" ne $tpre} { + set tv [get-define TCL_VERSION] + if {[file-isexec "${tpre}/bin/tclsh${tv}"]} { + define TCLSH_CMD "${tpre}/bin/tclsh${tv}" + } elseif {[file-isexec "${tpre}/bin/tclsh"]} { + define TCLSH_CMD "${tpre}/bin/tclsh" + } + } + } + set cgtcl [get-define TCLSH_CMD] + if {![file-isexec $cgtcl]} { + proj-fatal "Cannot find a tclsh to use for code generation." + } + define BTCLSH "\$(TCLSH_CMD)" + } + }; # /define-push $flagsToRestore + return $cgtcl +}; # sqlite-determine-codegen-tcl + +######################################################################## +# Runs sqlite-check-tcl and, if this is the canonical build, +# sqlite-determine-codegen-tcl. 
+proc sqlite-handle-tcl {} { + sqlite-check-tcl + if {"canonical" ne $::sqliteConfig(build-mode)} return + msg-result "TCL for code generation: [sqlite-determine-codegen-tcl]" + + # Determine the base name of the Tcl extension's DLL + # + if {[get-define HAVE_TCL]} { + if {[string match *-cygwin [get-define host]]} { + set libname cyg + } else { + set libname lib + } + if {[get-define TCL_MAJOR_VERSION] > 8} { + append libname tcl9 + } + append libname sqlite + } else { + set libname "" + } + define TCL_EXT_DLL_BASENAME $libname + # The extension is added in the makefile +} + +######################################################################## +# Handle the --enable/disable-rpath flag. +proc sqlite-handle-rpath {} { + # autosetup/cc-shared.tcl sets the rpath flag definition in + # [get-define SH_LINKRPATH], but it does so on a per-platform basis + # rather than as a compiler check. Though we should do a proper + # compiler check (as proj-check-rpath does), we may want to consider + # adopting its approach of clearing the rpath flags for environments + # for which sqlite-env-is-unix-on-windows returns a non-empty + # string. + + # https://sqlite.org/forum/forumpost/13cac3b56516f849 + if {[proj-opt-truthy rpath]} { + proj-check-rpath + } else { + msg-result "Disabling use of rpath." + define LDFLAGS_RPATH "" + } +} + +######################################################################## +# If the --dump-defines configure flag is provided then emit a list of +# all [define] values to config.defines.txt, else do nothing. +proc sqlite-dump-defines {} { + proj-if-opt-truthy dump-defines { + make-config-header $::sqliteConfig(dump-defines-txt) \ + -bare {SQLITE_OS* SQLITE_DEBUG USE_*} \ + -str {BIN_* CC LD AR LDFLAG* OPT_*} \ + -auto {*} + # achtung: ^^^^ whichever SQLITE_OS_foo flag which is set to 0 will + # get _undefined_ here unless it's part of the -bare set. + if {"" ne $::sqliteConfig(dump-defines-json)} { + msg-result "--dump-defines is creating $::sqliteConfig(dump-defines-json)" + ######################################################################## + # Dump config-defines.json... + # Demonstrate (mis?)handling of spaces in JSON-export array values: + # define-append OPT_FOO.list {"-DFOO=bar baz" -DBAR="baz barre"} + define OPT_FEATURE_FLAGS.list [get-define OPT_FEATURE_FLAGS] + define OPT_SHELL.list [get-define OPT_SHELL] + set dumpDefsOpt { + -bare {SIZEOF_* HAVE_DECL_*} + -none {HAVE_CFLAG_* LDFLAGS_* SH_* SQLITE_AUTORECONFIG TARGET_* USE_GCOV TCL_*} + -array {*.list} + -auto {OPT_* PACKAGE_* HAVE_*} + } +# if {$::sqliteConfig(dump-defines-json-include-lowercase)} { +# lappend dumpDefsOpt -none {lib_*} ; # remnants from proj-check-function-in-lib and friends +# lappend dumpDefsOpt -auto {[a-z]*} +# } + lappend dumpDefsOpt -none * + proj-dump-defs-json $::sqliteConfig(dump-defines-json) {*}$dumpDefsOpt + undefine OPT_FEATURE_FLAGS.list + undefine OPT_SHELL.list + } + } +} diff --git a/autosetup/system.tcl b/autosetup/system.tcl new file mode 100644 index 0000000000..05d378afdd --- /dev/null +++ b/autosetup/system.tcl @@ -0,0 +1,420 @@ +# Copyright (c) 2010 WorkWare Systems http://www.workware.net.au/ +# All rights reserved + +# @synopsis: +# +# This module supports common system interrogation and options +# such as '--host', '--build', '--prefix', and setting 'srcdir', 'builddir', and 'EXEEXT'. 
+# +# It also support the "feature" naming convention, where searching +# for a feature such as 'sys/type.h' defines 'HAVE_SYS_TYPES_H'. +# +# It defines the following variables, based on '--prefix' unless overridden by the user: +# +## datadir +## sysconfdir +## sharedstatedir +## localstatedir +## infodir +## mandir +## includedir +# +# If '--prefix' is not supplied, it defaults to '/usr/local' unless 'options-defaults { prefix ... }' is used *before* +# including the 'system' module. + +if {[is-defined defaultprefix]} { + user-notice "Note: defaultprefix is deprecated. Use options-defaults to set default options" + options-defaults [list prefix [get-define defaultprefix]] +} + +options { + host:host-alias => {a complete or partial cpu-vendor-opsys for the system where + the application will run (defaults to the same value as --build)} + build:build-alias => {a complete or partial cpu-vendor-opsys for the system + where the application will be built (defaults to the + result of running config.guess)} + prefix:dir=/usr/local => {the target directory for the build (default: '@default@')} + + # These (hidden) options are supported for autoconf/automake compatibility + exec-prefix: + bindir: + sbindir: + includedir: + mandir: + infodir: + libexecdir: + datadir: + libdir: + sysconfdir: + sharedstatedir: + localstatedir: + runstatedir: + maintainer-mode=0 + dependency-tracking=0 + silent-rules=0 + program-prefix: + program-suffix: + program-transform-name: + x-includes: + x-libraries: +} + +# @check-feature name { script } +# +# defines feature '$name' to the return value of '$script', +# which should be 1 if found or 0 if not found. +# +# e.g. the following will define 'HAVE_CONST' to 0 or 1. +# +## check-feature const { +## cctest -code {const int _x = 0;} +## } +proc check-feature {name code} { + msg-checking "Checking for $name..." + set r [uplevel 1 $code] + define-feature $name $r + if {$r} { + msg-result "ok" + } else { + msg-result "not found" + } + return $r +} + +# @have-feature name ?default=0? +# +# Returns the value of feature '$name' if defined, or '$default' if not. +# +# See 'feature-define-name' for how the "feature" name +# is translated into the "define" name. +# +proc have-feature {name {default 0}} { + get-define [feature-define-name $name] $default +} + +# @define-feature name ?value=1? +# +# Sets the feature 'define' to '$value'. +# +# See 'feature-define-name' for how the "feature" name +# is translated into the "define" name. +# +proc define-feature {name {value 1}} { + define [feature-define-name $name] $value +} + +# @feature-checked name +# +# Returns 1 if feature '$name' has been checked, whether true or not. +# +proc feature-checked {name} { + is-defined [feature-define-name $name] +} + +# @feature-define-name name ?prefix=HAVE_? +# +# Converts a "feature" name to the corresponding "define", +# e.g. 'sys/stat.h' becomes 'HAVE_SYS_STAT_H'. +# +# Converts '*' to 'P' and all non-alphanumeric to underscore. +# +proc feature-define-name {name {prefix HAVE_}} { + string toupper $prefix[regsub -all {[^a-zA-Z0-9]} [regsub -all {[*]} $name p] _] +} + +# @write-if-changed filename contents ?script? +# +# If '$filename' doesn't exist, or it's contents are different to '$contents', +# the file is written and '$script' is evaluated. +# +# Otherwise a "file is unchanged" message is displayed. 
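+#
+# A minimal usage sketch (the file name, contents variable, and message
+# below are illustrative only):
+#
+## write-if-changed config.vars $newContents {
+##   msg-result "Regenerated config.vars"
+## }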
+proc write-if-changed {file buf {script {}}} { + set old [readfile $file ""] + if {$old eq $buf && [file exists $file]} { + msg-result "$file is unchanged" + } else { + writefile $file $buf\n + uplevel 1 $script + } +} + + +# @include-file infile mapping +# +# The core of make-template, called recursively for each @include +# directive found within that template so that this proc's result +# is the fully-expanded template. +# +# The mapping parameter is how we expand @varname@ within the template. +# We do that inline within this step only for @include directives which +# can have variables in the filename arg. A separate substitution pass +# happens when this recursive function returns, expanding the rest of +# the variables. +# +proc include-file {infile mapping} { + # A stack of true/false conditions, one for each nested conditional + # starting with "true" + set condstack {1} + set result {} + set linenum 0 + foreach line [split [readfile $infile] \n] { + incr linenum + if {[regexp {^@(if|else|endif)(\s*)(.*)} $line -> condtype condspace condargs]} { + if {$condtype eq "if"} { + if {[string length $condspace] == 0} { + autosetup-error "$infile:$linenum: Invalid expression: $line" + } + if {[llength $condargs] == 1} { + # ABC => [get-define ABC] ni {0 ""} + # !ABC => [get-define ABC] in {0 ""} + lassign $condargs condvar + if {[regexp {^!(.*)} $condvar -> condvar]} { + set op in + } else { + set op ni + } + set condexpr "\[[list get-define $condvar]\] $op {0 {}}" + } else { + # Translate alphanumeric ABC into [get-define ABC] and leave the + # rest of the expression untouched + regsub -all {([A-Z][[:alnum:]_]*)} $condargs {[get-define \1]} condexpr + } + if {[catch [list expr $condexpr] condval]} { + dputs $condval + autosetup-error "$infile:$linenum: Invalid expression: $line" + } + dputs "@$condtype: $condexpr => $condval" + } + if {$condtype ne "if"} { + if {[llength $condstack] <= 1} { + autosetup-error "$infile:$linenum: Error: @$condtype missing @if" + } elseif {[string length $condargs] && [string index $condargs 0] ne "#"} { + autosetup-error "$infile:$linenum: Error: Extra arguments after @$condtype" + } + } + switch -exact $condtype { + if { + # push condval + lappend condstack $condval + } + else { + # Toggle the last entry + set condval [lpop condstack] + set condval [expr {!$condval}] + lappend condstack $condval + } + endif { + if {[llength $condstack] == 0} { + user-notice "$infile:$linenum: Error: @endif missing @if" + } + lpop condstack + } + } + continue + } + # Only continue if the stack contains all "true" + if {"0" in $condstack} { + continue + } + if {[regexp {^@include\s+(.*)} $line -> filearg]} { + set incfile [string map $mapping $filearg] + if {[file exists $incfile]} { + lappend ::autosetup(deps) [file-normalize $incfile] + lappend result {*}[include-file $incfile $mapping] + } else { + user-error "$infile:$linenum: Include file $incfile is missing" + } + continue + } + if {[regexp {^@define\s+(\w+)\s+(.*)} $line -> var val]} { + define $var $val + continue + } + lappend result $line + } + return $result +} + + +# @make-template template ?outfile? +# +# Reads the input file '/$template' and writes the output file '$outfile' +# (unless unchanged). +# If '$outfile' is blank/omitted, '$template' should end with '.in' which +# is removed to create the output file name. +# +# Each pattern of the form '@define@' is replaced with the corresponding +# "define", if it exists, or left unchanged if not. 
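+#
+# For example, assuming 'CC' has been defined, a template line such as
+#
+## CC=@CC@
+#
+# would have '@CC@' replaced with the defined value (e.g. 'cc'), while an
+# unknown '@NOSUCHDEF@' would be left as-is.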
+# +# The special value '@srcdir@' is substituted with the relative +# path to the source directory from the directory where the output +# file is created, while the special value '@top_srcdir@' is substituted +# with the relative path to the top level source directory. +# +# Conditional sections may be specified as follows: +## @if NAME eq "value" +## lines +## @else +## lines +## @endif +# +# Where 'NAME' is a defined variable name and '@else' is optional. +# Note that variables names *must* start with an uppercase letter. +# If the expression does not match, all lines through '@endif' are ignored. +# +# The alternative forms may also be used: +## @if NAME (true if the variable is defined, but not empty and not "0") +## @if !NAME (opposite of the form above) +## @if +# +# In the general Tcl expression, any words beginning with an uppercase letter +# are translated into [get-define NAME] +# +# Expressions may be nested +# +proc make-template {template {out {}}} { + set infile [file join $::autosetup(srcdir) $template] + + if {![file exists $infile]} { + user-error "Template $template is missing" + } + + # Define this as late as possible + define AUTODEPS $::autosetup(deps) + + if {$out eq ""} { + if {[file ext $template] ne ".in"} { + autosetup-error "make_template $template has no target file and can't guess" + } + set out [file rootname $template] + } + + set outdir [file dirname $out] + + # Make sure the directory exists + file mkdir $outdir + + # Set up srcdir and top_srcdir to be relative to the target dir + define srcdir [relative-path [file join $::autosetup(srcdir) $outdir] $outdir] + define top_srcdir [relative-path $::autosetup(srcdir) $outdir] + + # Build map from global defines to their values so they can be + # substituted into @include file names. + proc build-define-mapping {} { + set mapping {} + foreach {n v} [array get ::define] { + lappend mapping @$n@ $v + } + return $mapping + } + set mapping [build-define-mapping] + + set result [include-file $infile $mapping] + + # Rebuild the define mapping in case we ran across @define + # directives in the template or a file it @included, then + # apply that mapping to the expanded template. 
+ set mapping [build-define-mapping] + write-if-changed $out [string map $mapping [join $result \n]] { + msg-result "Created [relative-path $out] from [relative-path $template]" + } +} + +proc system-init {} { + global autosetup + + # build/host tuples and cross-compilation prefix + opt-str build build "" + define build_alias $build + if {$build eq ""} { + define build [config_guess] + } else { + define build [config_sub $build] + } + + opt-str host host "" + define host_alias $host + if {$host eq ""} { + define host [get-define build] + set cross "" + } else { + define host [config_sub $host] + set cross $host- + } + define cross [get-env CROSS $cross] + + # build/host _cpu, _vendor and _os + foreach type {build host} { + set v [get-define $type] + if {![regexp {^([^-]+)-([^-]+)-(.*)$} $v -> cpu vendor os]} { + user-error "Invalid canonical $type: $v" + } + define ${type}_cpu $cpu + define ${type}_vendor $vendor + define ${type}_os $os + } + + opt-str prefix prefix /usr/local + + # These are for compatibility with autoconf + define target [get-define host] + define prefix $prefix + define builddir $autosetup(builddir) + define srcdir $autosetup(srcdir) + define top_srcdir $autosetup(srcdir) + define abs_top_srcdir [file-normalize $autosetup(srcdir)] + define abs_top_builddir [file-normalize $autosetup(builddir)] + + # autoconf supports all of these + define exec_prefix [opt-str exec-prefix exec_prefix $prefix] + foreach {name defpath} { + bindir /bin + sbindir /sbin + libexecdir /libexec + libdir /lib + } { + define $name [opt-str $name o $exec_prefix$defpath] + } + foreach {name defpath} { + datadir /share + sharedstatedir /com + infodir /share/info + mandir /share/man + includedir /include + } { + define $name [opt-str $name o $prefix$defpath] + } + if {$prefix ne {/usr}} { + opt-str sysconfdir sysconfdir $prefix/etc + } else { + opt-str sysconfdir sysconfdir /etc + } + define sysconfdir $sysconfdir + + define localstatedir [opt-str localstatedir o /var] + define runstatedir [opt-str runstatedir o /run] + + define SHELL [get-env SHELL [find-an-executable sh bash ksh]] + + # These could be used to generate Makefiles following some automake conventions + define AM_SILENT_RULES [opt-bool silent-rules] + define AM_MAINTAINER_MODE [opt-bool maintainer-mode] + define AM_DEPENDENCY_TRACKING [opt-bool dependency-tracking] + + # Windows vs. non-Windows + switch -glob -- [get-define host] { + *-*-ming* - *-*-cygwin - *-*-msys { + define-feature windows + define EXEEXT .exe + } + default { + define EXEEXT "" + } + } + + # Display + msg-result "Host System...[get-define host]" + msg-result "Build System...[get-define build]" +} + +system-init diff --git a/autosetup/teaish/README.txt b/autosetup/teaish/README.txt new file mode 100644 index 0000000000..e11519b042 --- /dev/null +++ b/autosetup/teaish/README.txt @@ -0,0 +1,4 @@ +The *.tcl files in this directory are part of the SQLite's "autoconf" +bundle which are specific to the TEA(-ish) build. During the tarball +generation process, they are copied into /autoconf/autosetup/teaish +(which itself is created as part of that process). diff --git a/autosetup/teaish/core.tcl b/autosetup/teaish/core.tcl new file mode 100644 index 0000000000..c9abfa0626 --- /dev/null +++ b/autosetup/teaish/core.tcl @@ -0,0 +1,2564 @@ +######################################################################## +# 2025 April 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# * May you do good and not evil. 
+# * May you find forgiveness for yourself and forgive others. +# * May you share freely, never taking more than you give. +# +######################################################################## +# ----- @module teaish.tcl ----- +# @section TEA-ish ((TCL Extension Architecture)-ish) +# +# Functions in this file with a prefix of teaish__ are +# private/internal APIs. Those with a prefix of teaish- are +# public APIs. +# +# Teaish has a hard dependency on proj.tcl, and any public API members +# of that module are considered legal for use by teaish extensions. +# +# Project home page: https://fossil.wanderinghorse.net/r/teaish + +use proj + +# +# API-internal settings and shared state. +array set teaish__Config [proj-strip-hash-comments { + # + # Teaish's version number, not to be confused with + # teaish__PkgInfo(-version). + # + version 0.1-beta + + # set to 1 to enable some internal debugging output + debug-enabled 0 + + # + # 0 = don't yet have extension's pkgindex + # 0x01 = found TEAISH_EXT_DIR/pkgIndex.tcl.in + # 0x02 = found srcdir/pkgIndex.tcl.in + # 0x10 = found TEAISH_EXT_DIR/pkgIndex.tcl (static file) + # 0x20 = static-pkgIndex.tcl pragma: behave as if 0x10 + # 0x100 = disabled by -tm.tcl.in + # 0x200 = disabled by -tm.tcl + # + # Reminder: it's significant that the bottom 4 bits be + # cases where teaish manages ./pkgIndex.tcl. + # + pkgindex-policy 0 + + # + # The pkginit counterpart of pkgindex-policy: + # + # 0 = no pkginit + # 0x01 = found default X.in: generate X from X.in + # 0x10 = found static pkginit file X + # 0x02 = user-provided X.in generates ./X. + # 0x20 = user-provided static pkginit file X + # + # The 0x0f bits indicate that teaish is responsible for cleaning up + # the (generated) pkginit file. + # + pkginit-policy 0 + # + # 0 = no tm.tcl + # 0x01 = tm.tcl.in + # 0x10 = static tm.tcl + tm-policy 0 + + # + # If 1+ then teaish__verbose will emit messages. + # + verbose 0 + + # + # Mapping of pkginfo -flags to their TEAISH_xxx define (if any). + # This must not be modified after initialization. + # + pkginfo-f2d { + -name TEAISH_NAME + -name.dist TEAISH_DIST_NAME + -name.pkg TEAISH_PKGNAME + -version TEAISH_VERSION + -libDir TEAISH_LIBDIR_NAME + -loadPrefix TEAISH_LOAD_PREFIX + -vsatisfies TEAISH_VSATISFIES + -pkgInit.tcl TEAISH_PKGINIT_TCL + -pkgInit.tcl.in TEAISH_PKGINIT_TCL_IN + -url TEAISH_URL + -tm.tcl TEAISH_TM_TCL + -tm.tcl.in TEAISH_TM_TCL_IN + -options {} + -pragmas {} + -src {} + } + + # + # Queues for use with teaish-checks-queue and teaish-checks-run. + # + queued-checks-pre {} + queued-checks-post {} + + # Whether or not "make dist" parts are enabled. They get enabled + # when building from an extension's dir, disabled when building + # elsewhere. + dist-enabled 1 + # Whether or not "make install" parts are enabled. By default + # they are, but we have a single use case where they're + # both unnecessary and unhelpful, so... + install-enabled 1 + + # By default we enable compilation of a native extension but if the + # extension has no native code or the user wants to take that over + # via teaish.make.in or provide a script-only extension, we will + # elide the default compilation rules if this is 0. + dll-enabled 1 + + # Files to include in the "make dist" bundle. + dist-files {} + + # List of source files for the extension. + extension-src {} + + # Path to the teaish.tcl file. + teaish.tcl {} + + # Dir where teaish.tcl is found. 
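+  # (Populated by teaish__find_extension; for out-of-tree builds this
+  # will differ from the build directory.)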
+ extension-dir {} + + # Whether the generates TEASH_VSATISFIES_CODE should error out on a + # satisfies error. If 0, it uses return instead of error. + vsatisfies-error 1 + + # Whether or not to allow a "full dist" - a "make dist" build which + # includes both the extension and teaish. By default this is only on + # if the extension dir is teaish's dir. + dist-full-enabled 0 +}] +set teaish__Config(core-dir) $::autosetup(libdir)/teaish + +# +# Array of info managed by teaish-pkginfo-get and friends. Has the +# same set of keys as $teaish__Config(pkginfo-f2d). +# +array set teaish__PkgInfo {} + +# +# Runs {*}$args if $lvl is <= the current verbosity level, else it has +# no side effects. +# +proc teaish__verbose {lvl args} { + if {$lvl <= $::teaish__Config(verbose)} { + {*}$args + } +} + +# +# @teaish-argv-has flags... +# +# Returns true if any arg in $::argv matches any of the given globs, +# else returns false. +# +proc teaish-argv-has {args} { + foreach glob $args { + foreach arg $::argv { + if {[string match $glob $arg]} { + return 1 + } + } + } + return 0 +} + +if {[teaish-argv-has --teaish-verbose --t-v]} { + # Check this early so that we can use verbose-only messages in the + # pre-options-parsing steps. + set ::teaish__Config(verbose) 1 + #teaish__verbose 1 msg-result "--teaish-verbose activated" +} + +msg-quiet use system ; # Outputs "Host System" and "Build System" lines +if {"--help" ni $::argv} { + teaish__verbose 1 msg-result "TEA(ish) Version = $::teaish__Config(version)" + teaish__verbose 1 msg-result "Source dir = $::autosetup(srcdir)" + teaish__verbose 1 msg-result "Build dir = $::autosetup(builddir)" +} + +# +# @teaish-configure-core +# +# Main entry point for the TEA-ish configure process. auto.def's primary +# (ideally only) job should be to call this. +# +proc teaish-configure-core {} { + proj-tweak-default-env-dirs + + set ::teaish__Config(install-mode) [teaish-argv-has --teaish-install*] + set ::teaish__Config(create-ext-mode) \ + [teaish-argv-has --teaish-create-extension=* --t-c-e=*] + set gotExt 0; # True if an extension config is found + if {!$::teaish__Config(create-ext-mode) + && !$::teaish__Config(install-mode)} { + # Don't look for an extension if we're in --t-c-e or --t-i mode + set gotExt [teaish__find_extension] + } + + # + # Set up the core --flags. This needs to come before teaish.tcl is + # sourced so that that file can use teaish-pkginfo-set to append + # options. + # + options-add [proj-strip-hash-comments { + with-tcl:DIR + => {Directory containing tclConfig.sh or a directory one level up from + that, from which we can derive a directory containing tclConfig.sh. + Defaults to the $TCL_HOME environment variable.} + + with-tclsh:PATH + => {Full pathname of tclsh to use. It is used for trying to find + tclConfig.sh. Warning: if its containing dir has multiple tclsh + versions, it may select the wrong tclConfig.sh! + Defaults to the $TCLSH environment variable.} + + tcl-stubs=0 => {Enable use of Tcl stubs library.} + + # TEA has --with-tclinclude but it appears to only be useful for + # building an extension against an uninstalled copy of TCL's own + # source tree. The policy here is that either we get that info + # from tclConfig.sh or we give up. + # + # with-tclinclude:DIR + # => {Specify the directory which contains the tcl.h. This should not + # normally be required, as that information comes from tclConfig.sh.} + + # We _generally_ want to reduce the possibility of flag collisions with + # extensions, and thus use a teaish-... prefix on most flags. 
However, + # --teaish-extension-dir is frequently needed, so... + # + # As of this spontaneous moment, we'll settle on using --t-A-X to + # abbreviate --teaish-A...-X... flags when doing so is + # unambiguous... + ted: t-e-d: + teaish-extension-dir:DIR + => {Looks for an extension in the given directory instead of the current + dir.} + + t-c-e: + teaish-create-extension:TARGET_DIRECTORY + => {Writes stub files for creating an extension. Will refuse to overwrite + existing files without --teaish-force.} + + t-f + teaish-force + => {Has a context-dependent meaning (autosetup defines --force for its + own use).} + + t-d-d + teaish-dump-defines + => {Dump all configure-defined vars to config.defines.txt} + + t-v:=0 + teaish-verbose:=0 + => {Enable more (often extraneous) messages from the teaish core.} + + t-d + teaish-debug=0 => {Enable teaish-specific debug output} + + t-i + teaish-install:=auto + => {Installs a copy of teaish, including autosetup, to the target dir. + When used with --teaish-create-extension=DIR, a value of "auto" + (no no value) will inherit that directory.} + + #TODO: --teaish-install-extension:=dir as short for + # --t-c-e=dir --t-i + + t-e-p: + teaish-extension-pkginfo:pkginfo + => {For use with --teaish-create-extension. If used, it must be a + list of arguments for use with teaish-pkginfo-set, e.g. + --teaish-extension-pkginfo="-name Foo -version 2.3"} + + t-v-c + teaish-vsatisfies-check=1 + => {Disable the configure-time "vsatisfies" check on the target tclsh.} + + }]; # main options. + + if {$gotExt} { + # We found an extension. Source it... + set ttcl $::teaish__Config(teaish.tcl) + proj-assert {"" ne [teaish-pkginfo-get -name]} + proj-assert {[file exists $ttcl]} \ + "Expecting to have found teaish.(tcl|config) by now" + if {[string match *.tcl $ttcl]} { + uplevel 1 {source $::teaish__Config(teaish.tcl)} + } else { + teaish-pkginfo-set {*}[proj-file-content -trim $ttcl] + } + unset ttcl + # Set up some default values if the extension did not set them. + # This must happen _after_ it's sourced but before + # teaish-configure is called. + array set f2d $::teaish__Config(pkginfo-f2d) + foreach {pflag key type val} { + - TEAISH_CFLAGS -v "" + - TEAISH_LDFLAGS -v "" + - TEAISH_MAKEFILE -v "" + - TEAISH_MAKEFILE_CODE -v "" + - TEAISH_MAKEFILE_IN -v "" + - TEAISH_PKGINDEX_TCL -v "" + - TEAISH_PKGINDEX_TCL_IN -v "" + - TEAISH_PKGINIT_TCL -v "" + - TEAISH_PKGINIT_TCL_IN -v "" + - TEAISH_PKGINIT_TCL_TAIL -v "" + - TEAISH_TEST_TCL -v "" + - TEAISH_TEST_TCL_IN -v "" + + -version - -v 0.0.0 + -name.pkg - -e {set ::teaish__PkgInfo(-name)} + -name.dist - -e {set ::teaish__PkgInfo(-name)} + -libDir - -e { + join [list \ + $::teaish__PkgInfo(-name.pkg) \ + $::teaish__PkgInfo(-version)] "" + } + -loadPrefix - -e { + string totitle $::teaish__PkgInfo(-name.pkg) + } + -vsatisfies - -v {{Tcl 8.5-}} + -pkgInit.tcl - -v "" + -pkgInit.tcl.in - -v "" + -url - -v "" + -tm.tcl - -v "" + -tm.tcl.in - -v "" + -src - -v "" + } { + #proj-assert 0 {Just testing} + set isPIFlag [expr {"-" ne $pflag}] + if {$isPIFlag} { + if {[info exists ::teaish__PkgInfo($pflag)]} { + # Was already set - skip it. + continue; + } + proj-assert {{-} eq $key};# "Unexpected pflag=$pflag key=$key type=$type val=$val" + set key $f2d($pflag) + } + if {"" ne $key} { + if {"" ne [get-define $key ""]} { + # Was already set - skip it. 
+ continue + } + } + switch -exact -- $type { + -v {} + -e { set val [eval $val] } + default { proj-error "Invalid type flag: $type" } + } + #puts "***** defining default $pflag $key {$val} isPIFlag=$isPIFlag" + if {$key ne ""} { + define $key $val + } + if {$isPIFlag} { + set ::teaish__PkgInfo($pflag) $val + } + } + unset isPIFlag pflag key type val + array unset f2d + }; # sourcing extension's teaish.tcl + + if {[llength [info proc teaish-options]] > 0} { + # Add options defined by teaish-options, which is assumed to be + # imported via [teaish-get -teaish-tcl]. + set o [teaish-options] + if {"" ne $o} { + options-add $o + } + } + #set opts [proj-options-combine] + #lappend opts teaish-debug => {x}; #testing dupe entry handling + if {[catch {options {}} msg xopts]} { + # Workaround for + # where [options] behaves oddly on _some_ TCL builds when it's + # called from deeper than the global scope. + dict incr xopts -level + return {*}$xopts $msg + } + + proj-xfer-options-aliases { + t-c-e => teaish-create-extension + t-d => teaish-debug + t-d-d => teaish-dump-defines + ted => teaish-extension-dir + t-e-d => teaish-extension-dir + t-e-p => teaish-extension-pkginfo + t-f => teaish-force + t-i => teaish-install + t-v => teaish-verbose + t-v-c => teaish-vsatisfies-check + } + + scan [opt-val teaish-verbose 0] %d ::teaish__Config(verbose) + set ::teaish__Config(debug-enabled) [opt-bool teaish-debug] + + set exitEarly 0 + if {[proj-opt-was-provided teaish-create-extension]} { + teaish__create_extension [opt-val teaish-create-extension] + incr exitEarly + } + if {$::teaish__Config(install-mode)} { + teaish__install + incr exitEarly + } + + if {$exitEarly} { + file delete -force config.log + return + } + proj-assert {1==$gotExt} "Else we cannot have gotten this far" + + teaish__configure_phase1 +} + + +# +# Internal config-time debugging output routine. It is not legal to +# call this from the global scope. +# +proc teaish-debug {msg} { + if {$::teaish__Config(debug-enabled)} { + puts stderr [proj-bold "** DEBUG: \[[proj-scope 1]\]: $msg"] + } +} + +# +# Runs "phase 1" of the configuration, immediately after processing +# --flags. This is what will import the client-defined teaish.tcl. +# +proc teaish__configure_phase1 {} { + msg-result \ + [join [list "Configuring build of Tcl extension" \ + [proj-bold [teaish-pkginfo-get -name] \ + [teaish-pkginfo-get -version]] "..."]] + + uplevel 1 { + use cc cc-db cc-shared cc-lib; # pkg-config + } + teaish__check_tcl + apply {{} { + # + # If --prefix or --exec-prefix are _not_ provided, use their + # TCL_... counterpart from tclConfig.sh. Caveat: by the time we can + # reach this point, autosetup's system.tcl will have already done + # some non-trivial amount of work with these to create various + # derived values from them, so we temporarily end up with a mishmash + # of autotools-compatibility var values. That will be straightened + # out in the final stage of the configure script via + # [proj-remap-autoconf-dir-vars]. + # + foreach {flag uflag tclVar} { + prefix prefix TCL_PREFIX + exec-prefix exec_prefix TCL_EXEC_PREFIX + } { + if {![proj-opt-was-provided $flag]} { + if {"exec-prefix" eq $flag} { + # If --exec-prefix was not used, ensure that --exec-prefix + # derives from the --prefix we may have just redefined. + set v {${prefix}} + } else { + set v [get-define $tclVar "???"] + teaish__verbose 1 msg-result "Using \$$tclVar for --$flag=$v" + } + proj-assert {"???" 
ne $v} "Expecting teaish__check_tcl to have defined $tclVar" + #puts "*** $flag $uflag $tclVar = $v" + proj-opt-set $flag $v + define $uflag $v + + # ^^^ As of here, all autotools-compatibility vars which derive + # from --$flag, e.g. --libdir, still derive from the default + # --$flag value which was active when system.tcl was + # included. So long as those flags are not explicitly passed to + # the configure script, those will be straightened out via + # [proj-remap-autoconf-dir-vars]. + } + } + }}; # --[exec-]prefix defaults + teaish__check_common_bins + # + # Set up library file names + # + proj-file-extensions + teaish__define_pkginfo_derived * + + teaish-checks-run -pre + if {[llength [info proc teaish-configure]] > 0} { + # teaish-configure is assumed to be imported via + # teaish.tcl + teaish-configure + } + teaish-checks-run -post + + define TEAISH_USE_STUBS [opt-bool tcl-stubs] + + apply {{} { + # Set up "vsatisfies" code for pkgIndex.tcl.in, + # _teaish.tester.tcl.in, and for a configure-time check. We would + # like to put this before [teaish-checks-run -pre] but it's + # marginally conceivable that a client may need to dynamically + # calculate the vsatisfies and set it via [teaish-configure]. + set vs [get-define TEAISH_VSATISFIES ""] + if {"" eq $vs} return + set code {} + set n 0 + # Treat $vs as a list-of-lists {{Tcl 8.5-} {Foo 1.0- -3.0} ...} + # and generate Tcl which will run package vsatisfies tests with + # that info. + foreach pv $vs { + set n [llength $pv] + if {$n < 2} { + proj-error "-vsatisfies: {$pv} appears malformed. Whole list is: $vs" + } + set pkg [lindex $pv 0] + set vcheck {} + for {set i 1} {$i < $n} {incr i} { + lappend vcheck [lindex $pv $i] + } + if {[opt-bool teaish-vsatisfies-check]} { + set tclsh [get-define TCLSH_CMD] + set vsat "package vsatisfies \[ package provide $pkg \] $vcheck" + set vputs "puts \[ $vsat \]" + #puts "*** vputs = $vputs" + scan [exec echo $vputs | $tclsh] %d vvcheck + if {![info exists vvcheck] || 0 == $vvcheck} { + proj-fatal -up $tclsh "check failed:" $vsat + } + } + if {$::teaish__Config(vsatisfies-error)} { + set vunsat \ + [list error [list Package \ + $::teaish__PkgInfo(-name) $::teaish__PkgInfo(-version) \ + requires $pv]] + } else { + set vunsat return + } + lappend code \ + [string trim [subst -nocommands \ + {if { ![package vsatisfies [package provide $pkg] $vcheck] } {\n $vunsat\n}}]] + }; # foreach pv + define TEAISH_VSATISFIES_CODE [join $code "\n"] + }}; # vsatisfies + + if {[proj-looks-like-windows]} { + # Without this, linking of an extension will not work on Cygwin or + # Msys2. + msg-result "Using USE_TCL_STUBS for Unix(ish)-on-Windows environment" + teaish-cflags-add -DUSE_TCL_STUBS=1 + } + + #define AS_LIBDIR $::autosetup(libdir) + define TEAISH_TESTUTIL_TCL $::teaish__Config(core-dir)/tester.tcl + + apply {{} { + # + # Ensure we have a pkgIndex.tcl and don't have a stale generated one + # when rebuilding for different --with-tcl=... values. 
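+    # (The 0x300 and 0x0f masks tested below correspond to the
+    # pkgindex-policy bit values documented in teaish__Config above.)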
+ # + if {!$::teaish__Config(pkgindex-policy)} { + proj-error "Cannot determine which pkgIndex.tcl to use" + } + if {0x300 & $::teaish__Config(pkgindex-policy)} { + teaish__verbose 1 msg-result "pkgIndex disabled by -tm.tcl(.in)" + } else { + set tpi [proj-coalesce \ + [get-define TEAISH_PKGINDEX_TCL_IN] \ + [get-define TEAISH_PKGINDEX_TCL]] + proj-assert {$tpi ne ""} \ + "TEAISH_PKGINDEX_TCL should have been set up by now" + teaish__verbose 1 msg-result "Using pkgIndex from $tpi" + if {0x0f & $::teaish__Config(pkgindex-policy)} { + # Don't leave stale pkgIndex.tcl laying around yet don't delete + # or overwrite a user-managed static pkgIndex.tcl. + file delete -force -- [get-define TEAISH_PKGINDEX_TCL] + proj-dot-ins-append [get-define TEAISH_PKGINDEX_TCL_IN] + } else { + teaish-dist-add [file tail $tpi] + } + } + }}; # $::teaish__Config(pkgindex-policy) + + # + # Ensure we clean up TEAISH_PKGINIT_TCL if needed and @-process + # TEAISH_PKGINIT_TCL_IN if needed. + # + if {0x0f & $::teaish__Config(pkginit-policy)} { + file delete -force -- [get-define TEAISH_PKGINIT_TCL] + proj-dot-ins-append [get-define TEAISH_PKGINIT_TCL_IN] \ + [get-define TEAISH_PKGINIT_TCL] + } + if {0x0f & $::teaish__Config(tm-policy)} { + file delete -force -- [get-define TEAISH_TM_TCL] + proj-dot-ins-append [get-define TEAISH_TM_TCL_IN] + } + + apply {{} { + # Queue up any remaining dot-in files + set dotIns [list] + foreach {dIn => dOut} { + TEAISH_TESTER_TCL_IN => TEAISH_TESTER_TCL + TEAISH_TEST_TCL_IN => TEAISH_TEST_TCL + TEAISH_MAKEFILE_IN => TEAISH_MAKEFILE + } { + lappend dotIns [get-define $dIn ""] [get-define $dOut ""] + } + lappend dotIns $::autosetup(srcdir)/Makefile.in Makefile; # must be after TEAISH_MAKEFILE_IN. + # Much later: probably because of timestamps for deps purposes :-? + #puts "dotIns=$dotIns" + foreach {i o} $dotIns { + if {"" ne $i && "" ne $o} { + #puts " pre-dot-ins-append: \[$i\] -> \[$o\]" + proj-dot-ins-append $i $o + } + } + }} + + define TEAISH_DIST_FULL \ + [expr { + $::teaish__Config(dist-enabled) + && $::teaish__Config(dist-full-enabled) + }] + + define TEAISH_AUTOSETUP_DIR $::teaish__Config(core-dir) + define TEAISH_ENABLE_DIST $::teaish__Config(dist-enabled) + define TEAISH_ENABLE_INSTALL $::teaish__Config(install-enabled) + define TEAISH_ENABLE_DLL $::teaish__Config(dll-enabled) + define TEAISH_TCL $::teaish__Config(teaish.tcl) + + define TEAISH_DIST_FILES [join $::teaish__Config(dist-files)] + define TEAISH_EXT_DIR [join $::teaish__Config(extension-dir)] + define TEAISH_EXT_SRC [join $::teaish__Config(extension-src)] + proj-setup-autoreconfig TEAISH_AUTORECONFIG + foreach f { + TEAISH_CFLAGS + TEAISH_LDFLAGS + } { + # Ensure that any of these lists are flattened + define $f [join [get-define $f]] + } + proj-remap-autoconf-dir-vars + set tdefs [teaish__defines_to_list] + define TEAISH__DEFINES_MAP $tdefs; # injected into _teaish.tester.tcl + + # + # NO [define]s after this point! + # + proj-if-opt-truthy teaish-dump-defines { + proj-file-write config.defines.txt $tdefs + } + proj-dot-ins-process -validate + +}; # teaish__configure_phase1 + +# +# Run checks for required binaries. +# +proc teaish__check_common_bins {} { + if {"" eq [proj-bin-define install]} { + proj-warn "Cannot find install binary, so 'make install' will not work." + define BIN_INSTALL false + } + if {"" eq [proj-bin-define zip]} { + proj-warn "Cannot find zip, so 'make dist.zip' will not work." + } + if {"" eq [proj-bin-define tar]} { + proj-warn "Cannot find tar, so 'make dist.tgz' will not work." 
+  }
+}
+
+#
+# TCL...
+#
+# teaish__check_tcl performs most of the --with-tcl and --with-tclsh
+# handling. Some related bits and pieces are performed before and
+# after that function is called.
+#
+# Important [define]'d vars:
+#
+# - TCLSH_CMD is the path to the canonical tclsh or "".
+#
+# - TCL_CONFIG_SH is the path to tclConfig.sh or "".
+#
+# - TCLLIBDIR is the dir to which the extension library gets
+#   installed.
+#
+proc teaish__check_tcl {} {
+  define TCLSH_CMD false ; # Significant is that it exits with non-0
+  define TCLLIBDIR "" ; # Installation dir for TCL extension lib
+  define TCL_CONFIG_SH ""; # full path to tclConfig.sh
+
+  # Clear out all vars which would harvest from tclConfig.sh so that
+  # the late-config validation of @VARS@ works even if --disable-tcl
+  # is used.
+  proj-tclConfig-sh-to-autosetup ""
+
+  # TODO: better document the steps this is taking.
+  set srcdir $::autosetup(srcdir)
+  msg-result "Checking for a suitable tcl... "
+  set use_tcl 1
+  set withSh [opt-val with-tclsh [proj-get-env TCLSH]]
+  set tclHome [opt-val with-tcl [proj-get-env TCL_HOME]]
+  if {[string match */lib $tclHome]} {
+    # TEA compatibility kludge: its --with-tcl wants the lib
+    # dir containing tclConfig.sh.
+    #proj-warn "Replacing --with-tcl=$tclHome for TEA compatibility"
+    regsub {/lib$} $tclHome "" tclHome
+    msg-result "NOTE: stripped /lib suffix from --with-tcl=$tclHome (a TEA-ism)"
+  }
+  if {0} {
+    # This misinteracts with the $TCL_PREFIX default: it will use the
+    # autosetup-defined --prefix default
+    if {"prefix" eq $tclHome} {
+      set tclHome [get-define prefix]
+    }
+  }
+  teaish-debug "use_tcl ${use_tcl}"
+  teaish-debug "withSh=${withSh}"
+  teaish-debug "tclHome=$tclHome"
+  if {"" eq $withSh && "" eq $tclHome} {
+    # If neither --with-tclsh nor --with-tcl are provided, try to find
+    # a workable tclsh.
+    set withSh [proj-first-bin-of tclsh9.1 tclsh9.0 tclsh8.6 tclsh]
+    teaish-debug "withSh=${withSh}"
+  }
+
+  set doConfigLookup 1 ; # set to 0 to test the tclConfig.sh-not-found cases
+  if {"" ne $withSh} {
+    # --with-tclsh was provided or found above. Validate it and use it
+    # to trump any value passed via --with-tcl=DIR.
+    if {![file-isexec $withSh]} {
+      proj-error "TCL shell $withSh is not executable"
+    } else {
+      define TCLSH_CMD $withSh
+      #msg-result "Using tclsh: $withSh"
+    }
+    if {$doConfigLookup &&
+        [catch {exec $withSh $::autosetup(libdir)/find_tclconfig.tcl} result] == 0} {
+      set tclHome $result
+    }
+    if {"" ne $tclHome && [file isdirectory $tclHome]} {
+      teaish__verbose 1 msg-result "$withSh recommends the tclConfig.sh from $tclHome"
+    } else {
+      proj-warn "$withSh is unable to recommend a tclConfig.sh"
+      set use_tcl 0
+    }
+  }
+  set cfg ""
+  set tclSubdirs {tcl9.1 tcl9.0 tcl8.6 tcl8.5 lib}
+  while {$use_tcl} {
+    if {"" ne $tclHome} {
+      # Ensure that we can find tclConfig.sh under ${tclHome}/...
+      if {$doConfigLookup} {
+        if {[file readable "${tclHome}/tclConfig.sh"]} {
+          set cfg "${tclHome}/tclConfig.sh"
+        } else {
+          foreach i $tclSubdirs {
+            if {[file readable "${tclHome}/$i/tclConfig.sh"]} {
+              set cfg "${tclHome}/$i/tclConfig.sh"
+              break
+            }
+          }
+        }
+      }
+      if {"" eq $cfg} {
+        proj-error "No tclConfig.sh found under ${tclHome}"
+      }
+    } else {
+      # If we have not yet found a tclConfig.sh file, look in $libdir
+      # which is set automatically by autosetup or via the --prefix
+      # command-line option.
See + # https://sqlite.org/forum/forumpost/e04e693439a22457 + set libdir [get-define libdir] + if {[file readable "${libdir}/tclConfig.sh"]} { + set cfg "${libdir}/tclConfig.sh" + } else { + foreach i $tclSubdirs { + if {[file readable "${libdir}/$i/tclConfig.sh"]} { + set cfg "${libdir}/$i/tclConfig.sh" + break + } + } + } + if {![file readable $cfg]} { + break + } + } + teaish__verbose 1 msg-result "Using tclConfig.sh = $cfg" + break + }; # while {$use_tcl} + define TCL_CONFIG_SH $cfg + # Export a subset of tclConfig.sh to the current TCL-space. If $cfg + # is an empty string, this emits empty-string entries for the + # various options we're interested in. + proj-tclConfig-sh-to-autosetup $cfg + + if {"" eq $withSh && $cfg ne ""} { + # We have tclConfig.sh but no tclsh. Attempt to locate a tclsh + # based on info from tclConfig.sh. + set tclExecPrefix [get-define TCL_EXEC_PREFIX] + proj-assert {"" ne $tclExecPrefix} + set tryThese [list \ + $tclExecPrefix/bin/tclsh[get-define TCL_VERSION] \ + $tclExecPrefix/bin/tclsh ] + foreach trySh $tryThese { + if {[file-isexec $trySh]} { + set withSh $trySh + break + } + } + if {![file-isexec $withSh]} { + proj-warn "Cannot find a usable tclsh (tried: $tryThese)" + } + } + define TCLSH_CMD $withSh + if {$use_tcl} { + # Set up the TCLLIBDIR + set tcllibdir [get-env TCLLIBDIR ""] + set extDirName [teaish-pkginfo-get -libDir] + if {"" eq $tcllibdir} { + # Attempt to extract TCLLIBDIR from TCL's $auto_path + if {"" ne $withSh && + [catch {exec echo "puts stdout \$auto_path" | "$withSh"} result] == 0} { + foreach i $result { + if {![string match //zip* $i] && [file isdirectory $i]} { + # isdirectory actually passes on //zipfs:/..., but those are + # useless for our purposes + set tcllibdir $i/$extDirName + break + } + } + } else { + proj-error "Cannot determine TCLLIBDIR." + } + } + define TCLLIBDIR $tcllibdir + }; # find TCLLIBDIR + + set gotSh [file-isexec $withSh] + set tmdir ""; # first tcl::tm::list entry + if {$gotSh} { + catch { + set tmli [exec echo {puts [tcl::tm::list]} | $withSh] + # Reminder: this list contains many names of dirs which do not + # exist but are legitimate. If we rely only on an is-dir check, + # we can end up not finding any of the many candidates. + set firstDir "" + foreach d $tmli { + if {"" eq $firstDir && ![string match //*:* $d]} { + # First non-VFS entry, e.g. not //zipfs: + set firstDir $d + } + if {[file isdirectory $d]} { + set tmdir $d + break + } + } + if {"" eq $tmdir} { + set tmdir $firstDir + } + }; # find tcl::tm path + } + define TEAISH_TCL_TM_DIR $tmdir + + # Finally, let's wrap up... + if {$gotSh} { + teaish__verbose 1 msg-result "Using tclsh = $withSh" + if {$cfg ne ""} { + define HAVE_TCL 1 + } else { + proj-warn "Found tclsh but no tclConfig.sh." + } + if {"" eq $tmdir} { + proj-warn "Did not find tcl::tm directory." + } + } + show-notices + # If TCL is not found: if it was explicitly requested then fail + # fatally, else just emit a warning. If we can find the APIs needed + # to generate a working JimTCL then that will suffice for build-time + # TCL purposes (see: proc sqlite-determine-codegen-tcl). + if {!$gotSh} { + proj-error "Did not find tclsh" + } elseif {"" eq $cfg} { + proj-indented-notice -error { + Cannot find a usable tclConfig.sh file. 
Use --with-tcl=DIR to + specify a directory near which tclConfig.sh can be found, or + --with-tclsh=/path/to/tclsh to allow the tclsh binary to locate + its tclConfig.sh, with the caveat that a symlink to tclsh, or + wrapper script around it, e.g. ~/bin/tclsh -> + $HOME/tcl/9.0/bin/tclsh9.1, may not work because tclsh emits + different library paths for the former than the latter. + } + } + msg-result "Using Tcl [get-define TCL_VERSION] from [get-define TCL_PREFIX]." + teaish__tcl_platform_quirks +}; # teaish__check_tcl + +# +# Perform last-minute platform-specific tweaks to account for quirks. +# +proc teaish__tcl_platform_quirks {} { + define TEAISH_POSTINST_PREREQUIRE "" + switch -glob -- [get-define host] { + *-haiku { + # Haiku's default TCLLIBDIR is "all wrong": it points to a + # read-only virtual filesystem mount-point. We bend it back to + # fit under $TCL_PACKAGE_PATH here. + foreach {k d} { + vj TCL_MAJOR_VERSION + vn TCL_MINOR_VERSION + pp TCL_PACKAGE_PATH + ld TCLLIBDIR + } { + set $k [get-define $d] + } + if {[string match /packages/* $ld]} { + set old $ld + set tail [file tail $ld] + if {8 == $vj} { + set ld "${pp}/tcl${vj}.${vn}/${tail}" + } else { + proj-assert {9 == $vj} + set ld "${pp}/${tail}" + } + define TCLLIBDIR $ld + # [load foo.so], without a directory part, does not work via + # automated tests on Haiku (but works when run + # manually). Similarly, the post-install [package require ...] + # test fails, presumably for a similar reason. We work around + # the former in _teaish.tester.tcl.in. We work around the + # latter by amending the post-install check's ::auto_path (in + # Makefile.in). This code MUST NOT contain any single-quotes. + define TEAISH_POSTINST_PREREQUIRE \ + [join [list set ::auto_path \ + \[ linsert \$::auto_path 0 $ld \] \; \ + ]] + proj-indented-notice [subst -nocommands -nobackslashes { + Haiku users take note: patching target installation dir to match + Tcl's home because Haiku's is not writable. + + Original : $old + Substitute: $ld + }] + } + } + } +}; # teaish__tcl_platform_quirks + +# +# Searches $::argv and/or the build dir and/or the source dir for +# teaish.tcl and friends. Fails if it cannot find teaish.tcl or if +# there are other irreconcilable problems. If it returns 0 then it did +# not find an extension but the --help flag was seen, in which case +# that's not an error. +# +# This does not _load_ the extension, it primarily locates the files +# which make up an extension and fills out no small amount of teaish +# state related to that. +# +proc teaish__find_extension {} { + proj-assert {!$::teaish__Config(install-mode)} + teaish__verbose 1 msg-result "Looking for teaish extension..." + + # Helper for the foreach loop below. + set checkTeaishTcl {{mustHave fid dir} { + set f [file join $dir $fid] + if {[file readable $f]} { + file-normalize $f + } elseif {$mustHave} { + proj-error "Missing required $dir/$fid" + } + }} + + # + # We have to handle some flags manually because the extension must + # be loaded before [options] is run (so that the extension can + # inject its own options). + # + set dirBld $::autosetup(builddir); # dir we're configuring under + set dirSrc $::autosetup(srcdir); # where teaish's configure script lives + set extT ""; # teaish.tcl + set largv {}; # rewritten $::argv + set gotHelpArg 0; # got the --help + foreach arg $::argv { + #puts "*** arg=$arg" + switch -glob -- $arg { + --ted=* - + --t-e-d=* - + --teaish-extension-dir=* { + # Ensure that $extD refers to a directory and contains a + # teaish.tcl. 
+ regexp -- {--[^=]+=(.+)} $arg - extD + set extD [file-normalize $extD] + if {![file isdirectory $extD]} { + proj-error "--teaish-extension-dir value is not a directory: $extD" + } + set extT [apply $checkTeaishTcl 0 teaish.config $extD] + if {"" eq $extT} { + set extT [apply $checkTeaishTcl 1 teaish.tcl $extD] + } + set ::teaish__Config(extension-dir) $extD + } + --help { + incr gotHelpArg + lappend largv $arg + } + default { + lappend largv $arg + } + } + } + set ::argv $largv + + set dirExt $::teaish__Config(extension-dir); # dir with the extension + # + # teaish.tcl is a TCL script which implements various + # interfaces described by this framework. + # + # We use the first one we find in the builddir or srcdir. + # + if {"" eq $extT} { + set flist [list] + proj-assert {$dirExt eq ""} + lappend flist $dirBld/teaish.tcl $dirBld/teaish.config $dirSrc/teaish.tcl + if {![proj-first-file-found extT $flist]} { + if {$gotHelpArg} { + # Tell teaish-configure-core that the lack of extension is not + # an error when --help or --teaish-install is used. + return 0; + } + proj-indented-notice -error " +Did not find any of: $flist + +If you are attempting an out-of-tree build, use + --teaish-extension-dir=/path/to/extension" + } + } + if {![file readable $extT]} { + proj-error "extension tcl file is not readable: $extT" + } + set ::teaish__Config(teaish.tcl) $extT + set dirExt [file dirname $extT] + + set ::teaish__Config(extension-dir) $dirExt + set ::teaish__Config(blddir-is-extdir) [expr {$dirBld eq $dirExt}] + set ::teaish__Config(dist-enabled) $::teaish__Config(blddir-is-extdir); # may change later + set ::teaish__Config(dist-full-enabled) \ + [expr {[file-normalize $::autosetup(srcdir)] + eq [file-normalize $::teaish__Config(extension-dir)]}] + + set addDist {{file} { + teaish-dist-add [file tail $file] + }} + apply $addDist $extT + + teaish__verbose 1 msg-result "Extension dir = [teaish-get -dir]" + teaish__verbose 1 msg-result "Extension config = $extT" + + teaish-pkginfo-set -name [file tail [file dirname $extT]] + + # + # teaish.make[.in] provides some of the info for the main makefile, + # like which source(s) to build and their build flags. + # + # We use the first one of teaish.make.in or teaish.make we find in + # $dirExt. + # + if {[proj-first-file-found extM \ + [list \ + $dirExt/teaish.make.in \ + $dirExt/teaish.make \ + ]]} { + if {[string match *.in $extM]} { + define TEAISH_MAKEFILE_IN $extM + define TEAISH_MAKEFILE _[file rootname [file tail $extM]] + } else { + define TEAISH_MAKEFILE_IN "" + define TEAISH_MAKEFILE $extM + } + apply $addDist $extM + teaish__verbose 1 msg-result "Extension makefile = $extM" + } else { + define TEAISH_MAKEFILE_IN "" + define TEAISH_MAKEFILE "" + } + + # Look for teaish.pkginit.tcl[.in] + set piPolicy 0 + if {[proj-first-file-found extI \ + [list \ + $dirExt/teaish.pkginit.tcl.in \ + $dirExt/teaish.pkginit.tcl \ + ]]} { + if {[string match *.in $extI]} { + # Generate teaish.pkginit.tcl from $extI. + define TEAISH_PKGINIT_TCL_IN $extI + define TEAISH_PKGINIT_TCL [file rootname [file tail $extI]] + set piPolicy 0x01 + } else { + # Assume static $extI. + define TEAISH_PKGINIT_TCL_IN "" + define TEAISH_PKGINIT_TCL $extI + set piPolicy 0x10 + } + apply $addDist $extI + teaish__verbose 1 msg-result "Extension post-load init = $extI" + define TEAISH_PKGINIT_TCL_TAIL \ + [file tail [get-define TEAISH_PKGINIT_TCL]]; # for use in pkgIndex.tcl.in + } + set ::teaish__Config(pkginit-policy) $piPolicy + + # Look for pkgIndex.tcl[.in]... 
+ set piPolicy 0 + if {[proj-first-file-found extPI $dirExt/pkgIndex.tcl.in]} { + # Generate ./pkgIndex.tcl from $extPI. + define TEAISH_PKGINDEX_TCL_IN $extPI + define TEAISH_PKGINDEX_TCL [file rootname [file tail $extPI]] + apply $addDist $extPI + set piPolicy 0x01 + } elseif {$dirExt ne $dirSrc + && [proj-first-file-found extPI $dirSrc/pkgIndex.tcl.in]} { + # Generate ./pkgIndex.tcl from $extPI. + define TEAISH_PKGINDEX_TCL_IN $extPI + define TEAISH_PKGINDEX_TCL [file rootname [file tail $extPI]] + set piPolicy 0x02 + } elseif {[proj-first-file-found extPI $dirExt/pkgIndex.tcl]} { + # Assume $extPI's a static file and use it. + define TEAISH_PKGINDEX_TCL_IN "" + define TEAISH_PKGINDEX_TCL $extPI + apply $addDist $extPI + set piPolicy 0x10 + } + # Reminder: we have to delay removal of stale TEAISH_PKGINDEX_TCL + # and the proj-dot-ins-append of TEAISH_PKGINDEX_TCL_IN until much + # later in the process. + set ::teaish__Config(pkgindex-policy) $piPolicy + + # Look for teaish.test.tcl[.in] + proj-assert {"" ne $dirExt} + set flist [list $dirExt/teaish.test.tcl.in $dirExt/teaish.test.tcl] + if {[proj-first-file-found ttt $flist]} { + if {[string match *.in $ttt]} { + # Generate _teaish.test.tcl from $ttt + set xt _[file rootname [file tail $ttt]] + file delete -force -- $xt; # ensure no stale copy is used + define TEAISH_TEST_TCL $xt + define TEAISH_TEST_TCL_IN $ttt + } else { + define TEAISH_TEST_TCL $ttt + define TEAISH_TEST_TCL_IN "" + } + apply $addDist $ttt + } else { + define TEAISH_TEST_TCL "" + define TEAISH_TEST_TCL_IN "" + } + + # Look for _teaish.tester.tcl[.in] + set flist [list $dirExt/_teaish.tester.tcl.in $dirSrc/_teaish.tester.tcl.in] + if {[proj-first-file-found ttt $flist]} { + # Generate teaish.test.tcl from $ttt + set xt [file rootname [file tail $ttt]] + file delete -force -- $xt; # ensure no stale copy is used + define TEAISH_TESTER_TCL $xt + define TEAISH_TESTER_TCL_IN $ttt + if {[lindex $flist 0] eq $ttt} { + apply $addDist $ttt + } + unset ttt xt + } else { + if {[file exists [set ttt [file join $dirSrc _teaish.tester.tcl.in]]]} { + set xt [file rootname [file tail $ttt]] + define TEAISH_TESTER_TCL $xt + define TEAISH_TESTER_TCL_IN $ttt + } else { + define TEAISH_TESTER_TCL "" + define TEAISH_TESTER_TCL_IN "" + } + } + unset flist + + # TEAISH_OUT_OF_EXT_TREE = 1 if we're building from a dir other + # than the extension's home dir. + define TEAISH_OUT_OF_EXT_TREE \ + [expr {[file-normalize $::autosetup(builddir)] ne \ + [file-normalize $::teaish__Config(extension-dir)]}] + return 1 +}; # teaish__find_extension + +# +# @teaish-cflags-add ?-p|prepend? ?-define? cflags... +# +# Equivalent to [proj-define-amend TEAISH_CFLAGS {*}$args]. +# +proc teaish-cflags-add {args} { + proj-define-amend TEAISH_CFLAGS {*}$args +} + +# +# @teaish-define-to-cflag ?flags? defineName...|{defineName...} +# +# Uses [proj-define-to-cflag] to expand a list of [define] keys, each +# one a separate argument, to CFLAGS-style -D... form then appends +# that to the current TEAISH_CFLAGS. +# +# It accepts these flags from proj-define-to-cflag: -quote, +# -zero-undef. It does _not_ support its -list flag. +# +# It accepts its non-flag argument(s) in 2 forms: (1) each arg is a +# single [define] key or (2) its one arg is a list of such keys. +# +# TODO: document teaish's well-defined (as it were) defines for this +# purpose. 
At a bare minimum: +# +# - TEAISH_NAME +# - TEAISH_PKGNAME +# - TEAISH_VERSION +# - TEAISH_LIBDIR_NAME +# - TEAISH_LOAD_PREFIX +# - TEAISH_URL +# +proc teaish-define-to-cflag {args} { + set flags {} + while {[string match -* [lindex $args 0]]} { + set arg [lindex $args 0] + switch -exact -- $arg { + -quote - + -zero-undef { + lappend flags $arg + set args [lassign $args -] + } + default break + } + } + if {1 == [llength $args]} { + set args [list {*}[lindex $args 0]] + } + #puts "***** flags=$flags args=$args" + teaish-cflags-add [proj-define-to-cflag {*}$flags {*}$args] +} + +# +# @teaish-cflags-for-tea ?...CFLAGS? +# +# Adds several -DPACKAGE_... CFLAGS using the extension's metadata, +# all as quoted strings. Those symbolic names are commonly used in +# TEA-based builds, and this function is intended to simplify porting +# of such builds. The -D... flags added are: +# +# -DPACKAGE_VERSION=... +# -DPACKAGE_NAME=... +# -DPACKAGE_URL=... +# -DPACKAGE_STRING=... +# +# Any arguments are passed-on as-is to teaish-cflags-add. +# +proc teaish-cflags-for-tea {args} { + set name $::teaish__PkgInfo(-name) + set version $::teaish__PkgInfo(-version) + set pstr [join [list $name $version]] + teaish-cflags-add \ + {*}$args \ + '-DPACKAGE_VERSION="$version"' \ + '-DPACKAGE_NAME="$name"' \ + '-DPACKAGE_STRING="$pstr"' \ + '-DPACKAGE_URL="[teaish-get -url]"' +} + +# +# @teaish-ldflags-add ?-p|-prepend? ?-define? ldflags... +# +# Equivalent to [proj-define-amend TEAISH_LDFLAGS {*}$args]. +# +# Typically, -lXYZ flags need to be in "reverse" order, with each -lY +# resolving symbols for -lX's to its left. This order is largely +# historical, and not relevant on all environments, but it is +# technically correct and still relevant on some environments. +# +# See: teaish-ldflags-prepend +# +proc teaish-ldflags-add {args} { + proj-define-amend TEAISH_LDFLAGS {*}$args +} + +# +# @teaish-ldflags-prepend args... +# +# Functionally equivalent to [teaish-ldflags-add -p {*}$args] +# +proc teaish-ldflags-prepend {args} { + teaish-ldflags-add -p {*}$args +} + +# +# @teaish-src-add ?-dist? ?-dir? src-files... +# +# Appends all non-empty $args to the project's list of C/C++ source or +# (in some cases) object files. +# +# If passed -dist then it also passes each filename, as-is, to +# [teaish-dist-add]. +# +# If passed -dir then each src-file has [teaish-get -dir] prepended to +# it before they're added to the list. As often as not, that will be +# the desired behavior so that out-of-tree builds can find the +# sources, but there are cases where it's not desired (e.g. when using +# a source file from outside of the extension's dir, or when adding +# object files (which are typically in the build tree)). +# +proc teaish-src-add {args} { + proj-parse-simple-flags args flags { + -dist 0 {expr 1} + -dir 0 {expr 1} + } + if {$flags(-dist)} { + teaish-dist-add {*}$args + } + if {$flags(-dir)} { + set xargs {} + foreach arg $args { + if {"" ne $arg} { + lappend xargs [file join $::teaish__Config(extension-dir) $arg] + } + } + set args $xargs + } + lappend ::teaish__Config(extension-src) {*}$args +} + +# +# @teaish-dist-add files-or-dirs... +# +# Adds the given files to the list of files to include with the "make +# dist" rules. +# +# This is a no-op when the current build is not in the extension's +# directory, as dist support is disabled in out-of-tree builds. +# +# It is not legal to call this until [teaish-get -dir] has been +# reliably set (via teaish__find_extension). 
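+# A minimal illustrative call (a sketch; the file names here are
+# hypothetical, not files from this tree):
+#
+#   teaish-dist-add README.md LICENSE doc/*.html
+#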
+# +proc teaish-dist-add {args} { + if {$::teaish__Config(blddir-is-extdir)} { + # ^^^ reminder: we ignore $::teaish__Config(dist-enabled) here + # because the client might want to implement their own dist + # rules. + #proj-warn "**** args=$args" + lappend ::teaish__Config(dist-files) {*}$args + } +} + +# teaish-install-add files... +# Equivalent to [proj-define-apend TEAISH_INSTALL_FILES ...]. +#proc teaish-install-add {args} { +# proj-define-amend TEAISH_INSTALL_FILES {*}$args +#} + +# +# @teash-make-add args... +# +# Appends makefile code to the TEAISH_MAKEFILE_CODE define. Each +# arg may be any of: +# +# -tab: emit a literal tab +# -nl: emit a literal newline +# -nltab: short for -nl -tab +# -bnl: emit a backslash-escaped end-of-line +# -bnltab: short for -eol -tab +# +# Anything else is appended verbatim. This function adds no additional +# spacing between each argument nor between subsequent invocations. +# Generally speaking, a series of calls to this function need to +# be sure to end the series with a newline. +proc teaish-make-add {args} { + set out [get-define TEAISH_MAKEFILE_CODE ""] + foreach a $args { + switch -exact -- $a { + -bnl { set a " \\\n" } + -bnltab { set a " \\\n\t" } + -tab { set a "\t" } + -nl { set a "\n" } + -nltab { set a "\n\t" } + } + append out $a + } + define TEAISH_MAKEFILE_CODE $out +} + +# Internal helper to generate a clean/distclean rule name +proc teaish__cleanup_rule {{tgt clean}} { + set x [incr ::teaish__Config(teaish__cleanup_rule-counter-${tgt})] + return ${tgt}-_${x}_ +} + +# @teaish-make-obj ?flags? ?...args? +# +# Uses teaish-make-add to inject makefile rules for $objfile from +# $srcfile, which is assumed to be C code which uses libtcl. Unless +# -recipe is used (see below) it invokes the compiler using the +# makefile-defined $(CC.tcl) which, in the default Makefile.in +# template, includes any flags needed for building against the +# configured Tcl. +# +# This always terminates the resulting code with a newline. +# +# Any arguments after the 2nd may be flags described below or, if no +# -recipe is provided, flags for the compiler call. +# +# -obj obj-filename.o +# +# -src src-filename.c +# +# -recipe {...} +# Uses the trimmed value of {...} as the recipe, prefixing it with +# a single hard-tab character. +# +# -deps {...} +# List of extra files to list as dependencies of $o. +# +# -clean +# Generate cleanup rules as well. +proc teaish-make-obj {args} { + proj-parse-simple-flags args flags { + -clean 0 {expr 1} + -recipe => {} + -deps => {} + -obj => {} + -src => {} + } + #parray flags + if {"" eq $flags(-obj)} { + set args [lassign $args flags(-obj)] + if {"" eq $flags(-obj)} { + proj-error "Missing -obj flag." + } + } + foreach f {-deps -src} { + set flags($f) [string trim [string map {\n " "} $flags($f)]] + } + foreach f {-deps -src} { + set flags($f) [string trim $flags($f)] + } + #parray flags + #puts "-- args=$args" + teaish-make-add \ + "# [proj-scope 1] -> [proj-scope] $flags(-obj) $flags(-src)" -nl \ + "$flags(-obj): $flags(-src) $::teaish__Config(teaish.tcl)" + if {[info exists flags(-deps)]} { + teaish-make-add " " [join $flags(-deps)] + } + teaish-make-add -nltab + if {[info exists flags(-recipe)]} { + teaish-make-add [string trim $flags(-recipe)] -nl + } else { + teaish-make-add [join [list \$(CC.tcl) -c $flags(-src) {*}$args]] -nl + } + if {$flags(-clean)} { + set rule [teaish__cleanup_rule] + teaish-make-add \ + "clean: $rule\n$rule:\n\trm -f \"$flags(-obj)\"\n" + } +} + +# +# @teaish-make-clean ?-r? ?-dist? 
...files|{...files} +# +# Adds makefile rules for cleaning up the given files via the "make +# clean" or (if -dist is used) "make distclean" makefile rules. The -r +# flag uses "rm -fr" instead of "rm -f", so be careful with that. +# +# The file names are taken literally as arguments to "rm", so they may +# be shell wildcards to be resolved at cleanup-time. To clean up whole +# directories, pass the -r flag. Each name gets quoted in +# double-quotes, so spaces in names should not be a problem (but +# double-quotes in names will be). +# +proc teaish-make-clean {args} { + if {1 == [llength $args]} { + set args [list {*}[lindex $args 0]] + } + + set tgt clean + set rmflags "-f" + proj-parse-simple-flags args flags { + -dist 0 { + set tgt distclean + } + -r 0 { + set rmflags "-fr" + } + } + set rule [teaish__cleanup_rule $tgt] + teaish-make-add "# [proj-scope 1] -> [proj-scope]: [join $args]\n" + teaish-make-add "${rule}:\n\trm ${rmflags}" + foreach a $args { + teaish-make-add " \"$a\"" + } + teaish-make-add "\n${tgt}: ${rule}\n" +} + +# +# @teaish-make-config-header filename +# +# Invokes autosetup's [make-config-header] and passes it $filename and +# a relatively generic list of options for controlling which defined +# symbols get exported. Clients which need more control over the +# exports can copy/paste/customize this. +# +# The exported file is then passed to [proj-touch] because, in +# practice, that's sometimes necessary to avoid build dependency +# issues. +# +proc teaish-make-config-header {filename} { + make-config-header $filename \ + -none {HAVE_CFLAG_* LDFLAGS_* SH_* TEAISH__* TEAISH_*_CODE} \ + -auto {SIZEOF_* HAVE_* TEAISH_* TCL_*} \ + -none * + proj-touch $filename; # help avoid frequent unnecessary auto-reconfig +} + +# +# @teaish-feature-cache-set $key value +# +# Sets a feature-check cache entry with the given key. +# See proj-cache-set for the key's semantics. $key should +# normally be 0. +# +proc teaish-feature-cache-set {key val} { + proj-cache-set -key $key -level 1 $val +} + +# +# @teaish-feature-cache-check key tgtVarName +# +# Checks for a feature-check cache entry with the given key. +# See proj-cache-set for the key's semantics. +# +# $key should also almost always be 0 but, due to a tclsh +# incompatibility in 1 OS, it cannot have a default value unless it's +# the second argument (but it should be the first one). +# +# If the feature-check cache has a matching entry then this function +# assigns its value to tgtVar and returns 1, else it assigns tgtVar to +# "" and returns 0. +# +# See proj-cache-check for $key's semantics. +# +proc teaish-feature-cache-check {key tgtVar} { + upvar $tgtVar tgt + proj-cache-check -key $key -level 1 tgt +} + +# +# @teaish-check-cached@ ?flags? msg script... +# +# A proxy for feature-test impls which handles caching of a feature +# flag check on per-function basis, using the calling scope's name as +# the cache key. +# +# It emits [msg-checking $msg]. If $msg is empty then it defaults to +# the name of the caller's scope. The -nomsg flag suppresses the +# message for non-cache-hit checks. At the end, it will [msg-result +# "ok"] [msg-result "no"] unless -nostatus is used, in which case the +# caller is responsible for emitting at least a newline when it's +# done. The -msg-0 and -msg-1 flags can be used to change the ok/no +# text. +# +# This function checks for a cache hit before running $script and +# caching the result. If no hit is found then $script is run in the +# calling scope and its result value is stored in the cache. 
This +# routine will intercept a 'return' from $script. +# +# $script may be a command and its arguments, as opposed to a single +# script block. +# +# Flags: +# +# -nostatus = do not emit "ok" or "no" at the end. This presumes +# that either $script will emit at least one newline before +# returning or the caller will account for it. Because of how this +# function is typically used, -nostatus is not honored when the +# response includes a cached result. +# +# -quiet = disable output from Autosetup's msg-checking and +# msg-result for the duration of the $script check. Note that when +# -quiet is in effect, Autosetup's user-notice can be used to queue +# up output to appear after the check is done. Also note that +# -quiet has no effect on _this_ function, only the $script part. +# +# -nomsg = do not emit $msg for initial check. Like -nostatus, this +# flag is not honored when the response includes a cached result +# because it would otherwise produce no output (which is confusing +# in this context). This is useful when a check runs several other +# verbose checks and they emit all the necessary info. +# +# -msg-0 and -msg-1 MSG = strings to show when the check has failed +# resp. passed. Defaults are "no" and "ok". The 0 and 1 refer to the +# result value from teaish-feature-cache-check. +# +# -key cachekey = set the cache context key. Only needs to be +# explicit when using this function multiple times from a single +# scope. See proj-cache-check and friends for details on the key +# name. Its default is the name of the scope which calls this +# function. +# +proc teaish-check-cached {args} { + proj-parse-simple-flags args flags { + -nostatus 0 {expr 1} + -quiet 0 {expr 1} + -key => 1 + -nomsg 0 {expr 1} + -msg-0 => no + -msg-1 => ok + } + set args [lassign $args msg] + set script [join $args] + if {"" eq $msg} { + set msg [proj-scope 1] + } + if {[teaish-feature-cache-check $flags(-key) check]} { + #if {0 == $flags(-nomsg)} { + msg-checking "${msg} ... (cached) " + #} + #if {!$flags(-nostatus)} { + msg-result $flags(-msg-[expr {0 != ${check}}]) + #} + return $check + } else { + if {0 == $flags(-nomsg)} { + msg-checking "${msg} ... " + } + if {$flags(-quiet)} { + incr ::autosetup(msg-quiet) + } + set code [catch {uplevel 1 $script} rc xopt] + if {$flags(-quiet)} { + incr ::autosetup(msg-quiet) -1 + } + #puts "***** cached-check got code=$code rc=$rc" + if {$code in {0 2}} { + teaish-feature-cache-set 1 $rc + if {!$flags(-nostatus)} { + msg-result $flags(-msg-[expr {0 != ${rc}}]) + } else { + #show-notices; # causes a phantom newline because we're in a + #msg-checking scope, so... + if {[info exists ::autosetup(notices)]} { + show-notices + } + } + } else { + #puts "**** code=$code rc=$rc xopt=$xopt" + teaish-feature-cache-set 1 0 + } + #puts "**** code=$code rc=$rc" + return {*}$xopt $rc + } +} + +# +# Internal helper for teaish__defs_format_: returns a JSON-ish quoted +# form of the given string-type values. +# +# If $asList is true then the return value is in {$value} form. If +# $asList is false it only performs the most basic of escaping and +# the input must not contain any control characters. +# +proc teaish__quote_str {asList value} { + if {$asList} { + return "{${value}}" + } + return \"[string map [list \\ \\\\ \" \\\"] $value]\" +} + +# +# Internal helper for teaish__defines_to_list. Expects to be passed +# a name and the variadic $args which are passed to +# teaish__defines_to_list.. 
If it finds a pattern match for the +# given $name in the various $args, it returns the type flag for that +# $name, e.g. "-str" or "-bare", else returns an empty string. +# +proc teaish__defs_type {name spec} { + foreach {type patterns} $spec { + foreach pattern $patterns { + if {[string match $pattern $name]} { + return $type + } + } + } + return "" +} + +# +# An internal impl detail. Requires a data type specifier, as used by +# Autosetup's [make-config-header], and a value. Returns the formatted +# value or the value $::teaish__Config(defs-skip) if the caller should +# skip emitting that value. +# +# In addition to -str, -auto, etc., as defined by make-config-header, +# it supports: +# +# -list {...} will cause non-integer values to be quoted in {...} +# instead of quotes. +# +# -autolist {...} works like -auto {...} except that it falls back to +# -list {...} type instead of -str {...} style for non-integers. +# +# -jsarray {...} emits the output in something which, for +# conservative inputs, will be a valid JSON array. It can only +# handle relatively simple values with no control characters in +# them. +# +set teaish__Config(defs-skip) "-teaish__defs_format sentinel" +proc teaish__defs_format {type value} { + switch -exact -- $type { + -bare { + # Just output the value unchanged + } + -none { + set value $::teaish__Config(defs-skip) + } + -str { + set value [teaish__quote_str 0 $value] + } + -auto { + # Automatically determine the type + if {![string is integer -strict $value]} { + set value [teaish__quote_str 0 $value] + } + } + -autolist { + if {![string is integer -strict $value]} { + set value [teaish__quote_str 1 $value] + } + } + -list { + set value [teaish__quote_str 1 $value] + } + -jsarray { + set ar {} + foreach v $value { + if {![string is integer -strict $v]} { + set v [teaish__quote_str 0 $v] + } + if {$::teaish__Config(defs-skip) ne $v} { + lappend ar $v + } + } + set value [concat \[ [join $ar {, }] \]] + } + "" { + # (Much later:) Why do we do this? + set value $::teaish__Config(defs-skip) + } + default { + proj-error \ + "Unknown [proj-scope] -type ($type) called from" \ + [proj-scope 1] + } + } + return $value +} + +# +# Returns Tcl code in the form of code which evaluates to a list of +# configure-time DEFINEs in the form {key val key2 val...}. It may +# misbehave for values which are not numeric or simple strings. Some +# defines are specifically filtered out of the result, either because +# their irrelevant to teaish or because they may be arbitrarily large +# (e.g. makefile content). +# +# The $args are explained in the docs for internal-use-only +# [teaish__defs_format]. The default mode is -autolist. +# +proc teaish__defines_to_list {args} { + set lines {} + lappend lines "\{" + set skipper $::teaish__Config(defs-skip) + set args [list \ + -none { + TEAISH__* + TEAISH_*_CODE + AM_* AS_* + } \ + {*}$args \ + -autolist *] + foreach d [lsort [dict keys [all-defines]]] { + set type [teaish__defs_type $d $args] + set value [teaish__defs_format $type [get-define $d]] + if {$skipper ne $value} { + lappend lines "$d $value" + } + } + lappend lines "\}" + tailcall join $lines "\n" +} + +# +# teaish__pragma ...flags +# +# Offers a way to tweak how teaish's core behaves in some cases, in +# particular those which require changing how the core looks for an +# extension and its files. +# +# Accepts the following flags. 
Those marked with [L] are safe to use +# during initial loading of tclish.tcl (recall that most teaish APIs +# cannot be used until [teaish-configure] is called). +# +# static-pkgIndex.tcl [L]: Tells teaish that ./pkgIndex.tcl is not +# a generated file, so it will not try to overwrite or delete +# it. Errors out if it does not find pkgIndex.tcl in the +# extension's dir. +# +# no-dist [L]: tells teaish to elide the 'make dist' recipe +# from the generated Makefile. +# +# no-dll [L]: tells teaish to elide the DLL-building recipe +# from the generated Makefile. +# +# no-vsatisfies-error [L]: tells teaish that failure to match the +# -vsatisfies value should simply "return" instead of "error". +# +# no-tester [L]: disables automatic generation of teaish.test.tcl +# even if a copy of _teaish.tester.tcl.in is found. +# +# no-full-dist [L]: changes the "make dist" rules to never include +# a copy of teaish itself. By default it will include itself only +# if the extension lives in the same directory as teaish. +# +# full-dist [L]: changes the "make dist" rules to always include +# a copy of teaish itself. +# +# Emits a warning message for unknown arguments. +# +proc teaish__pragma {args} { + foreach arg $args { + switch -exact -- $arg { + + static-pkgIndex.tcl { + if {$::teaish__Config(tm-policy)} { + proj-fatal -up "Cannot use pragma $arg together with -tm.tcl or -tm.tcl.in." + } + set tpi [file join $::teaish__Config(extension-dir) pkgIndex.tcl] + if {[file exists $tpi]} { + define TEAISH_PKGINDEX_TCL_IN "" + define TEAISH_PKGINDEX_TCL $tpi + set ::teaish__Config(pkgindex-policy) 0x20 + } else { + proj-error "pragma $arg: found no package-local pkgIndex.tcl\[.in]" + } + } + + no-dist { + set ::teaish__Config(dist-enabled) 0 + } + + no-install { + set ::teaish__Config(install-enabled) 0 + } + + full-dist { + set ::teaish__Config(dist-full-enabled) 1 + } + + no-full-dist { + set ::teaish__Config(dist-full-enabled) 0 + } + + no-dll { + set ::teaish__Config(dll-enabled) 0 + } + + no-vsatisfies-error { + set ::teaish__Config(vsatisfies-error) 0 + } + + no-tester { + define TEAISH_TESTER_TCL_IN "" + define TEAISH_TESTER_TCL "" + } + + default { + proj-error "Unknown flag: $arg" + } + } + } +} + +# +# @teaish-pkginfo-set ...flags +# +# The way to set up the initial package state. Used like: +# +# teaish-pkginfo-set -name foo -version 0.1.2 +# +# Or: +# +# teaish-pkginfo-set ?-vars|-subst? {-name foo -version 0.1.2} +# +# The latter may be easier to write for a multi-line invocation. +# +# For the second call form, passing the -vars flag tells it to perform +# a [subst] of (only) variables in the {...} part from the calling +# scope. The -subst flag will cause it to [subst] the {...} with +# command substitution as well (but no backslash substitution). When +# using -subst for string concatenation, e.g. with -libDir +# foo[get-version-number], be sure to wrap the value in braces: +# -libDir {foo[get-version-number]}. +# +# Each pkginfo flag corresponds to one piece of extension package +# info. Teaish provides usable default values for all of these flags, +# but at least the -name and -version should be set by clients. +# e.g. the default -name is the directory name the extension lives in, +# which may change (e.g. when building it from a "make dist" bundle). +# +# The flags: +# +# -name theName: The extension's name. It defaults to the name of the +# directory containing the extension. (In TEA this would be the +# PACKAGE_NAME, not to be confused with...) 
+# +# -name.pkg pkg-provide-name: The extension's name for purposes of +# Tcl_PkgProvide(), [package require], and friends. It defaults to +# the `-name`, and is normally the same, but some projects (like +# SQLite) have a different name here than they do in their +# historical TEA PACKAGE_NAME. +# +# -version version: The extension's package version. Defaults to +# 0.0.0. +# +# -libDir dirName: The base name of the directory into which this +# extension should be installed. It defaults to a concatenation of +# `-name.pkg` and `-version`. +# +# -loadPrefix prefix: For use as the second argument passed to +# Tcl's `load` command in the package-loading process. It defaults +# to title-cased `-name.pkg` because Tcl's `load` plugin system +# expects it in that form. +# +# -options {...}: If provided, it must be a list compatible with +# Autosetup's `options-add` function. These can also be set up via +# `teaish-options`. +# +# -vsatisfies {{...} ...}: Expects a list-of-lists of conditions +# for Tcl's `package vsatisfies` command: each list entry is a +# sub-list of `{PkgName Condition...}`. Teaish inserts those +# checks via its default pkgIndex.tcl.in and _teaish.tester.tcl.in +# templates to verify that the system's package dependencies meet +# these requirements. The default value is `{{Tcl 8.5-}}` (recall +# that it's a list-of-lists), as 8.5 is the minimum Tcl version +# teaish will run on, but some extensions may require newer +# versions or dependencies on other packages. As a special case, +# if `-vsatisfies` is given a single token, e.g. `8.6-`, then it +# is transformed into `{Tcl $thatToken}`, i.e. it checks the Tcl +# version which the package is being run with. If given multiple +# lists, each `package provides` check is run in the given +# order. Failure to meet a `vsatisfies` condition triggers an +# error. +# +# -url {...}: an optional URL for the extension. +# +# -pragmas {...} A list of infrequently-needed lower-level +# directives which can influence teaish, including: +# +# static-pkgIndex.tcl: tells teaish that the client manages their +# own pkgIndex.tcl, so that teaish won't try to overwrite it +# using a template. +# +# no-dist: tells teaish to elide the "make dist" recipe from the +# makefile so that the client can implement it. +# +# no-dll: tells teaish to elide the makefile rules which build +# the DLL, as well as any templated test script and pkgIndex.tcl +# references to them. The intent here is to (A) support +# client-defined build rules for the DLL and (B) eventually +# support script-only extensions. +# +# Unsupported flags or pragmas will trigger an error. +# +# Potential pothole: setting certain state, e.g. -version, after the +# initial call requires recalculating of some [define]s. Any such +# changes should be made as early as possible in teaish-configure so +# that any later use of those [define]s gets recorded properly (not +# with the old value). This is particularly relevant when it is not +# possible to determine the -version or -name until teaish-configure +# has been called, and it's updated dynamically from +# teaish-configure. Notably: +# +# - If -version or -name are updated, -libDir will almost certainly +# need to be explicitly set along with them. +# +# - If -name is updated, -loadPrefix probably needs to be as well. 
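+# An illustrative sketch of the second call form described above (all
+# values here are hypothetical):
+#
+#   teaish-pkginfo-set -vars {
+#     -name foo
+#     -version 1.2.3
+#     -libDir foo$::fooVersion
+#     -vsatisfies {{Tcl 8.6-}}
+#     -pragmas {no-dist}
+#   }
+#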
+# +proc teaish-pkginfo-set {args} { + set doVars 0 + set doCommands 0 + set xargs $args + set recalc {} + foreach arg $args { + switch -exact -- $arg { + -vars { + incr doVars + set xargs [lassign $xargs -] + } + -subst { + incr doVars + incr doCommands + set xargs [lassign $xargs -] + } + default { + break + } + } + } + set args $xargs + unset xargs + if {1 == [llength $args] && [llength [lindex $args 0]] > 1} { + # Transform a single {...} arg into the canonical call form + set a [list {*}[lindex $args 0]] + if {$doVars || $doCommands} { + set sflags -nobackslashes + if {!$doCommands} { + lappend sflags -nocommands + } + set a [uplevel 1 [list subst {*}$sflags $a]] + } + set args $a + } + set sentinel "" + set flagDefs [list] + foreach {f d} $::teaish__Config(pkginfo-f2d) { + lappend flagDefs $f => $sentinel + } + proj-parse-simple-flags args flags $flagDefs + if {[llength $args]} { + proj-error -up "Too many (or unknown) arguments to [proj-scope]: $args" + } + foreach {f d} $::teaish__Config(pkginfo-f2d) { + if {$sentinel eq [set v $flags($f)]} continue + switch -exact -- $f { + + -options { + proj-assert {"" eq $d} + options-add $v + } + + -pragmas { + teaish__pragma {*}$v + } + + -vsatisfies { + if {1 == [llength $v] && 1 == [llength [lindex $v 0]]} { + # Transform X to {Tcl $X} + set v [list [join [list Tcl $v]]] + } + define $d $v + } + + -pkgInit.tcl - + -pkgInit.tcl.in { + if {0x22 & $::teaish__Config(pkginit-policy)} { + proj-fatal "Cannot use -pkgInit.tcl(.in) more than once." + } + set x [file join $::teaish__Config(extension-dir) $v] + set tTail [file tail $v] + if {"-pkgInit.tcl.in" eq $f} { + # Generate pkginit file X from X.in + set pI 0x02 + set tIn $x + set tOut [file rootname $tTail] + set other -pkgInit.tcl + } else { + # Static pkginit file X + set pI 0x20 + set tIn "" + set tOut $x + set other -pkgInit.tcl.in + } + set ::teaish__Config(pkginit-policy) $pI + set ::teaish__PkgInfo($other) {} + define TEAISH_PKGINIT_TCL_IN $tIn + define TEAISH_PKGINIT_TCL $tOut + define TEAISH_PKGINIT_TCL_TAIL $tTail + teaish-dist-add $v + set v $x + } + + -src { + set d $::teaish__Config(extension-dir) + foreach f $v { + lappend ::teaish__Config(dist-files) $f + lappend ::teaish__Config(extension-src) $d/$f + lappend ::teaish__PkgInfo(-src) $f + # ^^^ so that default-value initialization in + # teaish-configure-core recognizes that it's been set. + } + } + + -tm.tcl - + -tm.tcl.in { + if {0x30 & $::teaish__Config(pkgindex-policy)} { + proj-fatal "Cannot use $f together with a pkgIndex.tcl." + } elseif {$::teaish__Config(tm-policy)} { + proj-fatal "Cannot use -tm.tcl(.in) more than once." 
+ } + set x [file join $::teaish__Config(extension-dir) $v] + if {"-tm.tcl.in" eq $f} { + # Generate tm file X from X.in + set pT 0x02 + set pI 0x100 + set tIn $x + set tOut [file rootname [file tail $v]] + set other -tm.tcl + } else { + # Static tm file X + set pT 0x20 + set pI 0x200 + set tIn "" + set tOut $x + set other -tm.tcl.in + } + set ::teaish__Config(pkgindex-policy) $pI + set ::teaish__Config(tm-policy) $pT + set ::teaish__PkgInfo($other) {} + define TEAISH_TM_TCL_IN $tIn + define TEAISH_TM_TCL $tOut + define TEAISH_PKGINDEX_TCL "" + define TEAISH_PKGINDEX_TCL_IN "" + define TEAISH_PKGINDEX_TCL_TAIL "" + teaish-dist-add $v + teaish__pragma no-dll + set v $x + } + + default { + proj-assert {"" ne $d} + define $d $v + } + } + set ::teaish__PkgInfo($f) $v + if {$f in {-name -version -libDir -loadPrefix}} { + lappend recalc $f + } + } + if {"" ne $recalc} { + teaish__define_pkginfo_derived $recalc + } +} + +# +# @teaish-pkginfo-get ?arg? +# +# If passed no arguments, it returns the extension config info in the +# same form accepted by teaish-pkginfo-set. +# +# If passed one -flagname arg then it returns the value of that config +# option. +# +# Else it treats arg as the name of caller-scoped variable to +# which this function assigns an array containing the configuration +# state of this extension, in the same structure accepted by +# teaish-pkginfo-set. In this case it returns an empty string. +# +proc teaish-pkginfo-get {args} { + set cases {} + set argc [llength $args] + set rv {} + switch -exact $argc { + 0 { + # Return a list of (-flag value) pairs + lappend cases default {{ + if {[info exists ::teaish__PkgInfo($flag)]} { + lappend rv $flag $::teaish__PkgInfo($flag) + } else { + lappend rv $flag [get-define $defName] + } + }} + } + + 1 { + set arg $args + if {[string match -* $arg]} { + # Return the corresponding -flag's value + lappend cases $arg {{ + if {[info exists ::teaish__PkgInfo($flag)]} { + return $::teaish__PkgInfo($flag) + } else { + return [get-define $defName] + } + }} + } else { + # Populate target with an array of (-flag value). + upvar $arg tgt + array set tgt {} + lappend cases default {{ + if {[info exists ::teaish__PkgInfo($flag)]} { + set tgt($flag) $::teaish__PkgInfo($flag) + } else { + set tgt($flag) [get-define $defName] + } + }} + } + } + + default { + proj-error "invalid arg count from [proj-scope 1]" + } + } + + foreach {flag defName} $::teaish__Config(pkginfo-f2d) { + switch -exact -- $flag [join $cases] + } + if {0 == $argc} { return $rv } +} + +# (Re)set some defines based on pkginfo state. $flags is the list of +# pkginfo -flags which triggered this, or "*" for the initial call. 
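+# For example, teaish-pkginfo-set invokes this as
+# [teaish__define_pkginfo_derived $recalc] with the list of flags it
+# just modified, whereas the initial call passes "*" to recalculate
+# all derived defines.
+#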
+proc teaish__define_pkginfo_derived {flags} { + set all [expr {{*} in $flags}] + if {$all || "-version" in $flags || "-name" in $flags} { + set name $::teaish__PkgInfo(-name) ; # _not_ -name.pkg + if {[info exists ::teaish__PkgInfo(-version)]} { + set pkgver $::teaish__PkgInfo(-version) + set libname "lib" + if {[string match *-cygwin [get-define host]]} { + set libname cyg + } + define TEAISH_DLL8_BASENAME $libname$name$pkgver + define TEAISH_DLL9_BASENAME ${libname}tcl9$name$pkgver + set ext [get-define TARGET_DLLEXT] + define TEAISH_DLL8 [get-define TEAISH_DLL8_BASENAME]$ext + define TEAISH_DLL9 [get-define TEAISH_DLL9_BASENAME]$ext + } + } + if {$all || "-libDir" in $flags} { + if {[info exists ::teaish__PkgInfo(-libDir)]} { + define TCLLIBDIR \ + [file dirname [get-define TCLLIBDIR]]/$::teaish__PkgInfo(-libDir) + } + } +} + +# +# @teaish-checks-queue -pre|-post args... +# +# Queues one or more arbitrary "feature test" functions to be run when +# teaish-checks-run is called. $flag must be one of -pre or -post to +# specify whether the tests should be run before or after +# teaish-configure is run. Each additional arg is the name of a +# feature-test proc. +# +proc teaish-checks-queue {flag args} { + if {$flag ni {-pre -post}} { + proj-error "illegal flag: $flag" + } + lappend ::teaish__Config(queued-checks${flag}) {*}$args +} + +# +# @teaish-checks-run -pre|-post +# +# Runs all feature checks queued using teaish-checks-queue +# then cleares the queue. +# +proc teaish-checks-run {flag} { + if {$flag ni {-pre -post}} { + proj-error "illegal flag: $flag" + } + #puts "*** running $flag: $::teaish__Config(queued-checks${flag})" + set foo 0 + foreach f $::teaish__Config(queued-checks${flag}) { + if {![teaish-feature-cache-check $f foo]} { + set v [$f] + teaish-feature-cache-set $f $v + } + } + set ::teaish__Config(queued-checks${flag}) {} +} + +# +# A general-purpose getter for various teaish state. Requires one +# flag, which determines its result value. Flags marked with [L] below +# are safe for using at load-time, before teaish-pkginfo-set is called +# +# -dir [L]: returns the extension's directory, which may differ from +# the teaish core dir or the build dir. +# +# -teaish-home [L]: the "home" dir of teaish itself, which may +# differ from the extension dir or build dir. +# +# -build-dir [L]: the build directory (typically the current working +# -dir). +# +# Any of the teaish-pkginfo-get/get flags: returns the same as +# teaish-pkginfo-get. Not safe for use until teaish-pkginfo-set has +# been called. +# +# Triggers an error if passed an unknown flag. +# +proc teaish-get {flag} { + #-teaish.tcl {return $::teaish__Config(teaish.tcl)} + switch -exact -- $flag { + -dir { + return $::teaish__Config(extension-dir) + } + -teaish-home { + return $::autosetup(srcdir) + } + -build-dir { + return $::autosetup(builddir) + } + default { + if {[info exists ::teaish__PkgInfo($flag)]} { + return $::teaish__PkgInfo($flag) + } + } + } + proj-error "Unhandled flag: $flag" +} + +# +# Handles --teaish-create-extension=TARGET-DIR +# +proc teaish__create_extension {dir} { + set force [opt-bool teaish-force] + if {"" eq $dir} { + proj-error "--teaish-create-extension=X requires a directory name." 
+ } + file mkdir $dir/generic + set cwd [pwd] + #set dir [file-normalize [file join $cwd $dir]] + teaish__verbose 1 msg-result "Created dir $dir" + cd $dir + if {!$force} { + # Ensure that we don't blindly overwrite anything + foreach f { + generic/teaish.c + teaish.tcl + teaish.make.in + teaish.test.tcl + } { + if {[file exists $f]} { + error "Cowardly refusing to overwrite $dir/$f. Use --teaish-force to overwrite." + } + } + } + + set name [file tail $dir] + set pkgName $name + set version 0.0.1 + set loadPrefix [string totitle $pkgName] + set content {teaish-pkginfo-set } + #puts "0 content=$content" + if {[opt-str teaish-extension-pkginfo epi]} { + set epi [string trim $epi] + if {[string match "*\n*" $epi]} { + set epi "{$epi}" + } elseif {![string match "{*}" $epi]} { + append content "\{" $epi "\}" + } else { + append content $epi + } + #puts "2 content=$content\nepi=$epi" + } else { + append content [subst -nocommands -nobackslashes {{ + -name ${name} + -name.pkg ${pkgName} + -name.dist ${pkgName} + -version ${version} + -loadPrefix $loadPrefix + -libDir ${name}${version} + -vsatisfies {{Tcl 8.5-}} + -url {} + -options {} + -pragmas {full-dist} + }}] + #puts "3 content=$content" + } + #puts "1 content=$content" + append content "\n" { +#proc teaish-options {} { + # Return a list and/or use \[options-add\] to add new + # configure flags. This is called before teaish's + # bootstrapping is finished, so only teaish-* + # APIs which are explicitly noted as being safe + # early on may be used here. Any autosetup-related + # APIs may be used here. + # + # Return an empty string if there are no options to + # add or if they are added using \[options-add\]. + # + # If there are no options to add, this proc need + # not be defined. +#} + +# Called by teaish once bootstrapping is complete. +# This function is responsible for the client-specific +# parts of the configuration process. +proc teaish-configure {} { + teaish-src-add -dir -dist generic/teaish.c + teaish-define-to-cflag -quote TEAISH_PKGNAME TEAISH_VERSION + + # TODO: your code goes here.. +} +}; # $content + proj-file-write teaish.tcl $content + teaish__verbose 1 msg-result "Created teaish.tcl" + + set content "# Teaish test script. +# When this tcl script is invoked via 'make test' it will have loaded +# the package, run any teaish.pkginit.tcl code, and loaded +# autosetup/teaish/tester.tcl. +" + proj-file-write teaish.test.tcl $content + teaish__verbose 1 msg-result "Created teaish.test.tcl" + + set content [subst -nocommands -nobackslashes { +#include +static int +${loadPrefix}_Cmd(ClientData cdata, Tcl_Interp *interp, int objc, Tcl_Obj *const objv[]){ + Tcl_SetObjResult(interp, Tcl_NewStringObj("this is the ${name} extension", -1)); + return TCL_OK; +} + +extern int DLLEXPORT ${loadPrefix}_Init(Tcl_Interp *interp){ + if (Tcl_InitStubs(interp, TCL_VERSION, 0) == NULL) { + return TCL_ERROR; + } + if (Tcl_PkgProvide(interp, TEAISH_PKGNAME, TEAISH_VERSION) == TCL_ERROR) { + return TCL_ERROR; + } + Tcl_CreateObjCommand(interp, TEAISH_PKGNAME, ${loadPrefix}_Cmd, NULL, NULL); + return TCL_OK; +} +}] + proj-file-write generic/teaish.c $content + teaish__verbose 1 msg-result "Created generic/teaish.c" + + set content "# teaish makefile for the ${name} extension +# tx.src = \$(tx.dir)/generic/teaish.c +# tx.LDFLAGS = +# tx.CFLAGS = +" + proj-file-write teaish.make.in $content + teaish__verbose 1 msg-result "Created teaish.make.in" + + msg-result "Created new extension \[$dir\]." 
+ + cd $cwd + set ::teaish__Config(install-ext-dir) $dir +} + +# +# Internal helper for teaish__install +# +proc teaish__install_file {f destDir force} { + set dest $destDir/[file tail $f] + if {[file isdirectory $f]} { + file mkdir $dest + } elseif {!$force && [file exists $dest]} { + array set st1 [file stat $f] + array set st2 [file stat $dest] + if {($st1(mtime) == $st2(mtime)) + && ($st1(size) == $st2(size))} { + if {[file tail $f] in { + pkgIndex.tcl.in + _teaish.tester.tcl.in + }} { + # Assume they're the same. In the scope of the "make dist" + # rules, this happens legitimately when an extension with a + # copy of teaish installed in the same dir assumes that the + # pkgIndex.tcl.in and _teaish.tester.tcl.in belong to the + # extension, whereas teaish believes they belong to teaish. + # So we end up with dupes of those. + return + } + } + proj-error -up "Cowardly refusing to overwrite \[$dest\]." \ + "Use --teaish-force to enable overwriting." + } else { + # file copy -force $f $destDir; # loses +x bit + # + # JimTcl doesn't have [file attribute], so we can't use that here + # (in the context of an autosetup configure script). + exec cp -p $f $dest + } +} + +# +# Installs a copy of teaish, with autosetup, to $dDest, which defaults +# to the --teaish-install=X or --teash-create-extension=X dir. Won't +# overwrite files unless --teaish-force is used. +# +proc teaish__install {{dDest ""}} { + if {$dDest in {auto ""}} { + set dDest [opt-val teaish-install] + if {$dDest in {auto ""}} { + if {[info exists ::teaish__Config(install-ext-dir)]} { + set dDest $::teaish__Config(install-ext-dir) + } + } + } + set force [opt-bool teaish-force] + if {$dDest in {auto ""}} { + proj-error "Cannot determine installation directory." + } elseif {!$force && [file exists $dDest/auto.def]} { + proj-error \ + "Target dir looks like it already contains teaish and/or autosetup: $dDest" \ + "\nUse --teaish-force to overwrite it." + } + + set dSrc $::autosetup(srcdir) + set dAS $::autosetup(libdir) + set dAST $::teaish__Config(core-dir) + set dASTF $dAST/feature + teaish__verbose 1 msg-result "Installing teaish to \[$dDest\]..." + if {$::teaish__Config(verbose)>1} { + msg-result "dSrc = $dSrc" + msg-result "dAS = $dAS" + msg-result "dAST = $dAST" + msg-result "dASTF = $dASTF" + msg-result "dDest = $dDest" + } + + # Dest subdirs... + set ddAS $dDest/autosetup + set ddAST $ddAS/teaish + set ddASTF $ddAST/feature + foreach {srcDir destDir} [list \ + $dAS $ddAS \ + $dAST $ddAST \ + $dASTF $ddASTF \ + ] { + teaish__verbose 1 msg-result "Copying files to $destDir..." + file mkdir $destDir + foreach f [glob -nocomplain -directory $srcDir *] { + if {[string match {*~} $f] || [string match "#*#" [file tail $f]]} { + # Editor-generated backups and emacs lock files + continue + } + teaish__verbose 2 msg-result "\t$f" + teaish__install_file $f $destDir $force + } + } + teaish__verbose 1 msg-result "Copying files to $dDest..." + foreach f { + auto.def configure Makefile.in pkgIndex.tcl.in + _teaish.tester.tcl.in + } { + teaish__verbose 2 msg-result "\t$f" + teaish__install_file $dSrc/$f $dDest $force + } + set ::teaish__Config(install-self-dir) $dDest + msg-result "Teaish $::teaish__Config(version) installed in \[$dDest\]." 
+} diff --git a/autosetup/teaish/feature.tcl b/autosetup/teaish/feature.tcl new file mode 100644 index 0000000000..6c927d1a77 --- /dev/null +++ b/autosetup/teaish/feature.tcl @@ -0,0 +1,214 @@ +######################################################################## +# 2025 April 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# * May you do good and not evil. +# * May you find forgiveness for yourself and forgive others. +# * May you share freely, never taking more than you give. +# +######################################################################## +# ----- @module feature-tests.tcl ----- +# @section TEA-ish collection of feature tests. +# +# Functions in this file with a prefix of teaish__ are +# private/internal APIs. Those with a prefix of teaish- are +# public APIs. + + +# @teaish-check-libz +# +# Checks for zlib.h and the function deflate in libz. If found, +# prepends -lz to the extension's ldflags and returns 1, else returns +# 0. It also defines LDFLAGS_LIBZ to the libs flag. +# +proc teaish-check-libz {} { + teaish-check-cached "Checking for libz" { + set rc 0 + if {[msg-quiet cc-check-includes zlib.h] && [msg-quiet proj-check-function-in-lib deflate z]} { + teaish-ldflags-prepend [define LDFLAGS_LIBZ [get-define lib_deflate]] + undefine lib_deflate + incr rc + } + expr $rc + } +} + +# @teaish-check-librt ?funclist? +# +# Checks whether -lrt is needed for any of the given functions. If +# so, appends -lrt via [teaish-ldflags-prepend] and returns 1, else +# returns 0. It also defines LDFLAGS_LIBRT to the libs flag or an +# empty string. +# +# Some systems (ex: SunOS) require -lrt in order to use nanosleep. +# +proc teaish-check-librt {{funclist {fdatasync nanosleep}}} { + teaish-check-cached -nostatus "Checking whether ($funclist) need librt" { + define LDFLAGS_LIBRT "" + foreach func $funclist { + if {[msg-quiet proj-check-function-in-lib $func rt]} { + set ldrt [get-define lib_${func}] + undefine lib_${func} + if {"" ne $ldrt} { + teaish-ldflags-prepend -r [define LDFLAGS_LIBRT $ldrt] + msg-result $ldrt + return 1 + } else { + msg-result "no lib needed" + return 1 + } + } + } + msg-result "not found" + return 0 + } +} + +# @teaish-check-stdint +# +# A thin proxy for [cc-with] which checks for and the +# various fixed-size int types it declares. It defines HAVE_STDINT_T +# to 0 or 1 and (if it's 1) defines HAVE_XYZ_T for each XYZ int type +# to 0 or 1, depending on whether its available. +proc teaish-check-stdint {} { + teaish-check-cached "Checking for stdint.h" { + msg-quiet cc-with {-includes stdint.h} \ + {cc-check-types int8_t int16_t int32_t int64_t intptr_t \ + uint8_t uint16_t uint32_t uint64_t uintptr_t} + } +} + +# @teaish-is-mingw +# +# Returns 1 if building for mingw, else 0. +proc teaish-is-mingw {} { + return [expr { + [string match *mingw* [get-define host]] && + ![file exists /dev/null] + }] +} + +# @teaish-check-libdl +# +# Checks for whether dlopen() can be found and whether it requires +# -ldl for linking. If found, returns 1, defines LDFLAGS_DLOPEN to the +# linker flags (if any), and passes those flags to +# teaish-ldflags-prepend. It unconditionally defines HAVE_DLOPEN to 0 +# or 1 (the its return result value). 
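+# Illustrative usage from an extension's teaish-configure (a sketch,
+# not code from this tree):
+#
+#   if {[teaish-check-dlopen]} {
+#     # HAVE_DLOPEN and LDFLAGS_DLOPEN are now defined, and any
+#     # -ldl needed for linking has been prepended to the ldflags.
+#   }
+#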
+proc teaish-check-dlopen {} { + teaish-check-cached -nostatus "Checking for dlopen()" { + set rc 0 + set lfl "" + if {[cc-with {-includes dlfcn.h} { + cctest -link 1 -declare "extern char* dlerror(void);" -code "dlerror();"}]} { + msg-result "-ldl not needed" + incr rc + } elseif {[cc-check-includes dlfcn.h]} { + incr rc + if {[cc-check-function-in-lib dlopen dl]} { + set lfl [get-define lib_dlopen] + undefine lib_dlopen + msg-result " dlopen() needs $lfl" + } else { + msg-result " - dlopen() not found in libdl. Assuming dlopen() is built-in." + } + } else { + msg-result "not found" + } + teaish-ldflags-prepend [define LDFLAGS_DLOPEN $lfl] + define HAVE_DLOPEN $rc + } +} + +# +# @teaish-check-libmath +# +# Handles the --enable-math flag. Returns 1 if found, else 0. +# If found, it prepends -lm (if needed) to the linker flags. +proc teaish-check-libmath {} { + teaish-check-cached "Checking for libc math library" { + set lfl "" + set rc 0 + if {[msg-quiet proj-check-function-in-lib ceil m]} { + incr rc + set lfl [get-define lib_ceil] + undefine lib_ceil + teaish-ldflags-prepend $lfl + msg-checking "$lfl " + } + define LDFLAGS_LIBMATH $lfl + expr $rc + } +} + +# @teaish-import-features ?-flags? feature-names... +# +# For each $name in feature-names... it invokes: +# +# use teaish/feature/$name +# +# to load TEAISH_AUTOSETUP_DIR/feature/$name.tcl +# +# By default, if a proc named teaish-check-${name}-options is defined +# after sourcing a file, it is called and its result is passed to +# proj-append-options. This can be suppressed with the -no-options +# flag. +# +# Flags: +# +# -no-options: disables the automatic running of +# teaish-check-NAME-options, +# +# -run: if the function teaish-check-NAME exists after importing +# then it is called. This flag must not be used when calling this +# function from teaish-options. This trumps both -pre and -post. +# +# -pre: if the function teaish-check-NAME exists after importing +# then it is passed to [teaish-checks-queue -pre]. +# +# -post: works like -pre but instead uses[teaish-checks-queue -post]. +proc teaish-import-features {args} { + set pk "" + set doOpt 1 + proj-parse-simple-flags args flags { + -no-options 0 {set doOpt 0} + -run 0 {expr 1} + -pre 0 {set pk -pre} + -post 0 {set pk -post} + } + # + # TODO: never import the same module more than once. The "use" + # command is smart enough to not do that but we would need to + # remember whether or not any teaish-check-${arg}* procs have been + # called before, and skip them. + # + if {$flags(-run) && "" ne $pk} { + proj-error "Cannot use both -run and $pk" \ + " (called from [proj-scope 1])" + } + + foreach arg $args { + uplevel "use teaish/feature/$arg" + if {$doOpt} { + set n "teaish-check-${arg}-options" + if {[llength [info proc $n]] > 0} { + if {"" ne [set x [$n]]} { + options-add $x + } + } + } + if {$flags(-run)} { + set n "teaish-check-${arg}" + if {[llength [info proc $n]] > 0} { + uplevel 1 $n + } + } elseif {"" ne $pk} { + set n "teaish-check-${arg}" + if {[llength [info proc $n]] > 0} { + teaish-checks-queue {*}$pk $n + } + } + } +} diff --git a/autosetup/teaish/tester.tcl b/autosetup/teaish/tester.tcl new file mode 100644 index 0000000000..a25b366e8d --- /dev/null +++ b/autosetup/teaish/tester.tcl @@ -0,0 +1,293 @@ +######################################################################## +# 2025 April 5 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# * May you do good and not evil. 
+# * May you find forgiveness for yourself and forgive others. +# * May you share freely, never taking more than you give. +# +######################################################################## +# +# Helper routines for running tests on teaish extensions +# +######################################################################## +# ----- @module teaish/tester.tcl ----- +# +# @section TEA-ish Testing APIs. +# +# Though these are part of the autosup dir hierarchy, they are not +# intended to be run from autosetup code. Rather, they're for use +# with/via teaish.tester.tcl and target canonical Tcl only, not JimTcl +# (which the autosetup pieces do target). + +# +# @test-current-scope ?lvl? +# +# Returns the name of the _calling_ proc from ($lvl + 1) levels up the +# call stack (where the caller's level will be 1 up from _this_ +# call). If $lvl would resolve to global scope "global scope" is +# returned and if it would be negative then a string indicating such +# is returned (as opposed to throwing an error). +# +proc test-current-scope {{lvl 0}} { + #uplevel [expr {$lvl + 1}] {lindex [info level 0] 0} + set ilvl [info level] + set offset [expr {$ilvl - $lvl - 1}] + if { $offset < 0} { + return "invalid scope ($offset)" + } elseif { $offset == 0} { + return "global scope" + } else { + return [lindex [info level $offset] 0] + } +} + +# @test-msg +# +# Emits all arugments to stdout. +# +proc test-msg {args} { + puts "$args" +} + +# @test-warn +# +# Emits all arugments to stderr. +# +proc test-warn {args} { + puts stderr "WARNING: $args" +} + +# +# @test-error msg +# +# Triggers a test-failed error with a string describing the calling +# scope and the provided message. +# +proc test-fail {args} { + #puts stderr "ERROR: \[[test-current-scope 1]]: $msg" + #exit 1 + error "FAIL: \[[test-current-scope 1]]: $args" +} + +array set ::test__Counters {} +array set ::test__Config { + verbose-assert 0 verbose-affirm 0 +} + +# Internal impl for affirm and assert. +# +# $args = ?-v? script {msg-on-fail ""} +proc test__affert {failMode args} { + if {$failMode} { + set what assert + } else { + set what affirm + } + set verbose $::test__Config(verbose-$what) + if {"-v" eq [lindex $args 0]} { + lassign $args - script msg + if {1 == [llength $args]} { + # If -v is the only arg, toggle default verbose mode + set ::test__Config(verbose-$what) [expr {!$::test__Config(verbose-$what)}] + return + } + incr verbose + } else { + lassign $args script msg + } + incr ::test__Counters($what) + if {![uplevel 1 expr [list $script]]} { + if {"" eq $msg} { + set msg $script + } + set txt [join [list $what # $::test__Counters($what) "failed:" $msg]] + if {$failMode} { + puts stderr $txt + exit 1 + } else { + error $txt + } + } elseif {$verbose} { + puts stderr [join [list $what # $::test__Counters($what) "passed:" $script]] + } +} + +# +# @affirm ?-v? script ?msg? +# +# Works like a conventional assert method does, but reports failures +# using [error] instead of [exit]. If -v is used, it reports passing +# assertions to stderr. $script is evaluated in the caller's scope as +# an argument to [expr]. +# +proc affirm {args} { + tailcall test__affert 0 {*}$args +} + +# +# @assert ?-v? script ?msg? +# +# Works like [affirm] but exits on error. +# +proc assert {args} { + tailcall test__affert 1 {*}$args +} + +# +# @assert-matches ?-e? pattern ?-e? rhs ?msg? +# +# Equivalent to assert {[string match $pattern $rhs]} except that +# if either of those are prefixed with an -e flag, they are eval'd +# and their results are used. 
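+# Illustrative examples (hypothetical values):
+#
+#   assert-matches {*hello*} "hello world"
+#   assert-matches {ab*} -e {string repeat ab 2} "repeat starts with ab"
+#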
+# +proc assert-matches {args} { + set evalLhs 0 + set evalRhs 0 + if {"-e" eq [lindex $args 0]} { + incr evalLhs + set args [lassign $args -] + } + set args [lassign $args pattern] + if {"-e" eq [lindex $args 0]} { + incr evalRhs + set args [lassign $args -] + } + set args [lassign $args rhs msg] + + if {$evalLhs} { + set pattern [uplevel 1 $pattern] + } + if {$evalRhs} { + set rhs [uplevel 1 $rhs] + } + #puts "***pattern=$pattern\n***rhs=$rhs" + tailcall test__affert 1 \ + [join [list \[ string match [list $pattern] [list $rhs] \]]] $msg + # why does this not work? [list \[ string match [list $pattern] [list $rhs] \]] $msg + # "\[string match [list $pattern] [list $rhs]\]" +} + +# +# @test-assert testId script ?msg? +# +# Works like [assert] but emits $testId to stdout first. +# +proc test-assert {testId script {msg ""}} { + puts "test $testId" + tailcall test__affert 1 $script $msg +} + +# +# @test-expect testId script result +# +# Runs $script in the calling scope and compares its result to +# $result, minus any leading or trailing whitespace. If they differ, +# it triggers an [assert]. +# +proc test-expect {testId script result} { + puts "test $testId" + set x [string trim [uplevel 1 $script]] + set result [string trim $result] + tailcall test__affert 0 [list "{$x}" eq "{$result}"] \ + "\nEXPECTED: <<$result>>\nGOT: <<$x>>" +} + +# +# @test-catch cmd ?...args? +# +# Runs [cmd ...args], repressing any exception except to possibly log +# the failure. Returns 1 if it caught anything, 0 if it didn't. +# +proc test-catch {cmd args} { + if {[catch { + uplevel 1 $cmd {*}$args + } rc xopts]} { + puts "[test-current-scope] ignoring failure of: $cmd [lindex $args 0]: $rc" + return 1 + } + return 0 +} + +# +# @test-catch-matching pattern (script|cmd args...) +# +# Works like test-catch, but it expects its argument(s) to to throw an +# error matching the given string (checked with [string match]). If +# they do not throw, or the error does not match $pattern, this +# function throws, else it returns 1. +# +# If there is no second argument, the $cmd is assumed to be a script, +# and will be eval'd in the caller's scope. +# +# TODO: add -glob and -regex flags to control matching flavor. +# +proc test-catch-matching {pattern cmd args} { + if {[catch { + #puts "**** catch-matching cmd=$cmd args=$args" + if {0 == [llength $args]} { + uplevel 1 $cmd {*}$args + } else { + $cmd {*}$args + } + } rc xopts]} { + if {[string match $pattern $rc]} { + return 1 + } else { + error "[test-current-scope] exception does not match {$pattern}: {$rc}" + } + } + error "[test-current-scope] expecting to see an error matching {$pattern}" +} + +if {![array exists ::teaish__BuildFlags]} { + array set ::teaish__BuildFlags {} +} + +# +# @teaish-build-flag3 flag tgtVar ?dflt? +# +# If the current build has the configure-time flag named $flag set +# then tgtVar is assigned its value and 1 is returned, else tgtVal is +# assigned $dflt and 0 is returned. +# +# Caveat #1: only valid when called in the context of teaish's default +# "make test" recipe, e.g. from teaish.test.tcl. It is not valid from +# a teaish.tcl configure script because (A) the state it relies on +# doesn't fully exist at that point and (B) that level of the API has +# more direct access to the build state. This function requires that +# an external script have populated its internal state, which is +# normally handled via teaish.tester.tcl.in. 
+#
+# Caveat #2: defines in the style of HAVE_FEATURENAME with a value of
+# 0 are, by long-standing configure script conventions, treated as
+# _undefined_ here.
+#
+proc teaish-build-flag3 {flag tgtVar {dflt ""}} {
+  upvar $tgtVar tgt
+  if {[info exists ::teaish__BuildFlags($flag)]} {
+    set tgt $::teaish__BuildFlags($flag)
+    return 1;
+  } elseif {0==[array size ::teaish__BuildFlags]} {
+    test-warn \
+      "\[[test-current-scope]] was called from " \
+      "[test-current-scope 1] without the build flags imported."
+  }
+  set tgt $dflt
+  return 0
+}
+
+#
+# @teaish-build-flag flag ?dflt?
+#
+# Convenience form of teaish-build-flag3 which returns the
+# configure-time-defined value of $flag or "" if it's not defined (or
+# if it's an empty string).
+#
+proc teaish-build-flag {flag {dflt ""}} {
+  set tgt ""
+  teaish-build-flag3 $flag tgt $dflt
+  return $tgt
+}
diff --git a/config.h.in b/config.h.in
deleted file mode 100644
index b91f1f7940..0000000000
--- a/config.h.in
+++ /dev/null
@@ -1,150 +0,0 @@
-/* config.h.in. Generated from configure.ac by autoheader. */
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#undef HAVE_DLFCN_H
-
-/* Define to 1 if you have the `fdatasync' function. */
-#undef HAVE_FDATASYNC
-
-/* Define to 1 if you have the `gmtime_r' function. */
-#undef HAVE_GMTIME_R
-
-/* Define to 1 if the system has the type `int16_t'. */
-#undef HAVE_INT16_T
-
-/* Define to 1 if the system has the type `int32_t'. */
-#undef HAVE_INT32_T
-
-/* Define to 1 if the system has the type `int64_t'. */
-#undef HAVE_INT64_T
-
-/* Define to 1 if the system has the type `int8_t'. */
-#undef HAVE_INT8_T
-
-/* Define to 1 if the system has the type `intptr_t'. */
-#undef HAVE_INTPTR_T
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#undef HAVE_INTTYPES_H
-
-/* Define to 1 if you have the `isnan' function. */
-#undef HAVE_ISNAN
-
-/* Define to 1 if you have the `crypto' library (-lcrypto). */
-#undef HAVE_LIBCRYPTO
-
-/* Define to 1 if you have the `nss3' library (-lnss3). */
-#undef HAVE_LIBNSS3
-
-/* Define to 1 if you have the `tomcrypt' library (-ltomcrypt). */
-#undef HAVE_LIBTOMCRYPT
-
-/* Define to 1 if you have the `localtime_r' function. */
-#undef HAVE_LOCALTIME_R
-
-/* Define to 1 if you have the `localtime_s' function. */
-#undef HAVE_LOCALTIME_S
-
-/* Define to 1 if you have the <malloc.h> header file. */
-#undef HAVE_MALLOC_H
-
-/* Define to 1 if you have the `malloc_usable_size' function. */
-#undef HAVE_MALLOC_USABLE_SIZE
-
-/* Define to 1 if you have the <memory.h> header file. */
-#undef HAVE_MEMORY_H
-
-/* Define to 1 if you have the `pread' function. */
-#undef HAVE_PREAD
-
-/* Define to 1 if you have the `pread64' function. */
-#undef HAVE_PREAD64
-
-/* Define to 1 if you have the `pwrite' function. */
-#undef HAVE_PWRITE
-
-/* Define to 1 if you have the `pwrite64' function. */
-#undef HAVE_PWRITE64
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#undef HAVE_STDINT_H
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#undef HAVE_STDLIB_H
-
-/* Define to 1 if you have the `strchrnul' function. */
-#undef HAVE_STRCHRNUL
-
-/* Define to 1 if you have the <strings.h> header file. */
-#undef HAVE_STRINGS_H
-
-/* Define to 1 if you have the <string.h> header file. */
-#undef HAVE_STRING_H
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#undef HAVE_SYS_STAT_H
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#undef HAVE_SYS_TYPES_H
-
-/* Define to 1 if the system has the type `uint16_t'. */
-#undef HAVE_UINT16_T
-
-/* Define to 1 if the system has the type `uint32_t'.
*/ -#undef HAVE_UINT32_T - -/* Define to 1 if the system has the type `uint64_t'. */ -#undef HAVE_UINT64_T - -/* Define to 1 if the system has the type `uint8_t'. */ -#undef HAVE_UINT8_T - -/* Define to 1 if the system has the type `uintptr_t'. */ -#undef HAVE_UINTPTR_T - -/* Define to 1 if you have the header file. */ -#undef HAVE_UNISTD_H - -/* Define to 1 if you have the `usleep' function. */ -#undef HAVE_USLEEP - -/* Define to 1 if you have the `utime' function. */ -#undef HAVE_UTIME - -/* Define to 1 if you have the header file. */ -#undef HAVE_ZLIB_H - -/* Define to the sub-directory where libtool stores uninstalled libraries. */ -#undef LT_OBJDIR - -/* Define to the address where bug reports for this package should be sent. */ -#undef PACKAGE_BUGREPORT - -/* Define to the full name of this package. */ -#undef PACKAGE_NAME - -/* Define to the full name and version of this package. */ -#undef PACKAGE_STRING - -/* Define to the one symbol short name of this package. */ -#undef PACKAGE_TARNAME - -/* Define to the home page for this package. */ -#undef PACKAGE_URL - -/* Define to the version of this package. */ -#undef PACKAGE_VERSION - -/* Define to 1 if you have the ANSI C header files. */ -#undef STDC_HEADERS - -/* Enable large inode numbers on Mac OS X 10.5. */ -#ifndef _DARWIN_USE_64_BIT_INODE -# define _DARWIN_USE_64_BIT_INODE 1 -#endif - -/* Number of bits in a file offset, on hosts where this is settable. */ -#undef _FILE_OFFSET_BITS - -/* Define for large files, on AIX-style hosts. */ -#undef _LARGE_FILES diff --git a/configure b/configure index a2909ce0e9..64b60f8b35 100755 --- a/configure +++ b/configure @@ -1,15636 +1,4 @@ -#! /bin/sh -# Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for sqlcipher 3.37.2. -# -# -# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. -# -# -# This configure script is free software; the Free Software Foundation -# gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -# Use a proper internal environment variable to ensure we don't fall - # into an infinite loop, continuously re-executing ourselves. - if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then - _as_can_reexec=no; export _as_can_reexec; - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. 
-$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -as_fn_exit 255 - fi - # We don't want this to propagate to other subprocesses. - { _as_can_reexec=; unset _as_can_reexec;} -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1 -test -x / || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 - - test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( - ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' - ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO - ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO - PATH=/empty FPATH=/empty; export PATH FPATH - test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ - || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. 
- as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - export CONFIG_SHELL - # We cannot yet assume a decent shell, so we have to provide a -# neutralization value for shells without unset; and this also -# works around shells that cannot unset nonexistent variables. -# Preserve -v and -x to the replacement shell. -BASH_ENV=/dev/null -ENV=/dev/null -(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV -case $- in # (((( - *v*x* | *x*v* ) as_opts=-vx ;; - *v* ) as_opts=-v ;; - *x* ) as_opts=-x ;; - * ) as_opts= ;; -esac -exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} -# Admittedly, this is quite paranoid, since all the known shells bail -# out after a failed `exec'. -$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 -exit 255 -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - - - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. 
:-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } - - # If we had to re-execute with $CONFIG_SHELL, we're ensured to have - # already done that, so ensure we don't try to do so again and fall - # in an infinite loop. This has already happened in practice. - _as_can_reexec=no; export _as_can_reexec - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit -} - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - -SHELL=${CONFIG_SHELL-/bin/sh} - - -test -n "$DJDIR" || exec 7<&0 &1 - -# Name of the host. -# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, -# so uname gets run too. -ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` - -# -# Initializations. -# -ac_default_prefix=/usr/local -ac_clean_files= -ac_config_libobj_dir=. -LIBOBJS= -cross_compiling=no -subdirs= -MFLAGS= -MAKEFLAGS= - -# Identity of this package. -PACKAGE_NAME='sqlcipher' -PACKAGE_TARNAME='sqlcipher' -PACKAGE_VERSION='3.37.2' -PACKAGE_STRING='sqlcipher 3.37.2' -PACKAGE_BUGREPORT='' -PACKAGE_URL='' - -# Factoring default headers for most tests. 
-ac_includes_default="\ -#include -#ifdef HAVE_SYS_TYPES_H -# include -#endif -#ifdef HAVE_SYS_STAT_H -# include -#endif -#ifdef STDC_HEADERS -# include -# include -#else -# ifdef HAVE_STDLIB_H -# include -# endif -#endif -#ifdef HAVE_STRING_H -# if !defined STDC_HEADERS && defined HAVE_MEMORY_H -# include -# endif -# include -#endif -#ifdef HAVE_STRINGS_H -# include -#endif -#ifdef HAVE_INTTYPES_H -# include -#endif -#ifdef HAVE_STDINT_H -# include -#endif -#ifdef HAVE_UNISTD_H -# include -#endif" - -ac_subst_vars='LTLIBOBJS -LIBOBJS -BUILD_CFLAGS -USE_GCOV -OPT_FEATURE_FLAGS -HAVE_ZLIB -AMALGAMATION_LINE_MACROS -amalgamation_line_macros -USE_AMALGAMATION -TARGET_DEBUG -TARGET_HAVE_EDITLINE -TARGET_HAVE_READLINE -TARGET_READLINE_INC -TARGET_READLINE_LIBS -HAVE_TCL -TCL_SHLIB_SUFFIX -TCL_STUB_LIB_SPEC -TCL_STUB_LIB_FLAG -TCL_STUB_LIB_FILE -TCL_LIB_SPEC -TCL_LIB_FLAG -TCL_LIB_FILE -TCL_INCLUDE_SPEC -TCL_SRC_DIR -TCL_BIN_DIR -TCL_VERSION -TARGET_EXEEXT -SQLITE_OS_WIN -SQLITE_OS_UNIX -BUILD_EXEEXT -TEMP_STORE -ALLOWRELEASE -XTHREADCONNECT -SQLITE_THREADSAFE -BUILD_CC -RELEASE -VERSION -program_prefix -TCLLIBDIR -TCLSH_CMD -INSTALL_DATA -INSTALL_SCRIPT -INSTALL_PROGRAM -CPP -LT_SYS_LIBRARY_PATH -OTOOL64 -OTOOL -LIPO -NMEDIT -DSYMUTIL -MANIFEST_TOOL -AWK -RANLIB -STRIP -ac_ct_AR -AR -DLLTOOL -OBJDUMP -LN_S -NM -ac_ct_DUMPBIN -DUMPBIN -LD -FGREP -EGREP -GREP -SED -OBJEXT -EXEEXT -ac_ct_CC -CPPFLAGS -LDFLAGS -CFLAGS -CC -host_os -host_vendor -host_cpu -host -build_os -build_vendor -build_cpu -build -LIBTOOL -target_alias -host_alias -build_alias -LIBS -ECHO_T -ECHO_N -ECHO_C -DEFS -mandir -localedir -libdir -psdir -pdfdir -dvidir -htmldir -infodir -docdir -oldincludedir -includedir -runstatedir -localstatedir -sharedstatedir -sysconfdir -datadir -datarootdir -libexecdir -sbindir -bindir -program_transform_name -prefix -exec_prefix -PACKAGE_URL -PACKAGE_BUGREPORT -PACKAGE_STRING -PACKAGE_VERSION -PACKAGE_TARNAME -PACKAGE_NAME -PATH_SEPARATOR -SHELL' -ac_subst_files='' -ac_user_opts=' -enable_option_checking -enable_shared -enable_static -with_pic -enable_fast_install -with_aix_soname -with_gnu_ld -with_sysroot -enable_libtool_lock -enable_largefile -enable_threadsafe -with_crypto_lib -enable_cross_thread_connections -enable_releasemode -enable_tempstore -enable_tcl -with_tcl -enable_editline -enable_readline -with_readline_lib -with_readline_inc -enable_debug -enable_amalgamation -enable_load_extension -enable_math -enable_all -enable_memsys5 -enable_memsys3 -enable_fts3 -enable_fts4 -enable_fts5 -enable_json1 -enable_update_limit -enable_geopoly -enable_rtree -enable_session -enable_gcov -' - ac_precious_vars='build_alias -host_alias -target_alias -CC -CFLAGS -LDFLAGS -LIBS -CPPFLAGS -LT_SYS_LIBRARY_PATH -CPP -TCLLIBDIR -amalgamation_line_macros' - - -# Initialize some variables set by options. -ac_init_help= -ac_init_version=false -ac_unrecognized_opts= -ac_unrecognized_sep= -# The variables have the same names as the options, with -# dashes changed to underlines. -cache_file=/dev/null -exec_prefix=NONE -no_create= -no_recursion= -prefix=NONE -program_prefix=NONE -program_suffix=NONE -program_transform_name=s,x,x, -silent= -site= -srcdir= -verbose= -x_includes=NONE -x_libraries=NONE - -# Installation directory options. -# These are left unexpanded so users can "make install exec_prefix=/foo" -# and all the variables that are supposed to be based on exec_prefix -# by default will actually change. -# Use braces instead of parens because sh, perl, etc. also accept them. 
-# (The list follows the same order as the GNU Coding Standards.) -bindir='${exec_prefix}/bin' -sbindir='${exec_prefix}/sbin' -libexecdir='${exec_prefix}/libexec' -datarootdir='${prefix}/share' -datadir='${datarootdir}' -sysconfdir='${prefix}/etc' -sharedstatedir='${prefix}/com' -localstatedir='${prefix}/var' -runstatedir='${localstatedir}/run' -includedir='${prefix}/include' -oldincludedir='/usr/include' -docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' -infodir='${datarootdir}/info' -htmldir='${docdir}' -dvidir='${docdir}' -pdfdir='${docdir}' -psdir='${docdir}' -libdir='${exec_prefix}/lib' -localedir='${datarootdir}/locale' -mandir='${datarootdir}/man' - -ac_prev= -ac_dashdash= -for ac_option -do - # If the previous option needs an argument, assign it. - if test -n "$ac_prev"; then - eval $ac_prev=\$ac_option - ac_prev= - continue - fi - - case $ac_option in - *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; - *=) ac_optarg= ;; - *) ac_optarg=yes ;; - esac - - # Accept the important Cygnus configure options, so we can diagnose typos. - - case $ac_dashdash$ac_option in - --) - ac_dashdash=yes ;; - - -bindir | --bindir | --bindi | --bind | --bin | --bi) - ac_prev=bindir ;; - -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) - bindir=$ac_optarg ;; - - -build | --build | --buil | --bui | --bu) - ac_prev=build_alias ;; - -build=* | --build=* | --buil=* | --bui=* | --bu=*) - build_alias=$ac_optarg ;; - - -cache-file | --cache-file | --cache-fil | --cache-fi \ - | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) - ac_prev=cache_file ;; - -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ - | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) - cache_file=$ac_optarg ;; - - --config-cache | -C) - cache_file=config.cache ;; - - -datadir | --datadir | --datadi | --datad) - ac_prev=datadir ;; - -datadir=* | --datadir=* | --datadi=* | --datad=*) - datadir=$ac_optarg ;; - - -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ - | --dataroo | --dataro | --datar) - ac_prev=datarootdir ;; - -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ - | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) - datarootdir=$ac_optarg ;; - - -disable-* | --disable-*) - ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=no ;; - - -docdir | --docdir | --docdi | --doc | --do) - ac_prev=docdir ;; - -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) - docdir=$ac_optarg ;; - - -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) - ac_prev=dvidir ;; - -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) - dvidir=$ac_optarg ;; - - -enable-* | --enable-*) - ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? 
"invalid feature name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"enable_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval enable_$ac_useropt=\$ac_optarg ;; - - -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ - | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ - | --exec | --exe | --ex) - ac_prev=exec_prefix ;; - -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ - | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ - | --exec=* | --exe=* | --ex=*) - exec_prefix=$ac_optarg ;; - - -gas | --gas | --ga | --g) - # Obsolete; use --with-gas. - with_gas=yes ;; - - -help | --help | --hel | --he | -h) - ac_init_help=long ;; - -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) - ac_init_help=recursive ;; - -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) - ac_init_help=short ;; - - -host | --host | --hos | --ho) - ac_prev=host_alias ;; - -host=* | --host=* | --hos=* | --ho=*) - host_alias=$ac_optarg ;; - - -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) - ac_prev=htmldir ;; - -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ - | --ht=*) - htmldir=$ac_optarg ;; - - -includedir | --includedir | --includedi | --included | --include \ - | --includ | --inclu | --incl | --inc) - ac_prev=includedir ;; - -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ - | --includ=* | --inclu=* | --incl=* | --inc=*) - includedir=$ac_optarg ;; - - -infodir | --infodir | --infodi | --infod | --info | --inf) - ac_prev=infodir ;; - -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) - infodir=$ac_optarg ;; - - -libdir | --libdir | --libdi | --libd) - ac_prev=libdir ;; - -libdir=* | --libdir=* | --libdi=* | --libd=*) - libdir=$ac_optarg ;; - - -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ - | --libexe | --libex | --libe) - ac_prev=libexecdir ;; - -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ - | --libexe=* | --libex=* | --libe=*) - libexecdir=$ac_optarg ;; - - -localedir | --localedir | --localedi | --localed | --locale) - ac_prev=localedir ;; - -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) - localedir=$ac_optarg ;; - - -localstatedir | --localstatedir | --localstatedi | --localstated \ - | --localstate | --localstat | --localsta | --localst | --locals) - ac_prev=localstatedir ;; - -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ - | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) - localstatedir=$ac_optarg ;; - - -mandir | --mandir | --mandi | --mand | --man | --ma | --m) - ac_prev=mandir ;; - -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) - mandir=$ac_optarg ;; - - -nfp | --nfp | --nf) - # Obsolete; use --without-fp. 
- with_fp=no ;; - - -no-create | --no-create | --no-creat | --no-crea | --no-cre \ - | --no-cr | --no-c | -n) - no_create=yes ;; - - -no-recursion | --no-recursion | --no-recursio | --no-recursi \ - | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) - no_recursion=yes ;; - - -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ - | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ - | --oldin | --oldi | --old | --ol | --o) - ac_prev=oldincludedir ;; - -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ - | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ - | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) - oldincludedir=$ac_optarg ;; - - -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) - ac_prev=prefix ;; - -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) - prefix=$ac_optarg ;; - - -program-prefix | --program-prefix | --program-prefi | --program-pref \ - | --program-pre | --program-pr | --program-p) - ac_prev=program_prefix ;; - -program-prefix=* | --program-prefix=* | --program-prefi=* \ - | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) - program_prefix=$ac_optarg ;; - - -program-suffix | --program-suffix | --program-suffi | --program-suff \ - | --program-suf | --program-su | --program-s) - ac_prev=program_suffix ;; - -program-suffix=* | --program-suffix=* | --program-suffi=* \ - | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) - program_suffix=$ac_optarg ;; - - -program-transform-name | --program-transform-name \ - | --program-transform-nam | --program-transform-na \ - | --program-transform-n | --program-transform- \ - | --program-transform | --program-transfor \ - | --program-transfo | --program-transf \ - | --program-trans | --program-tran \ - | --progr-tra | --program-tr | --program-t) - ac_prev=program_transform_name ;; - -program-transform-name=* | --program-transform-name=* \ - | --program-transform-nam=* | --program-transform-na=* \ - | --program-transform-n=* | --program-transform-=* \ - | --program-transform=* | --program-transfor=* \ - | --program-transfo=* | --program-transf=* \ - | --program-trans=* | --program-tran=* \ - | --progr-tra=* | --program-tr=* | --program-t=*) - program_transform_name=$ac_optarg ;; - - -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) - ac_prev=pdfdir ;; - -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) - pdfdir=$ac_optarg ;; - - -psdir | --psdir | --psdi | --psd | --ps) - ac_prev=psdir ;; - -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) - psdir=$ac_optarg ;; - - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - silent=yes ;; - - -runstatedir | --runstatedir | --runstatedi | --runstated \ - | --runstate | --runstat | --runsta | --runst | --runs \ - | --run | --ru | --r) - ac_prev=runstatedir ;; - -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ - | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ - | --run=* | --ru=* | --r=*) - runstatedir=$ac_optarg ;; - - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) - ac_prev=sbindir ;; - -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ - | --sbi=* | --sb=*) - sbindir=$ac_optarg ;; - - -sharedstatedir | --sharedstatedir | --sharedstatedi \ - | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ - | --sharedst | --shareds | --shared | --share | --shar \ - | --sha | --sh) - 
ac_prev=sharedstatedir ;; - -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ - | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ - | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ - | --sha=* | --sh=*) - sharedstatedir=$ac_optarg ;; - - -site | --site | --sit) - ac_prev=site ;; - -site=* | --site=* | --sit=*) - site=$ac_optarg ;; - - -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) - ac_prev=srcdir ;; - -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) - srcdir=$ac_optarg ;; - - -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ - | --syscon | --sysco | --sysc | --sys | --sy) - ac_prev=sysconfdir ;; - -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ - | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) - sysconfdir=$ac_optarg ;; - - -target | --target | --targe | --targ | --tar | --ta | --t) - ac_prev=target_alias ;; - -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) - target_alias=$ac_optarg ;; - - -v | -verbose | --verbose | --verbos | --verbo | --verb) - verbose=yes ;; - - -version | --version | --versio | --versi | --vers | -V) - ac_init_version=: ;; - - -with-* | --with-*) - ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=\$ac_optarg ;; - - -without-* | --without-*) - ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` - # Reject names that are not valid shell variable names. - expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error $? "invalid package name: $ac_useropt" - ac_useropt_orig=$ac_useropt - ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` - case $ac_user_opts in - *" -"with_$ac_useropt" -"*) ;; - *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" - ac_unrecognized_sep=', ';; - esac - eval with_$ac_useropt=no ;; - - --x) - # Obsolete; use --with-x. - with_x=yes ;; - - -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ - | --x-incl | --x-inc | --x-in | --x-i) - ac_prev=x_includes ;; - -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ - | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) - x_includes=$ac_optarg ;; - - -x-libraries | --x-libraries | --x-librarie | --x-librari \ - | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) - ac_prev=x_libraries ;; - -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ - | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) - x_libraries=$ac_optarg ;; - - -*) as_fn_error $? "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information" - ;; - - *=*) - ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` - # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; - esac - eval $ac_envvar=\$ac_optarg - export $ac_envvar ;; - - *) - # FIXME: should be removed in autoconf 3.0. 
- $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 - expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && - $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 - : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" - ;; - - esac -done - -if test -n "$ac_prev"; then - ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error $? "missing argument to $ac_option" -fi - -if test -n "$ac_unrecognized_opts"; then - case $enable_option_checking in - no) ;; - fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; - *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; - esac -fi - -# Check all directory arguments for consistency. -for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ - datadir sysconfdir sharedstatedir localstatedir includedir \ - oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir runstatedir -do - eval ac_val=\$$ac_var - # Remove trailing slashes. - case $ac_val in - */ ) - ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` - eval $ac_var=\$ac_val;; - esac - # Be sure to have absolute directory names. - case $ac_val in - [\\/$]* | ?:[\\/]* ) continue;; - NONE | '' ) case $ac_var in *prefix ) continue;; esac;; - esac - as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" -done - -# There might be people who depend on the old broken behavior: `$host' -# used to hold the argument of --host etc. -# FIXME: To remove some day. -build=$build_alias -host=$host_alias -target=$target_alias - -# FIXME: To remove some day. -if test "x$host_alias" != x; then - if test "x$build_alias" = x; then - cross_compiling=maybe - elif test "x$build_alias" != "x$host_alias"; then - cross_compiling=yes - fi -fi - -ac_tool_prefix= -test -n "$host_alias" && ac_tool_prefix=$host_alias- - -test "$silent" = yes && exec 6>/dev/null - - -ac_pwd=`pwd` && test -n "$ac_pwd" && -ac_ls_di=`ls -di .` && -ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error $? "working directory cannot be determined" -test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error $? "pwd does not report name of working directory" - - -# Find the source files, if location was not specified. -if test -z "$srcdir"; then - ac_srcdir_defaulted=yes - # Try the directory containing this script, then the parent directory. - ac_confdir=`$as_dirname -- "$as_myself" || -$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_myself" : 'X\(//\)[^/]' \| \ - X"$as_myself" : 'X\(//\)$' \| \ - X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_myself" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - srcdir=$ac_confdir - if test ! -r "$srcdir/$ac_unique_file"; then - srcdir=.. - fi -else - ac_srcdir_defaulted=no -fi -if test ! -r "$srcdir/$ac_unique_file"; then - test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" -fi -ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" -ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" - pwd)` -# When building in place, set srcdir=. -if test "$ac_abs_confdir" = "$ac_pwd"; then - srcdir=. -fi -# Remove unnecessary trailing slashes from srcdir. -# Double slashes in file names in object file debugging info -# mess up M-x gdb in Emacs. 
-case $srcdir in -*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; -esac -for ac_var in $ac_precious_vars; do - eval ac_env_${ac_var}_set=\${${ac_var}+set} - eval ac_env_${ac_var}_value=\$${ac_var} - eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} - eval ac_cv_env_${ac_var}_value=\$${ac_var} -done - -# -# Report the --help message. -# -if test "$ac_init_help" = "long"; then - # Omit some internal or obsolete options to make the list less imposing. - # This message is too long to be a string in the A/UX 3.1 sh. - cat <<_ACEOF -\`configure' configures sqlcipher 3.37.2 to adapt to many kinds of systems. - -Usage: $0 [OPTION]... [VAR=VALUE]... - -To assign environment variables (e.g., CC, CFLAGS...), specify them as -VAR=VALUE. See below for descriptions of some of the useful variables. - -Defaults for the options are specified in brackets. - -Configuration: - -h, --help display this help and exit - --help=short display options specific to this package - --help=recursive display the short help of all the included packages - -V, --version display version information and exit - -q, --quiet, --silent do not print \`checking ...' messages - --cache-file=FILE cache test results in FILE [disabled] - -C, --config-cache alias for \`--cache-file=config.cache' - -n, --no-create do not create output files - --srcdir=DIR find the sources in DIR [configure dir or \`..'] - -Installation directories: - --prefix=PREFIX install architecture-independent files in PREFIX - [$ac_default_prefix] - --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX - [PREFIX] - -By default, \`make install' will install all the files in -\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify -an installation prefix other than \`$ac_default_prefix' using \`--prefix', -for instance \`--prefix=\$HOME'. - -For better control, use the options below. 
- -Fine tuning of the installation directories: - --bindir=DIR user executables [EPREFIX/bin] - --sbindir=DIR system admin executables [EPREFIX/sbin] - --libexecdir=DIR program executables [EPREFIX/libexec] - --sysconfdir=DIR read-only single-machine data [PREFIX/etc] - --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] - --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] - --libdir=DIR object code libraries [EPREFIX/lib] - --includedir=DIR C header files [PREFIX/include] - --oldincludedir=DIR C header files for non-gcc [/usr/include] - --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] - --datadir=DIR read-only architecture-independent data [DATAROOTDIR] - --infodir=DIR info documentation [DATAROOTDIR/info] - --localedir=DIR locale-dependent data [DATAROOTDIR/locale] - --mandir=DIR man documentation [DATAROOTDIR/man] - --docdir=DIR documentation root [DATAROOTDIR/doc/sqlcipher] - --htmldir=DIR html documentation [DOCDIR] - --dvidir=DIR dvi documentation [DOCDIR] - --pdfdir=DIR pdf documentation [DOCDIR] - --psdir=DIR ps documentation [DOCDIR] -_ACEOF - - cat <<\_ACEOF - -System types: - --build=BUILD configure for building on BUILD [guessed] - --host=HOST cross-compile to build programs to run on HOST [BUILD] -_ACEOF -fi - -if test -n "$ac_init_help"; then - case $ac_init_help in - short | recursive ) echo "Configuration of sqlcipher 3.37.2:";; - esac - cat <<\_ACEOF - -Optional Features: - --disable-option-checking ignore unrecognized --enable/--with options - --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) - --enable-FEATURE[=ARG] include FEATURE [ARG=yes] - --enable-shared[=PKGS] build shared libraries [default=yes] - --enable-static[=PKGS] build static libraries [default=yes] - --enable-fast-install[=PKGS] - optimize for fast installation [default=yes] - --disable-libtool-lock avoid locking (might break parallel builds) - --disable-largefile omit support for large files - --disable-threadsafe Disable mutexing - --enable-cross-thread-connections - Allow connection sharing across threads - --enable-releasemode Support libtool link to release mode - --enable-tempstore Use an in-ram database for temporary tables - (never,no,yes,always) - --disable-tcl do not build TCL extension - --enable-editline enable BSD editline support - --disable-readline disable readline support - --enable-debug enable debugging & verbose explain - --disable-amalgamation Disable the amalgamation and instead build all files - separately - --disable-load-extension - Disable loading of external extensions - --disable-math Disable math functions - --enable-all Enable FTS4, FTS5, Geopoly, JSON, RTree, Sessions - --enable-memsys5 Enable MEMSYS5 - --enable-memsys3 Enable MEMSYS3 - --enable-fts3 Enable the FTS3 extension - --enable-fts4 Enable the FTS4 extension - --enable-fts5 Enable the FTS5 extension - --enable-json1 Enable the JSON1 extension - --enable-update-limit Enable the UPDATE/DELETE LIMIT clause - --enable-geopoly Enable the GEOPOLY extension - --enable-rtree Enable the RTREE extension - --enable-session Enable the SESSION extension - --enable-gcov Enable coverage testing using gcov - -Optional Packages: - --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] - --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) - --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use - both] - --with-aix-soname=aix|svr4|both - shared library versioning (aka "SONAME") 
variant to - provide on AIX, [default=aix]. - --with-gnu-ld assume the C compiler uses GNU ld [default=no] - --with-sysroot[=DIR] Search for dependent libraries within DIR (or the - compiler's sysroot if not specified). - --with-crypto-lib Specify which crypto library to use - --with-tcl=DIR directory containing tcl configuration - (tclConfig.sh) - --with-readline-lib specify readline library - --with-readline-inc specify readline include paths - -Some influential environment variables: - CC C compiler command - CFLAGS C compiler flags - LDFLAGS linker flags, e.g. -L if you have libraries in a - nonstandard directory - LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if - you have headers in a nonstandard directory - LT_SYS_LIBRARY_PATH - User-defined run-time library search path. - CPP C preprocessor - TCLLIBDIR Where to install tcl plugin - amalgamation_line_macros - - -Use these variables to override the choices made by `configure' or to help -it to find libraries and programs with nonstandard names/locations. - -Report bugs to the package provider. -_ACEOF -ac_status=$? -fi - -if test "$ac_init_help" = "recursive"; then - # If there are subdirs, report their specific --help. - for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue - test -d "$ac_dir" || - { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || - continue - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - cd "$ac_dir" || { ac_status=$?; continue; } - # Check for guested configure. - if test -f "$ac_srcdir/configure.gnu"; then - echo && - $SHELL "$ac_srcdir/configure.gnu" --help=recursive - elif test -f "$ac_srcdir/configure"; then - echo && - $SHELL "$ac_srcdir/configure" --help=recursive - else - $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 - fi || ac_status=$? - cd "$ac_pwd" || { ac_status=$?; break; } - done -fi - -test -n "$ac_init_help" && exit $ac_status -if $ac_init_version; then - cat <<\_ACEOF -sqlcipher configure 3.37.2 -generated by GNU Autoconf 2.69 - -Copyright (C) 2012 Free Software Foundation, Inc. -This configure script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it. -_ACEOF - exit -fi - -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. 
-ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_compile - -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - test -x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_link - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_compile - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } > conftest.i && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. 
*/ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_type - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if eval \${$3+:} false; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.i conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_header_mongrel -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by sqlcipher $as_me 3.37.2, which was -generated by GNU Autoconf 2.69. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` - -/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` -/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` -/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` -/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` -/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` -/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` -/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` - -_ASUNAME - -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done -IFS=$as_save_IFS - -} >&5 - -cat >&5 <<_ACEOF - - -## ----------- ## -## Core tests. ## -## ----------- ## - -_ACEOF - - -# Keep a trace of the command line. -# Strip out --no-create and --no-recursion so they do not pile up. -# Strip out --silent because we don't want to record it for future runs. -# Also quote any args containing shell meta-characters. -# Make two passes to allow for proper duplicate-argument suppression. -ac_configure_args= -ac_configure_args0= -ac_configure_args1= -ac_must_keep_next=false -for ac_pass in 1 2 -do - for ac_arg - do - case $ac_arg in - -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil) - continue ;; - *\'*) - ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; - 2) - as_fn_append ac_configure_args1 " '$ac_arg'" - if test $ac_must_keep_next = true; then - ac_must_keep_next=false # Got value, back to normal. - else - case $ac_arg in - *=* | --config-cache | -C | -disable-* | --disable-* \ - | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ - | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ - | -with-* | --with-* | -without-* | --without-* | --x) - case "$ac_configure_args0 " in - "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; - esac - ;; - -* ) ac_must_keep_next=true ;; - esac - fi - as_fn_append ac_configure_args " '$ac_arg'" - ;; - esac - done -done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} - -# When interrupted or exit'd, cleanup temporary files, and complete -# config.log. We remove comments because anyway the quotes in there -# would cause problems or look ugly. -# WARNING: Use '\'' to represent an apostrophe within the trap. -# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. -trap 'exit_status=$? - # Save into config.log some information that might help in debugging. - { - echo - - $as_echo "## ---------------- ## -## Cache variables. 
## -## ---------------- ##" - echo - # The following way of writing the cache mishandles newlines in values, -( - for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - (set) 2>&1 | - case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - sed -n \ - "s/'\''/'\''\\\\'\'''\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" - ;; #( - *) - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) - echo - - $as_echo "## ----------------- ## -## Output variables. ## -## ----------------- ##" - echo - for ac_var in $ac_subst_vars - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - - if test -n "$ac_subst_files"; then - $as_echo "## ------------------- ## -## File substitutions. ## -## ------------------- ##" - echo - for ac_var in $ac_subst_files - do - eval ac_val=\$$ac_var - case $ac_val in - *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; - esac - $as_echo "$ac_var='\''$ac_val'\''" - done | sort - echo - fi - - if test -s confdefs.h; then - $as_echo "## ----------- ## -## confdefs.h. ## -## ----------- ##" - echo - cat confdefs.h - echo - fi - test "$ac_signal" != 0 && - $as_echo "$as_me: caught signal $ac_signal" - $as_echo "$as_me: exit $exit_status" - } >&5 - rm -f core *.core core.conftest.* && - rm -f -r conftest* confdefs* conf$$* $ac_clean_files && - exit $exit_status -' 0 -for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal -done -ac_signal=0 - -# confdefs.h avoids OS command line length limits that DEFS can exceed. -rm -f -r conftest* confdefs.h - -$as_echo "/* confdefs.h */" > confdefs.h - -# Predefined preprocessor variables. - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_NAME "$PACKAGE_NAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_TARNAME "$PACKAGE_TARNAME" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_VERSION "$PACKAGE_VERSION" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_STRING "$PACKAGE_STRING" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF - -cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" -_ACEOF - - -# Let the site file select an alternate cache file if it wants to. -# Prefer an explicitly selected file to automatically selected ones. -ac_site_file1=NONE -ac_site_file2=NONE -if test -n "$CONFIG_SITE"; then - # We do not want a PATH search for config.site. 
- case $CONFIG_SITE in #(( - -*) ac_site_file1=./$CONFIG_SITE;; - */*) ac_site_file1=$CONFIG_SITE;; - *) ac_site_file1=./$CONFIG_SITE;; - esac -elif test "x$prefix" != xNONE; then - ac_site_file1=$prefix/share/config.site - ac_site_file2=$prefix/etc/config.site -else - ac_site_file1=$ac_default_prefix/share/config.site - ac_site_file2=$ac_default_prefix/etc/config.site -fi -for ac_site_file in "$ac_site_file1" "$ac_site_file2" -do - test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 -$as_echo "$as_me: loading site script $ac_site_file" >&6;} - sed 's/^/| /' "$ac_site_file" >&5 - . "$ac_site_file" \ - || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "failed to load site script $ac_site_file -See \`config.log' for more details" "$LINENO" 5; } - fi -done - -if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 -$as_echo "$as_me: loading cache $cache_file" >&6;} - case $cache_file in - [\\/]* | ?:[\\/]* ) . "$cache_file";; - *) . "./$cache_file";; - esac - fi -else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 -$as_echo "$as_me: creating cache $cache_file" >&6;} - >$cache_file -fi - -# Check that the precious variables saved in the cache have kept the same -# value. -ac_cache_corrupted=false -for ac_var in $ac_precious_vars; do - eval ac_old_set=\$ac_cv_env_${ac_var}_set - eval ac_new_set=\$ac_env_${ac_var}_set - eval ac_old_val=\$ac_cv_env_${ac_var}_value - eval ac_new_val=\$ac_env_${ac_var}_value - case $ac_old_set,$ac_new_set in - set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 -$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} - ac_cache_corrupted=: ;; - ,);; - *) - if test "x$ac_old_val" != "x$ac_new_val"; then - # differences in whitespace do not lead to failure. - ac_old_val_w=`echo x $ac_old_val` - ac_new_val_w=`echo x $ac_new_val` - if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 -$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} - ac_cache_corrupted=: - else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 -$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} - eval $ac_var=\$ac_old_val - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 -$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 -$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} - fi;; - esac - # Pass precious variables to config.status. 
- if test "$ac_new_set" = set; then - case $ac_new_val in - *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; - *) ac_arg=$ac_var=$ac_new_val ;; - esac - case " $ac_configure_args " in - *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; - esac - fi -done -if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 -$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 -fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - - -sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'` -if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then -as_fn_error $? "configure script is out of date: - configure \$PACKAGE_VERSION = $PACKAGE_VERSION - top level VERSION file = $sqlite_version_sanity_check -please regen with autoconf" "$LINENO" 5 -fi - -######### -# Programs needed -# -ac_aux_dir= -for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do - if test -f "$ac_dir/install-sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install-sh -c" - break - elif test -f "$ac_dir/install.sh"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/install.sh -c" - break - elif test -f "$ac_dir/shtool"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/shtool install -c" - break - fi -done -if test -z "$ac_aux_dir"; then - as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 -fi - -# These three variables are undocumented and unsupported, -# and are intended to be withdrawn in a future Autoconf release. -# They can cause serious problems if a builder's source tree is in a directory -# whose full name contains unusual characters. -ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. -ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. -ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. - - -case `pwd` in - *\ * | *\ *) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 -$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; -esac - - - -macro_version='2.4.6' -macro_revision='2.4.6' - - - - - - - - - - - - - -ltmain=$ac_aux_dir/ltmain.sh - -# Make sure we can run config.sub. -$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 -$as_echo_n "checking build system type... " >&6; } -if ${ac_cv_build+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_build_alias=$build_alias -test "x$ac_build_alias" = x && - ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` -test "x$ac_build_alias" = x && - as_fn_error $? 
"cannot guess build type; you must specify one" "$LINENO" 5 -ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 -$as_echo "$ac_cv_build" >&6; } -case $ac_cv_build in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; -esac -build=$ac_cv_build -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_build -shift -build_cpu=$1 -build_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -build_os=$* -IFS=$ac_save_IFS -case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 -$as_echo_n "checking host system type... " >&6; } -if ${ac_cv_host+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test "x$host_alias" = x; then - ac_cv_host=$ac_cv_build -else - ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 -$as_echo "$ac_cv_host" >&6; } -case $ac_cv_host in -*-*-*) ;; -*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; -esac -host=$ac_cv_host -ac_save_IFS=$IFS; IFS='-' -set x $ac_cv_host -shift -host_cpu=$1 -host_vendor=$2 -shift; shift -# Remember, the first character of IFS is used to create $*, -# except with old shells: -host_os=$* -IFS=$ac_save_IFS -case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac - - -# Backslashify metacharacters that are still active within -# double-quoted strings. -sed_quote_subst='s/\(["`$\\]\)/\\\1/g' - -# Same as above, but do not quote variable references. -double_quote_subst='s/\(["`\\]\)/\\\1/g' - -# Sed substitution to delay expansion of an escaped shell variable in a -# double_quote_subst'ed string. -delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' - -# Sed substitution to delay expansion of an escaped single quote. -delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' - -# Sed substitution to avoid accidental globbing in evaled expressions -no_glob_subst='s/\*/\\\*/g' - -ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO -ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 -$as_echo_n "checking how to print strings... " >&6; } -# Test print first, because it will be a builtin if present. -if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ - test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='print -r --' -elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then - ECHO='printf %s\n' -else - # Use this function as a fallback that always works. - func_fallback_echo () - { - eval 'cat <<_LTECHO_EOF -$1 -_LTECHO_EOF' - } - ECHO='func_fallback_echo' -fi - -# func_echo_all arg... -# Invoke $ECHO with all args, space-separated. 
-func_echo_all () -{ - $ECHO "" -} - -case $ECHO in - printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 -$as_echo "printf" >&6; } ;; - print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 -$as_echo "print -r" >&6; } ;; - *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 -$as_echo "cat" >&6; } ;; -esac - - - - - - - - - - - - - - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. -set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_CC"; then - ac_ct_CC=$CC - # Extract the first word of "gcc", so it can be a program name with args. -set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -else - CC="$ac_cv_prog_CC" -fi - -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. -set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - fi -fi -if test -z "$CC"; then - # Extract the first word of "cc", so it can be a program name with args. -set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else - ac_prog_rejected=no -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then - ac_prog_rejected=yes - continue - fi - ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -if test $ac_prog_rejected = yes; then - # We found a bogon in the path, so make sure we never use it. - set dummy $ac_cv_prog_CC - shift - if test $# != 0; then - # We chose a different compiler from the bogus one. - # However, it has the same basename, so the bogon will be chosen - # first if we set CC to just the basename; use the full file name. - shift - ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" - fi -fi -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$CC"; then - if test -n "$ac_tool_prefix"; then - for ac_prog in cl.exe - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$CC"; then - ac_cv_prog_CC="$CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -CC=$ac_cv_prog_CC -if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 -$as_echo "$CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$CC" && break - done -fi -if test -z "$CC"; then - ac_ct_CC=$CC - for ac_prog in cl.exe -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_CC"; then - ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_CC=$ac_cv_prog_ac_ct_CC -if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 -$as_echo "$ac_ct_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_CC" && break -done - - if test "x$ac_ct_CC" = x; then - CC="" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - CC=$ac_ct_CC - fi -fi - -fi - - -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "no acceptable C compiler found in \$PATH -See \`config.log' for more details" "$LINENO" 5; } - -# Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 -set X $ac_compile -ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done - -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" -# Try to create an executable without -o first, disregard a.out. -# It will help us diagnose broken compilers, and finding out an intuition -# of exeext. 
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } -ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` - -# The possible output files: -ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" - -ac_rmfiles= -for ac_file in $ac_files -do - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - * ) ac_rmfiles="$ac_rmfiles $ac_file";; - esac -done -rm -f $ac_rmfiles - -if { { ac_try="$ac_link_default" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link_default") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. -# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' -# in a Makefile. We should not override ac_cv_exeext if it was cached, -# so that the user can short-circuit this test for compilers unknown to -# Autoconf. -for ac_file in $ac_files '' -do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) - ;; - [ab].out ) - # We found the default executable, but exeext='' is most - # certainly right. - break;; - *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; - then :; else - ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - fi - # We set ac_cv_exeext here because the later test for it is not - # safe: cross compilers may not add the suffix if given an `-o' - # argument, so we may need to know it at that point already. - # Even if this section looks crufty: it has the advantage of - # actually working. - break;; - * ) - break;; - esac -done -test "$ac_cv_exeext" = no && ac_cv_exeext= - -else - ac_file='' -fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "C compiler cannot create executables -See \`config.log' for more details" "$LINENO" 5; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } -ac_exeext=$ac_cv_exeext - -rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 -$as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; then : - # If both `conftest.exe' and `conftest' are `present' (well, observable) -# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will -# work properly (i.e., refer to `conftest.exe'), while it won't with -# `rm'. -for ac_file in conftest.exe conftest conftest.*; do - test -f "$ac_file" || continue - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; - *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` - break;; - * ) break;; - esac -done -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 -$as_echo "$ac_cv_exeext" >&6; } - -rm -f conftest.$ac_ext -EXEEXT=$ac_cv_exeext -ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details" "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 -$as_echo_n "checking for suffix of object files... " >&6; } -if ${ac_cv_objext+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : - for ac_file in conftest.o conftest.obj conftest.*; do - test -f "$ac_file" || continue; - case $ac_file in - *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; - *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` - break;; - esac -done -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? "cannot compute suffix of object files: cannot compile -See \`config.log' for more details" "$LINENO" 5; } -fi -rm -f conftest.$ac_cv_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 -$as_echo "$ac_cv_objext" >&6; } -OBJEXT=$ac_cv_objext -ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 -$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if ${ac_cv_c_compiler_gnu+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ -#ifndef __GNUC__ - choke me -#endif - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_compiler_gnu=yes -else - ac_compiler_gnu=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_cv_c_compiler_gnu=$ac_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 -$as_echo "$ac_cv_c_compiler_gnu" >&6; } -if test $ac_compiler_gnu = yes; then - GCC=yes -else - GCC= -fi -ac_test_CFLAGS=${CFLAGS+set} -ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 -$as_echo_n "checking whether $CC accepts -g... " >&6; } -if ${ac_cv_prog_cc_g+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_save_c_werror_flag=$ac_c_werror_flag - ac_c_werror_flag=yes - ac_cv_prog_cc_g=no - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - ac_c_werror_flag=$ac_save_c_werror_flag - CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_g=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_c_werror_flag=$ac_save_c_werror_flag -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 -$as_echo "$ac_cv_prog_cc_g" >&6; } -if test "$ac_test_CFLAGS" = set; then - CFLAGS=$ac_save_CFLAGS -elif test $ac_cv_prog_cc_g = yes; then - if test "$GCC" = yes; then - CFLAGS="-g -O2" - else - CFLAGS="-g" - fi -else - if test "$GCC" = yes; then - CFLAGS="-O2" - else - CFLAGS= - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 -$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } -if ${ac_cv_prog_cc_c89+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_prog_cc_c89=no -ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -struct stat; -/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ -struct buf { int x; }; -FILE * (*rcsopen) (struct buf *, struct stat *, int); -static char *e (p, i) - char **p; - int i; -{ - return p[i]; -} -static char *f (char * (*g) (char **, int), char **p, ...) -{ - char *s; - va_list v; - va_start (v,p); - s = g (p, va_arg (v,int)); - va_end (v); - return s; -} - -/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has - function prototypes and stuff, but not '\xHH' hex character constants. - These don't provoke an error unfortunately, instead are silently treated - as 'x'. The following induces an error, until -std is added to get - proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an - array size at least. It's necessary to write '\x00'==0 to get something - that's true only with -std. */ -int osf4_cc_array ['\x00' == 0 ? 1 : -1]; - -/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters - inside strings and character constants. */ -#define FOO(x) 'x' -int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; - -int test (int i, double x); -struct s1 {int (*f) (int a);}; -struct s2 {int (*f) (double a);}; -int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); -int argc; -char **argv; -int -main () -{ -return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; - ; - return 0; -} -_ACEOF -for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ - -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" -do - CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_prog_cc_c89=$ac_arg -fi -rm -f core conftest.err conftest.$ac_objext - test "x$ac_cv_prog_cc_c89" != "xno" && break -done -rm -f conftest.$ac_ext -CC=$ac_save_CC - -fi -# AC_CACHE_VAL -case "x$ac_cv_prog_cc_c89" in - x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 -$as_echo "none needed" >&6; } ;; - xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 -$as_echo "unsupported" >&6; } ;; - *) - CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 -$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; -esac -if test "x$ac_cv_prog_cc_c89" != xno; then : - -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 -$as_echo_n "checking for a sed that does not truncate output... " >&6; } -if ${ac_cv_path_SED+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ - for ac_i in 1 2 3 4 5 6 7; do - ac_script="$ac_script$as_nl$ac_script" - done - echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed - { ac_script=; unset ac_script;} - if test -z "$SED"; then - ac_path_SED_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in sed gsed; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_SED" || continue -# Check for GNU ac_path_SED and select it if it is found. - # Check for GNU $ac_path_SED -case `"$ac_path_SED" --version 2>&1` in -*GNU*) - ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo '' >> "conftest.nl" - "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_SED_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_SED="$ac_path_SED" - ac_path_SED_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_SED_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_SED"; then - as_fn_error $? 
"no acceptable sed could be found in \$PATH" "$LINENO" 5 - fi -else - ac_cv_path_SED=$SED -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 -$as_echo "$ac_cv_path_SED" >&6; } - SED="$ac_cv_path_SED" - rm -f conftest.sed - -test -z "$SED" && SED=sed -Xsed="$SED -e 1s/^X//" - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 -$as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if ${ac_cv_path_GREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$GREP"; then - ac_path_GREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in grep ggrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_GREP" || continue -# Check for GNU ac_path_GREP and select it if it is found. - # Check for GNU $ac_path_GREP -case `"$ac_path_GREP" --version 2>&1` in -*GNU*) - ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'GREP' >> "conftest.nl" - "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_GREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_GREP="$ac_path_GREP" - ac_path_GREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_GREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_GREP"; then - as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_GREP=$GREP -fi - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 -$as_echo "$ac_cv_path_GREP" >&6; } - GREP="$ac_cv_path_GREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 -$as_echo_n "checking for egrep... " >&6; } -if ${ac_cv_path_EGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 - then ac_cv_path_EGREP="$GREP -E" - else - if test -z "$EGREP"; then - ac_path_EGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_EGREP" || continue -# Check for GNU ac_path_EGREP and select it if it is found. 
- # Check for GNU $ac_path_EGREP -case `"$ac_path_EGREP" --version 2>&1` in -*GNU*) - ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'EGREP' >> "conftest.nl" - "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_EGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_EGREP="$ac_path_EGREP" - ac_path_EGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_EGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_EGREP"; then - as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_EGREP=$EGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 -$as_echo "$ac_cv_path_EGREP" >&6; } - EGREP="$ac_cv_path_EGREP" - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 -$as_echo_n "checking for fgrep... " >&6; } -if ${ac_cv_path_FGREP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 - then ac_cv_path_FGREP="$GREP -F" - else - if test -z "$FGREP"; then - ac_path_FGREP_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_prog in fgrep; do - for ac_exec_ext in '' $ac_executable_extensions; do - ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" - as_fn_executable_p "$ac_path_FGREP" || continue -# Check for GNU ac_path_FGREP and select it if it is found. - # Check for GNU $ac_path_FGREP -case `"$ac_path_FGREP" --version 2>&1` in -*GNU*) - ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; -*) - ac_count=0 - $as_echo_n 0123456789 >"conftest.in" - while : - do - cat "conftest.in" "conftest.in" >"conftest.tmp" - mv "conftest.tmp" "conftest.in" - cp "conftest.in" "conftest.nl" - $as_echo 'FGREP' >> "conftest.nl" - "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break - diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val - if test $ac_count -gt ${ac_path_FGREP_max-0}; then - # Best one so far, save it but keep looking for a better one - ac_cv_path_FGREP="$ac_path_FGREP" - ac_path_FGREP_max=$ac_count - fi - # 10*(2^10) chars as input seems more than enough - test $ac_count -gt 10 && break - done - rm -f conftest.in conftest.tmp conftest.nl conftest.out;; -esac - - $ac_path_FGREP_found && break 3 - done - done - done -IFS=$as_save_IFS - if test -z "$ac_cv_path_FGREP"; then - as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 - fi -else - ac_cv_path_FGREP=$FGREP -fi - - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 -$as_echo "$ac_cv_path_FGREP" >&6; } - FGREP="$ac_cv_path_FGREP" - - -test -z "$GREP" && GREP=grep - - - - - - - - - - - - - - - - - - - -# Check whether --with-gnu-ld was given. 
-if test "${with_gnu_ld+set}" = set; then : - withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes -else - with_gnu_ld=no -fi - -ac_prog=ld -if test yes = "$GCC"; then - # Check if gcc -print-prog-name=ld gives a path. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 -$as_echo_n "checking for ld used by $CC... " >&6; } - case $host in - *-*-mingw*) - # gcc leaves a trailing carriage return, which upsets mingw - ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; - *) - ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; - esac - case $ac_prog in - # Accept absolute paths. - [\\/]* | ?:[\\/]*) - re_direlt='/[^/][^/]*/\.\./' - # Canonicalize the pathname of ld - ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` - while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do - ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` - done - test -z "$LD" && LD=$ac_prog - ;; - "") - # If it fails, then pretend we aren't using GCC. - ac_prog=ld - ;; - *) - # If it is relative, then search for the first ld in PATH. - with_gnu_ld=unknown - ;; - esac -elif test yes = "$with_gnu_ld"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 -$as_echo_n "checking for GNU ld... " >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 -$as_echo_n "checking for non-GNU ld... " >&6; } -fi -if ${lt_cv_path_LD+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -z "$LD"; then - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then - lt_cv_path_LD=$ac_dir/$ac_prog - # Check to see if the program is GNU ld. I'd rather use --version, - # but apparently some variants of GNU ld only accept -v. - # Break only if it was the GNU/non-GNU ld that we prefer. - case `"$lt_cv_path_LD" -v 2>&1 &5 -$as_echo "$LD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 -$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } -if ${lt_cv_prog_gnu_ld+:} false; then : - $as_echo_n "(cached) " >&6 -else - # I'd rather use --version here, but apparently some GNU lds only accept -v. -case `$LD -v 2>&1 &5 -$as_echo "$lt_cv_prog_gnu_ld" >&6; } -with_gnu_ld=$lt_cv_prog_gnu_ld - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 -$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } -if ${lt_cv_path_NM+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NM"; then - # Let the user override the test. - lt_cv_path_NM=$NM -else - lt_nm_to_check=${ac_tool_prefix}nm - if test -n "$ac_tool_prefix" && test "$build" = "$host"; then - lt_nm_to_check="$lt_nm_to_check nm" - fi - for lt_tmp_nm in $lt_nm_to_check; do - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - tmp_nm=$ac_dir/$lt_tmp_nm - if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then - # Check to see if the nm accepts a BSD-compat flag. 
- # Adding the 'sed 1q' prevents false positives on HP-UX, which says: - # nm: unknown option "B" ignored - # Tru64's nm complains that /dev/null is an invalid object file - # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty - case $build_os in - mingw*) lt_bad_file=conftest.nm/nofile ;; - *) lt_bad_file=/dev/null ;; - esac - case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in - *$lt_bad_file* | *'Invalid file or object type'*) - lt_cv_path_NM="$tmp_nm -B" - break 2 - ;; - *) - case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in - */dev/null*) - lt_cv_path_NM="$tmp_nm -p" - break 2 - ;; - *) - lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but - continue # so that we can try to find one that supports BSD flags - ;; - esac - ;; - esac - fi - done - IFS=$lt_save_ifs - done - : ${lt_cv_path_NM=no} -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 -$as_echo "$lt_cv_path_NM" >&6; } -if test no != "$lt_cv_path_NM"; then - NM=$lt_cv_path_NM -else - # Didn't find any BSD compatible name lister, look for dumpbin. - if test -n "$DUMPBIN"; then : - # Let the user override the test. - else - if test -n "$ac_tool_prefix"; then - for ac_prog in dumpbin "link -dump" - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DUMPBIN+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DUMPBIN"; then - ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -DUMPBIN=$ac_cv_prog_DUMPBIN -if test -n "$DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 -$as_echo "$DUMPBIN" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$DUMPBIN" && break - done -fi -if test -z "$DUMPBIN"; then - ac_ct_DUMPBIN=$DUMPBIN - for ac_prog in dumpbin "link -dump" -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DUMPBIN"; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN -if test -n "$ac_ct_DUMPBIN"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 -$as_echo "$ac_ct_DUMPBIN" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_DUMPBIN" && break -done - - if test "x$ac_ct_DUMPBIN" = x; then - DUMPBIN=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DUMPBIN=$ac_ct_DUMPBIN - fi -fi - - case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in - *COFF*) - DUMPBIN="$DUMPBIN -symbols -headers" - ;; - *) - DUMPBIN=: - ;; - esac - fi - - if test : != "$DUMPBIN"; then - NM=$DUMPBIN - fi -fi -test -z "$NM" && NM=nm - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 -$as_echo_n "checking the name lister ($NM) interface... " >&6; } -if ${lt_cv_nm_interface+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_nm_interface="BSD nm" - echo "int some_variable = 0;" > conftest.$ac_ext - (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) - (eval "$ac_compile" 2>conftest.err) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) - (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) - cat conftest.err >&5 - (eval echo "\"\$as_me:$LINENO: output\"" >&5) - cat conftest.out >&5 - if $GREP 'External.*some_variable' conftest.out > /dev/null; then - lt_cv_nm_interface="MS dumpbin" - fi - rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 -$as_echo "$lt_cv_nm_interface" >&6; } - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 -$as_echo_n "checking whether ln -s works... " >&6; } -LN_S=$as_ln_s -if test "$LN_S" = "ln -s"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 -$as_echo "no, using $LN_S" >&6; } -fi - -# find the maximum length of command line arguments -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 -$as_echo_n "checking the maximum length of command line arguments... " >&6; } -if ${lt_cv_sys_max_cmd_len+:} false; then : - $as_echo_n "(cached) " >&6 -else - i=0 - teststring=ABCD - - case $build_os in - msdosdjgpp*) - # On DJGPP, this test can blow up pretty badly due to problems in libc - # (any single argument exceeding 2000 bytes causes a buffer overrun - # during glob expansion). Even if it were fixed, the result of this - # check would be larger than it should be. - lt_cv_sys_max_cmd_len=12288; # 12K is about right - ;; - - gnu*) - # Under GNU Hurd, this test is not required because there is - # no limit to the length of command line arguments. 
- # Libtool will interpret -1 as no limit whatsoever - lt_cv_sys_max_cmd_len=-1; - ;; - - cygwin* | mingw* | cegcc*) - # On Win9x/ME, this test blows up -- it succeeds, but takes - # about 5 minutes as the teststring grows exponentially. - # Worse, since 9x/ME are not pre-emptively multitasking, - # you end up with a "frozen" computer, even though with patience - # the test eventually succeeds (with a max line length of 256k). - # Instead, let's just punt: use the minimum linelength reported by - # all of the supported platforms: 8192 (on NT/2K/XP). - lt_cv_sys_max_cmd_len=8192; - ;; - - mint*) - # On MiNT this can take a long time and run out of memory. - lt_cv_sys_max_cmd_len=8192; - ;; - - amigaos*) - # On AmigaOS with pdksh, this test takes hours, literally. - # So we just punt and use a minimum line length of 8192. - lt_cv_sys_max_cmd_len=8192; - ;; - - bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) - # This has been around since 386BSD, at least. Likely further. - if test -x /sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` - elif test -x /usr/sbin/sysctl; then - lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` - else - lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs - fi - # And add a safety zone - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - ;; - - interix*) - # We know the value 262144 and hardcode it with a safety zone (like BSD) - lt_cv_sys_max_cmd_len=196608 - ;; - - os2*) - # The test takes a long time on OS/2. - lt_cv_sys_max_cmd_len=8192 - ;; - - osf*) - # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure - # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not - # nice to cause kernel panics so lets avoid the loop below. - # First set a reasonable default. - lt_cv_sys_max_cmd_len=16384 - # - if test -x /sbin/sysconfig; then - case `/sbin/sysconfig -q proc exec_disable_arg_limit` in - *1*) lt_cv_sys_max_cmd_len=-1 ;; - esac - fi - ;; - sco3.2v5*) - lt_cv_sys_max_cmd_len=102400 - ;; - sysv5* | sco5v6* | sysv4.2uw2*) - kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` - if test -n "$kargmax"; then - lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` - else - lt_cv_sys_max_cmd_len=32768 - fi - ;; - *) - lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` - if test -n "$lt_cv_sys_max_cmd_len" && \ - test undefined != "$lt_cv_sys_max_cmd_len"; then - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` - lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` - else - # Make teststring a little bigger before we do anything with it. - # a 1K string should be a reasonable start. - for i in 1 2 3 4 5 6 7 8; do - teststring=$teststring$teststring - done - SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} - # If test is not a shell built-in, we'll probably end up computing a - # maximum length that is only half of the actual maximum length, but - # we can't tell. - while { test X`env echo "$teststring$teststring" 2>/dev/null` \ - = "X$teststring$teststring"; } >/dev/null 2>&1 && - test 17 != "$i" # 1/2 MB should be enough - do - i=`expr $i + 1` - teststring=$teststring$teststring - done - # Only check the string length outside the loop. - lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` - teststring= - # Add a significant safety factor because C++ compilers can tack on - # massive amounts of additional arguments before passing them to the - # linker. It appears as though 1/2 is a usable value. 
- lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` - fi - ;; - esac - -fi - -if test -n "$lt_cv_sys_max_cmd_len"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 -$as_echo "$lt_cv_sys_max_cmd_len" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 -$as_echo "none" >&6; } -fi -max_cmd_len=$lt_cv_sys_max_cmd_len - - - - - - -: ${CP="cp -f"} -: ${MV="mv -f"} -: ${RM="rm -f"} - -if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then - lt_unset=unset -else - lt_unset=false -fi - - - - - -# test EBCDIC or ASCII -case `echo X|tr X '\101'` in - A) # ASCII based system - # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr - lt_SP2NL='tr \040 \012' - lt_NL2SP='tr \015\012 \040\040' - ;; - *) # EBCDIC based system - lt_SP2NL='tr \100 \n' - lt_NL2SP='tr \r\n \100\100' - ;; -esac - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 -$as_echo_n "checking how to convert $build file names to $host format... " >&6; } -if ${lt_cv_to_host_file_cmd+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 - ;; - esac - ;; - *-*-cygwin* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin - ;; - *-*-cygwin* ) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; - * ) # otherwise, assume *nix - lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin - ;; - esac - ;; - * ) # unhandled hosts (and "normal" native builds) - lt_cv_to_host_file_cmd=func_convert_file_noop - ;; -esac - -fi - -to_host_file_cmd=$lt_cv_to_host_file_cmd -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 -$as_echo "$lt_cv_to_host_file_cmd" >&6; } - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 -$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } -if ${lt_cv_to_tool_file_cmd+:} false; then : - $as_echo_n "(cached) " >&6 -else - #assume ordinary cross tools, or native build. -lt_cv_to_tool_file_cmd=func_convert_file_noop -case $host in - *-*-mingw* ) - case $build in - *-*-mingw* ) # actually msys - lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 - ;; - esac - ;; -esac - -fi - -to_tool_file_cmd=$lt_cv_to_tool_file_cmd -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 -$as_echo "$lt_cv_to_tool_file_cmd" >&6; } - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 -$as_echo_n "checking for $LD option to reload object files... 
" >&6; } -if ${lt_cv_ld_reload_flag+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_reload_flag='-r' -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 -$as_echo "$lt_cv_ld_reload_flag" >&6; } -reload_flag=$lt_cv_ld_reload_flag -case $reload_flag in -"" | " "*) ;; -*) reload_flag=" $reload_flag" ;; -esac -reload_cmds='$LD$reload_flag -o $output$reload_objs' -case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - if test yes != "$GCC"; then - reload_cmds=false - fi - ;; - darwin*) - if test yes = "$GCC"; then - reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' - else - reload_cmds='$LD$reload_flag -o $output$reload_objs' - fi - ;; -esac - - - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. -set dummy ${ac_tool_prefix}objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OBJDUMP"; then - ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OBJDUMP=$ac_cv_prog_OBJDUMP -if test -n "$OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 -$as_echo "$OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OBJDUMP"; then - ac_ct_OBJDUMP=$OBJDUMP - # Extract the first word of "objdump", so it can be a program name with args. -set dummy objdump; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OBJDUMP"; then - ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OBJDUMP="objdump" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP -if test -n "$ac_ct_OBJDUMP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 -$as_echo "$ac_ct_OBJDUMP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OBJDUMP" = x; then - OBJDUMP="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OBJDUMP=$ac_ct_OBJDUMP - fi -else - OBJDUMP="$ac_cv_prog_OBJDUMP" -fi - -test -z "$OBJDUMP" && OBJDUMP=objdump - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 -$as_echo_n "checking how to recognize dependent libraries... " >&6; } -if ${lt_cv_deplibs_check_method+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_file_magic_cmd='$MAGIC_CMD' -lt_cv_file_magic_test_file= -lt_cv_deplibs_check_method='unknown' -# Need to set the preceding variable on all platforms that support -# interlibrary dependencies. -# 'none' -- dependencies not supported. -# 'unknown' -- same as none, but documents that we really don't know. -# 'pass_all' -- all dependencies passed with no checks. -# 'test_compile' -- check by making test program. -# 'file_magic [[regex]]' -- check by looking for files in library path -# that responds to the $file_magic_cmd with a given extended regex. -# If you have 'file' or equivalent on your system and you're not sure -# whether 'pass_all' will *always* work, you probably want this one. - -case $host_os in -aix[4-9]*) - lt_cv_deplibs_check_method=pass_all - ;; - -beos*) - lt_cv_deplibs_check_method=pass_all - ;; - -bsdi[45]*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' - lt_cv_file_magic_cmd='/usr/bin/file -L' - lt_cv_file_magic_test_file=/shlib/libc.so - ;; - -cygwin*) - # func_win32_libid is a shell function defined in ltmain.sh - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - ;; - -mingw* | pw32*) - # Base MSYS/MinGW do not provide the 'file' command needed by - # func_win32_libid shell function, so use a weaker test based on 'objdump', - # unless we find 'file', for example because we are cross-compiling. - if ( file / ) >/dev/null 2>&1; then - lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' - lt_cv_file_magic_cmd='func_win32_libid' - else - # Keep this pattern in sync with the one in func_win32_libid. - lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' - lt_cv_file_magic_cmd='$OBJDUMP -f' - fi - ;; - -cegcc*) - # use the weaker test based on 'objdump'. See mingw*. - lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
- lt_cv_file_magic_cmd='$OBJDUMP -f' - ;; - -darwin* | rhapsody*) - lt_cv_deplibs_check_method=pass_all - ;; - -freebsd* | dragonfly*) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - case $host_cpu in - i*86 ) - # Not sure whether the presence of OpenBSD here was a mistake. - # Let's accept both of them until this is cleared up. - lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` - ;; - esac - else - lt_cv_deplibs_check_method=pass_all - fi - ;; - -haiku*) - lt_cv_deplibs_check_method=pass_all - ;; - -hpux10.20* | hpux11*) - lt_cv_file_magic_cmd=/usr/bin/file - case $host_cpu in - ia64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' - lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so - ;; - hppa*64*) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' - lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl - ;; - *) - lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' - lt_cv_file_magic_test_file=/usr/lib/libc.sl - ;; - esac - ;; - -interix[3-9]*) - # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' - ;; - -irix5* | irix6* | nonstopux*) - case $LD in - *-32|*"-32 ") libmagic=32-bit;; - *-n32|*"-n32 ") libmagic=N32;; - *-64|*"-64 ") libmagic=64-bit;; - *) libmagic=never-match;; - esac - lt_cv_deplibs_check_method=pass_all - ;; - -# This must be glibc/ELF. -linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - lt_cv_deplibs_check_method=pass_all - ;; - -netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' - fi - ;; - -newos6*) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' - lt_cv_file_magic_cmd=/usr/bin/file - lt_cv_file_magic_test_file=/usr/lib/libnls.so - ;; - -*nto* | *qnx*) - lt_cv_deplibs_check_method=pass_all - ;; - -openbsd* | bitrig*) - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' - else - lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' - fi - ;; - -osf3* | osf4* | osf5*) - lt_cv_deplibs_check_method=pass_all - ;; - -rdos*) - lt_cv_deplibs_check_method=pass_all - ;; - -solaris*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - lt_cv_deplibs_check_method=pass_all - ;; - -sysv4 | sysv4.3*) - case $host_vendor in - motorola) - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' - lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` - ;; - ncr) - lt_cv_deplibs_check_method=pass_all - ;; - sequent) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' - ;; - sni) - lt_cv_file_magic_cmd='/bin/file' - lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" - lt_cv_file_magic_test_file=/lib/libc.so - ;; - siemens) - 
lt_cv_deplibs_check_method=pass_all - ;; - pc) - lt_cv_deplibs_check_method=pass_all - ;; - esac - ;; - -tpf*) - lt_cv_deplibs_check_method=pass_all - ;; -os2*) - lt_cv_deplibs_check_method=pass_all - ;; -esac - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 -$as_echo "$lt_cv_deplibs_check_method" >&6; } - -file_magic_glob= -want_nocaseglob=no -if test "$build" = "$host"; then - case $host_os in - mingw* | pw32*) - if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then - want_nocaseglob=yes - else - file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` - fi - ;; - esac -fi - -file_magic_cmd=$lt_cv_file_magic_cmd -deplibs_check_method=$lt_cv_deplibs_check_method -test -z "$deplibs_check_method" && deplibs_check_method=unknown - - - - - - - - - - - - - - - - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. -set dummy ${ac_tool_prefix}dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DLLTOOL"; then - ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -DLLTOOL=$ac_cv_prog_DLLTOOL -if test -n "$DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 -$as_echo "$DLLTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_DLLTOOL"; then - ac_ct_DLLTOOL=$DLLTOOL - # Extract the first word of "dlltool", so it can be a program name with args. -set dummy dlltool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DLLTOOL"; then - ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DLLTOOL="dlltool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL -if test -n "$ac_ct_DLLTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 -$as_echo "$ac_ct_DLLTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_DLLTOOL" = x; then - DLLTOOL="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DLLTOOL=$ac_ct_DLLTOOL - fi -else - DLLTOOL="$ac_cv_prog_DLLTOOL" -fi - -test -z "$DLLTOOL" && DLLTOOL=dlltool - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 -$as_echo_n "checking how to associate runtime and link libraries... " >&6; } -if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_sharedlib_from_linklib_cmd='unknown' - -case $host_os in -cygwin* | mingw* | pw32* | cegcc*) - # two different shell functions defined in ltmain.sh; - # decide which one to use based on capabilities of $DLLTOOL - case `$DLLTOOL --help 2>&1` in - *--identify-strict*) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib - ;; - *) - lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback - ;; - esac - ;; -*) - # fallback: assume linklib IS sharedlib - lt_cv_sharedlib_from_linklib_cmd=$ECHO - ;; -esac - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 -$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } -sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd -test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO - - - - - - - - -if test -n "$ac_tool_prefix"; then - for ac_prog in ar - do - # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. -set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_AR+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AR"; then - ac_cv_prog_AR="$AR" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_AR="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AR=$ac_cv_prog_AR -if test -n "$AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 -$as_echo "$AR" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$AR" && break - done -fi -if test -z "$AR"; then - ac_ct_AR=$AR - for ac_prog in ar -do - # Extract the first word of "$ac_prog", so it can be a program name with args. 
-set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_AR+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_AR"; then - ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_AR="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_AR=$ac_cv_prog_ac_ct_AR -if test -n "$ac_ct_AR"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 -$as_echo "$ac_ct_AR" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$ac_ct_AR" && break -done - - if test "x$ac_ct_AR" = x; then - AR="false" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - AR=$ac_ct_AR - fi -fi - -: ${AR=ar} -: ${AR_FLAGS=cr} - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 -$as_echo_n "checking for archiver @FILE support... " >&6; } -if ${lt_cv_ar_at_file+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ar_at_file=no - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - echo conftest.$ac_objext > conftest.lst - lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' - { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 - (eval $lt_ar_try) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if test 0 -eq "$ac_status"; then - # Ensure the archiver fails upon bogus file names. - rm -f conftest.$ac_objext libconftest.a - { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 - (eval $lt_ar_try) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if test 0 -ne "$ac_status"; then - lt_cv_ar_at_file=@ - fi - fi - rm -f conftest.* libconftest.a - -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 -$as_echo "$lt_cv_ar_at_file" >&6; } - -if test no = "$lt_cv_ar_at_file"; then - archiver_list_spec= -else - archiver_list_spec=$lt_cv_ar_at_file -fi - - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. -set dummy ${ac_tool_prefix}strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$STRIP"; then - ac_cv_prog_STRIP="$STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_STRIP="${ac_tool_prefix}strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -STRIP=$ac_cv_prog_STRIP -if test -n "$STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 -$as_echo "$STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_STRIP"; then - ac_ct_STRIP=$STRIP - # Extract the first word of "strip", so it can be a program name with args. -set dummy strip; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_STRIP+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_STRIP"; then - ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_STRIP="strip" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP -if test -n "$ac_ct_STRIP"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 -$as_echo "$ac_ct_STRIP" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_STRIP" = x; then - STRIP=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - STRIP=$ac_ct_STRIP - fi -else - STRIP="$ac_cv_prog_STRIP" -fi - -test -z "$STRIP" && STRIP=: - - - - - - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. -set dummy ${ac_tool_prefix}ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$RANLIB"; then - ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -RANLIB=$ac_cv_prog_RANLIB -if test -n "$RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 -$as_echo "$RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_RANLIB"; then - ac_ct_RANLIB=$RANLIB - # Extract the first word of "ranlib", so it can be a program name with args. -set dummy ranlib; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_RANLIB"; then - ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_RANLIB="ranlib" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB -if test -n "$ac_ct_RANLIB"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 -$as_echo "$ac_ct_RANLIB" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_RANLIB" = x; then - RANLIB=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - RANLIB=$ac_ct_RANLIB - fi -else - RANLIB="$ac_cv_prog_RANLIB" -fi - -test -z "$RANLIB" && RANLIB=: - - - - - - -# Determine commands to create old-style static archives. -old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' -old_postinstall_cmds='chmod 644 $oldlib' -old_postuninstall_cmds= - -if test -n "$RANLIB"; then - case $host_os in - bitrig* | openbsd*) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" - ;; - *) - old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" - ;; - esac - old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" -fi - -case $host_os in - darwin*) - lock_old_archive_extraction=yes ;; - *) - lock_old_archive_extraction=no ;; -esac - - - - - - - - - - - - - - - - - - - - - -for ac_prog in gawk mawk nawk awk -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_AWK+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$AWK"; then - ac_cv_prog_AWK="$AWK" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_AWK="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -AWK=$ac_cv_prog_AWK -if test -n "$AWK"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 -$as_echo "$AWK" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$AWK" && break -done - - - - - - - - - - - - - - - - - - - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC - - -# Check for command to grab the raw symbol name followed by C symbol from nm. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 -$as_echo_n "checking command to parse $NM output from $compiler object... 
" >&6; } -if ${lt_cv_sys_global_symbol_pipe+:} false; then : - $as_echo_n "(cached) " >&6 -else - -# These are sane defaults that work on at least a few old systems. -# [They come from Ultrix. What could be older than Ultrix?!! ;)] - -# Character class describing NM global symbol codes. -symcode='[BCDEGRST]' - -# Regexp to match symbols that can be accessed directly from C. -sympat='\([_A-Za-z][_A-Za-z0-9]*\)' - -# Define system-specific variables. -case $host_os in -aix*) - symcode='[BCDT]' - ;; -cygwin* | mingw* | pw32* | cegcc*) - symcode='[ABCDGISTW]' - ;; -hpux*) - if test ia64 = "$host_cpu"; then - symcode='[ABCDEGRST]' - fi - ;; -irix* | nonstopux*) - symcode='[BCDEGRST]' - ;; -osf*) - symcode='[BCDEGQRST]' - ;; -solaris*) - symcode='[BDRT]' - ;; -sco3.2v5*) - symcode='[DT]' - ;; -sysv4.2uw2*) - symcode='[DT]' - ;; -sysv5* | sco5v6* | unixware* | OpenUNIX*) - symcode='[ABDT]' - ;; -sysv4) - symcode='[DFNSTU]' - ;; -esac - -# If we're using GNU nm, then use its standard symbol codes. -case `$NM -V 2>&1` in -*GNU* | *'with BFD'*) - symcode='[ABCDGIRSTW]' ;; -esac - -if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Gets list of data symbols to import. - lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" - # Adjust the below global symbol transforms to fixup imported variables. - lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" - lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" - lt_c_name_lib_hook="\ - -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ - -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" -else - # Disable hooks by default. - lt_cv_sys_global_symbol_to_import= - lt_cdecl_hook= - lt_c_name_hook= - lt_c_name_lib_hook= -fi - -# Transform an extracted symbol line into a proper C declaration. -# Some systems (esp. on ia64) link data and code symbols differently, -# so use this general approach. -lt_cv_sys_global_symbol_to_cdecl="sed -n"\ -$lt_cdecl_hook\ -" -e 's/^T .* \(.*\)$/extern int \1();/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" - -# Transform an extracted symbol line into symbol name and symbol address -lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ -$lt_c_name_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" - -# Transform an extracted symbol line into symbol name with lib prefix and -# symbol address. -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ -$lt_c_name_lib_hook\ -" -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ -" -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ -" -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" - -# Handle CRLF in mingw tool chain -opt_cr= -case $build_os in -mingw*) - opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp - ;; -esac - -# Try without a prefix underscore, then with it. -for ac_symprfx in "" "_"; do - - # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. - symxfrm="\\1 $ac_symprfx\\2 \\2" - - # Write the raw and C identifiers. - if test "$lt_cv_nm_interface" = "MS dumpbin"; then - # Fake it for dumpbin and say T for any non-static function, - # D for any global variable and I for any imported variable. - # Also find C++ and __fastcall symbols from MSVC++, - # which start with @ or ?. 
- lt_cv_sys_global_symbol_pipe="$AWK '"\ -" {last_section=section; section=\$ 3};"\ -" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ -" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ -" /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ -" /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ -" /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ -" \$ 0!~/External *\|/{next};"\ -" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ -" {if(hide[section]) next};"\ -" {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ -" {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ -" s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ -" s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ -" ' prfx=^$ac_symprfx" - else - lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" - fi - lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" - - # Check to see that the pipe works correctly. - pipe_works=no - - rm -f conftest* - cat > conftest.$ac_ext <<_LT_EOF -#ifdef __cplusplus -extern "C" { -#endif -char nm_test_var; -void nm_test_func(void); -void nm_test_func(void){} -#ifdef __cplusplus -} -#endif -int main(){nm_test_var='a';nm_test_func();return(0);} -_LT_EOF - - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - # Now try to grab the symbols. - nlist=conftest.nm - $ECHO "$as_me:$LINENO: $NM conftest.$ac_objext | $lt_cv_sys_global_symbol_pipe > $nlist" >&5 - if eval "$NM" conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist 2>&5 && test -s "$nlist"; then - # Try sorting and uniquifying the output. - if sort "$nlist" | uniq > "$nlist"T; then - mv -f "$nlist"T "$nlist" - else - rm -f "$nlist"T - fi - - # Make sure that we snagged all the symbols we need. - if $GREP ' nm_test_var$' "$nlist" >/dev/null; then - if $GREP ' nm_test_func$' "$nlist" >/dev/null; then - cat <<_LT_EOF > conftest.$ac_ext -/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ -#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE -/* DATA imports from DLLs on WIN32 can't be const, because runtime - relocations are performed -- see ld's documentation on pseudo-relocs. */ -# define LT_DLSYM_CONST -#elif defined __osf__ -/* This system does not cope well with relocations in const data. */ -# define LT_DLSYM_CONST -#else -# define LT_DLSYM_CONST const -#endif - -#ifdef __cplusplus -extern "C" { -#endif - -_LT_EOF - # Now generate the symbol file. - eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' - - cat <<_LT_EOF >> conftest.$ac_ext - -/* The mapping between symbol names and symbols. */ -LT_DLSYM_CONST struct { - const char *name; - void *address; -} -lt__PROGRAM__LTX_preloaded_symbols[] = -{ - { "@PROGRAM@", (void *) 0 }, -_LT_EOF - $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext - cat <<\_LT_EOF >> conftest.$ac_ext - {0, (void *) 0} -}; - -/* This works around a problem in FreeBSD linker */ -#ifdef FREEBSD_WORKAROUND -static const void *lt_preloaded_setup() { - return lt__PROGRAM__LTX_preloaded_symbols; -} -#endif - -#ifdef __cplusplus -} -#endif -_LT_EOF - # Now try linking the two files. 
- mv conftest.$ac_objext conftstm.$ac_objext - lt_globsym_save_LIBS=$LIBS - lt_globsym_save_CFLAGS=$CFLAGS - LIBS=conftstm.$ac_objext - CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s conftest$ac_exeext; then - pipe_works=yes - fi - LIBS=$lt_globsym_save_LIBS - CFLAGS=$lt_globsym_save_CFLAGS - else - echo "cannot find nm_test_func in $nlist" >&5 - fi - else - echo "cannot find nm_test_var in $nlist" >&5 - fi - else - echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 - fi - else - echo "$progname: failed program was:" >&5 - cat conftest.$ac_ext >&5 - fi - rm -rf conftest* conftst* - - # Do not use the global_symbol_pipe unless it works. - if test yes = "$pipe_works"; then - break - else - lt_cv_sys_global_symbol_pipe= - fi -done - -fi - -if test -z "$lt_cv_sys_global_symbol_pipe"; then - lt_cv_sys_global_symbol_to_cdecl= -fi -if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 -$as_echo "failed" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 -$as_echo "ok" >&6; } -fi - -# Response file support. -if test "$lt_cv_nm_interface" = "MS dumpbin"; then - nm_file_list_spec='@' -elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then - nm_file_list_spec='@' -fi - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 -$as_echo_n "checking for sysroot... " >&6; } - -# Check whether --with-sysroot was given. -if test "${with_sysroot+set}" = set; then : - withval=$with_sysroot; -else - with_sysroot=no -fi - - -lt_sysroot= -case $with_sysroot in #( - yes) - if test yes = "$GCC"; then - lt_sysroot=`$CC --print-sysroot 2>/dev/null` - fi - ;; #( - /*) - lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` - ;; #( - no|'') - ;; #( - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 -$as_echo "$with_sysroot" >&6; } - as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 - ;; -esac - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 -$as_echo "${lt_sysroot:-no}" >&6; } - - - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 -$as_echo_n "checking for a working dd... " >&6; } -if ${ac_cv_path_lt_DD+:} false; then : - $as_echo_n "(cached) " >&6 -else - printf 0123456789abcdef0123456789abcdef >conftest.i -cat conftest.i conftest.i >conftest2.i -: ${lt_DD:=$DD} -if test -z "$lt_DD"; then - ac_path_lt_DD_found=false - # Loop through the user's path and test for each of PROGNAME-LIST - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
-    for ac_prog in dd; do
-    for ac_exec_ext in '' $ac_executable_extensions; do
-      ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext"
-      as_fn_executable_p "$ac_path_lt_DD" || continue
-if "$ac_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
-  cmp -s conftest.i conftest.out \
-  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
-fi
-      $ac_path_lt_DD_found && break 3
-    done
-  done
-  done
-IFS=$as_save_IFS
-  if test -z "$ac_cv_path_lt_DD"; then
-    :
-  fi
-else
-  ac_cv_path_lt_DD=$lt_DD
-fi
-
-rm -f conftest.i conftest2.i conftest.out
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5
-$as_echo "$ac_cv_path_lt_DD" >&6; }
-
-
-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5
-$as_echo_n "checking how to truncate binary pipes... " >&6; }
-if ${lt_cv_truncate_bin+:} false; then :
-  $as_echo_n "(cached) " >&6
-else
-  printf 0123456789abcdef0123456789abcdef >conftest.i
-cat conftest.i conftest.i >conftest2.i
-lt_cv_truncate_bin=
-if "$ac_cv_path_lt_DD" bs=32 count=1 <conftest2.i >conftest.out 2>/dev/null; then
-  cmp -s conftest.i conftest.out \
-  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
-fi
-rm -f conftest.i conftest2.i conftest.out
-test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"
-fi
-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5
-$as_echo "$lt_cv_truncate_bin" >&6; }
-
-
-
-
-
-
-
-# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
-func_cc_basename ()
-{
-    for cc_temp in $*""; do
-      case $cc_temp in
-        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
-        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
-        \-*) ;;
-        *) break;;
-      esac
-    done
-    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
-}
-
-# Check whether --enable-libtool-lock was given.
-if test "${enable_libtool_lock+set}" = set; then :
-  enableval=$enable_libtool_lock;
-fi
-
-test no = "$enable_libtool_lock" || enable_libtool_lock=yes
-
-# Some flags need to be propagated to the compiler or linker for good
-# libtool support.
-case $host in
-ia64-*-hpux*)
-  # Find out what ABI is being produced by ac_compile, and set mode
-  # options accordingly.
-  echo 'int i;' > conftest.$ac_ext
-  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
-  (eval $ac_compile) 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then
-    case `/usr/bin/file conftest.$ac_objext` in
-      *ELF-32*)
-        HPUX_IA64_MODE=32
-        ;;
-      *ELF-64*)
-        HPUX_IA64_MODE=64
-        ;;
-    esac
-  fi
-  rm -rf conftest*
-  ;;
-*-*-irix6*)
-  # Find out what ABI is being produced by ac_compile, and set linker
-  # options accordingly.
-  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
-  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
-  (eval $ac_compile) 2>&5
-  ac_status=$?
-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
-  test $ac_status = 0; }; then
-    if test yes = "$lt_cv_prog_gnu_ld"; then
-      case `/usr/bin/file conftest.$ac_objext` in
-        *32-bit*)
-          LD="${LD-ld} -melf32bsmip"
-          ;;
-        *N32*)
-          LD="${LD-ld} -melf32bmipn32"
-          ;;
-        *64-bit*)
-          LD="${LD-ld} -melf64bmip"
-          ;;
-      esac
-    else
-      case `/usr/bin/file conftest.$ac_objext` in
-        *32-bit*)
-          LD="${LD-ld} -32"
-          ;;
-        *N32*)
-          LD="${LD-ld} -n32"
-          ;;
-        *64-bit*)
-          LD="${LD-ld} -64"
-          ;;
-      esac
-    fi
-  fi
-  rm -rf conftest*
-  ;;
-
-mips64*-*linux*)
-  # Find out what ABI is being produced by ac_compile, and set linker
-  # options accordingly.
- echo '#line '$LINENO' "configure"' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - emul=elf - case `/usr/bin/file conftest.$ac_objext` in - *32-bit*) - emul="${emul}32" - ;; - *64-bit*) - emul="${emul}64" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *MSB*) - emul="${emul}btsmip" - ;; - *LSB*) - emul="${emul}ltsmip" - ;; - esac - case `/usr/bin/file conftest.$ac_objext` in - *N32*) - emul="${emul}n32" - ;; - esac - LD="${LD-ld} -m $emul" - fi - rm -rf conftest* - ;; - -x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ -s390*-*linux*|s390*-*tpf*|sparc*-*linux*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. Note that the listed cases only cover the - # situations where additional linker options are needed (such as when - # doing 32-bit compilation for a host where ld defaults to 64-bit, or - # vice versa); the common cases where no linker options are needed do - # not appear in the list. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *32-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_i386_fbsd" - ;; - x86_64-*linux*) - case `/usr/bin/file conftest.o` in - *x86-64*) - LD="${LD-ld} -m elf32_x86_64" - ;; - *) - LD="${LD-ld} -m elf_i386" - ;; - esac - ;; - powerpc64le-*linux*) - LD="${LD-ld} -m elf32lppclinux" - ;; - powerpc64-*linux*) - LD="${LD-ld} -m elf32ppclinux" - ;; - s390x-*linux*) - LD="${LD-ld} -m elf_s390" - ;; - sparc64-*linux*) - LD="${LD-ld} -m elf32_sparc" - ;; - esac - ;; - *64-bit*) - case $host in - x86_64-*kfreebsd*-gnu) - LD="${LD-ld} -m elf_x86_64_fbsd" - ;; - x86_64-*linux*) - LD="${LD-ld} -m elf_x86_64" - ;; - powerpcle-*linux*) - LD="${LD-ld} -m elf64lppc" - ;; - powerpc-*linux*) - LD="${LD-ld} -m elf64ppc" - ;; - s390*-*linux*|s390*-*tpf*) - LD="${LD-ld} -m elf64_s390" - ;; - sparc*-*linux*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; - -*-*-sco3.2v5*) - # On SCO OpenServer 5, we need -belf to get full-featured binaries. - SAVE_CFLAGS=$CFLAGS - CFLAGS="$CFLAGS -belf" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 -$as_echo_n "checking whether the C compiler needs -belf... " >&6; } -if ${lt_cv_cc_needs_belf+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_cc_needs_belf=yes -else - lt_cv_cc_needs_belf=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 -$as_echo "$lt_cv_cc_needs_belf" >&6; } - if test yes != "$lt_cv_cc_needs_belf"; then - # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf - CFLAGS=$SAVE_CFLAGS - fi - ;; -*-*solaris*) - # Find out what ABI is being produced by ac_compile, and set linker - # options accordingly. - echo 'int i;' > conftest.$ac_ext - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then - case `/usr/bin/file conftest.o` in - *64-bit*) - case $lt_cv_prog_gnu_ld in - yes*) - case $host in - i?86-*-solaris*|x86_64-*-solaris*) - LD="${LD-ld} -m elf_x86_64" - ;; - sparc*-*-solaris*) - LD="${LD-ld} -m elf64_sparc" - ;; - esac - # GNU ld 2.21 introduced _sol2 emulations. Use them if available. - if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then - LD=${LD-ld}_sol2 - fi - ;; - *) - if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then - LD="${LD-ld} -64" - fi - ;; - esac - ;; - esac - fi - rm -rf conftest* - ;; -esac - -need_locks=$enable_libtool_lock - -if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. -set dummy ${ac_tool_prefix}mt; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$MANIFEST_TOOL"; then - ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL -if test -n "$MANIFEST_TOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 -$as_echo "$MANIFEST_TOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_MANIFEST_TOOL"; then - ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL - # Extract the first word of "mt", so it can be a program name with args. -set dummy mt; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_MANIFEST_TOOL"; then - ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL -if test -n "$ac_ct_MANIFEST_TOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 -$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_MANIFEST_TOOL" = x; then - MANIFEST_TOOL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL - fi -else - MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" -fi - -test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 -$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } -if ${lt_cv_path_mainfest_tool+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_path_mainfest_tool=no - echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 - $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out - cat conftest.err >&5 - if $GREP 'Manifest Tool' conftest.out > /dev/null; then - lt_cv_path_mainfest_tool=yes - fi - rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 -$as_echo "$lt_cv_path_mainfest_tool" >&6; } -if test yes != "$lt_cv_path_mainfest_tool"; then - MANIFEST_TOOL=: -fi - - - - - - - case $host_os in - rhapsody* | darwin*) - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. -set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_DSYMUTIL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$DSYMUTIL"; then - ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -DSYMUTIL=$ac_cv_prog_DSYMUTIL -if test -n "$DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 -$as_echo "$DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_DSYMUTIL"; then - ac_ct_DSYMUTIL=$DSYMUTIL - # Extract the first word of "dsymutil", so it can be a program name with args. -set dummy dsymutil; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_DSYMUTIL"; then - ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. 
-else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL -if test -n "$ac_ct_DSYMUTIL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 -$as_echo "$ac_ct_DSYMUTIL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_DSYMUTIL" = x; then - DSYMUTIL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - DSYMUTIL=$ac_ct_DSYMUTIL - fi -else - DSYMUTIL="$ac_cv_prog_DSYMUTIL" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. -set dummy ${ac_tool_prefix}nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_NMEDIT+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$NMEDIT"; then - ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -NMEDIT=$ac_cv_prog_NMEDIT -if test -n "$NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 -$as_echo "$NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_NMEDIT"; then - ac_ct_NMEDIT=$NMEDIT - # Extract the first word of "nmedit", so it can be a program name with args. -set dummy nmedit; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_NMEDIT"; then - ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_NMEDIT="nmedit" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT -if test -n "$ac_ct_NMEDIT"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 -$as_echo "$ac_ct_NMEDIT" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_NMEDIT" = x; then - NMEDIT=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - NMEDIT=$ac_ct_NMEDIT - fi -else - NMEDIT="$ac_cv_prog_NMEDIT" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. -set dummy ${ac_tool_prefix}lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_LIPO+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$LIPO"; then - ac_cv_prog_LIPO="$LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_LIPO="${ac_tool_prefix}lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -LIPO=$ac_cv_prog_LIPO -if test -n "$LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 -$as_echo "$LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_LIPO"; then - ac_ct_LIPO=$LIPO - # Extract the first word of "lipo", so it can be a program name with args. -set dummy lipo; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_LIPO+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_LIPO"; then - ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_LIPO="lipo" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO -if test -n "$ac_ct_LIPO"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 -$as_echo "$ac_ct_LIPO" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_LIPO" = x; then - LIPO=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - LIPO=$ac_ct_LIPO - fi -else - LIPO="$ac_cv_prog_LIPO" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL"; then - ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OTOOL="${ac_tool_prefix}otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OTOOL=$ac_cv_prog_OTOOL -if test -n "$OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 -$as_echo "$OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OTOOL"; then - ac_ct_OTOOL=$OTOOL - # Extract the first word of "otool", so it can be a program name with args. -set dummy otool; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL"; then - ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OTOOL="otool" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL -if test -n "$ac_ct_OTOOL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 -$as_echo "$ac_ct_OTOOL" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OTOOL" = x; then - OTOOL=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL=$ac_ct_OTOOL - fi -else - OTOOL="$ac_cv_prog_OTOOL" -fi - - if test -n "$ac_tool_prefix"; then - # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. -set dummy ${ac_tool_prefix}otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_OTOOL64+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$OTOOL64"; then - ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -OTOOL64=$ac_cv_prog_OTOOL64 -if test -n "$OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 -$as_echo "$OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - -fi -if test -z "$ac_cv_prog_OTOOL64"; then - ac_ct_OTOOL64=$OTOOL64 - # Extract the first word of "otool64", so it can be a program name with args. -set dummy otool64; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ac_ct_OTOOL64"; then - ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ac_ct_OTOOL64="otool64" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 -if test -n "$ac_ct_OTOOL64"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 -$as_echo "$ac_ct_OTOOL64" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - if test "x$ac_ct_OTOOL64" = x; then - OTOOL64=":" - else - case $cross_compiling:$ac_tool_warned in -yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 -$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} -ac_tool_warned=yes ;; -esac - OTOOL64=$ac_ct_OTOOL64 - fi -else - OTOOL64="$ac_cv_prog_OTOOL64" -fi - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 -$as_echo_n "checking for -single_module linker flag... " >&6; } -if ${lt_cv_apple_cc_single_mod+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_apple_cc_single_mod=no - if test -z "$LT_MULTI_MODULE"; then - # By default we will add the -single_module flag. You can override - # by either setting the environment variable LT_MULTI_MODULE - # non-empty at configure time, or by adding -multi_module to the - # link flags. - rm -rf libconftest.dylib* - echo "int foo(void){return 1;}" > conftest.c - echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ --dynamiclib -Wl,-single_module conftest.c" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ - -dynamiclib -Wl,-single_module conftest.c 2>conftest.err - _lt_result=$? - # If there is a non-empty error log, and "single_module" - # appears in it, assume the flag caused a linker warning - if test -s conftest.err && $GREP single_module conftest.err; then - cat conftest.err >&5 - # Otherwise, if the output was created with a 0 exit code from - # the compiler, it worked. - elif test -f libconftest.dylib && test 0 = "$_lt_result"; then - lt_cv_apple_cc_single_mod=yes - else - cat conftest.err >&5 - fi - rm -rf libconftest.dylib* - rm -f conftest.* - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 -$as_echo "$lt_cv_apple_cc_single_mod" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 -$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } -if ${lt_cv_ld_exported_symbols_list+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_exported_symbols_list=no - save_LDFLAGS=$LDFLAGS - echo "_main" > conftest.sym - LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_ld_exported_symbols_list=yes -else - lt_cv_ld_exported_symbols_list=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 -$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 -$as_echo_n "checking for -force_load linker flag... 
" >&6; } -if ${lt_cv_ld_force_load+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_ld_force_load=no - cat > conftest.c << _LT_EOF -int forced_loaded() { return 2;} -_LT_EOF - echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 - $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 - echo "$AR cr libconftest.a conftest.o" >&5 - $AR cr libconftest.a conftest.o 2>&5 - echo "$RANLIB libconftest.a" >&5 - $RANLIB libconftest.a 2>&5 - cat > conftest.c << _LT_EOF -int main() { return 0;} -_LT_EOF - echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 - $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err - _lt_result=$? - if test -s conftest.err && $GREP force_load conftest.err; then - cat conftest.err >&5 - elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then - lt_cv_ld_force_load=yes - else - cat conftest.err >&5 - fi - rm -f conftest.err libconftest.a conftest conftest.c - rm -rf conftest.dSYM - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 -$as_echo "$lt_cv_ld_force_load" >&6; } - case $host_os in - rhapsody* | darwin1.[012]) - _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; - darwin1.*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - darwin*) # darwin 5.x on - # if running on 10.5 or later, the deployment target defaults - # to the OS version, if on x86, and 10.4, the deployment - # target defaults to 10.4. Don't you love it? - case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in - 10.0,*86*-darwin8*|10.0,*-darwin[91]*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - 10.[012][,.]*) - _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; - 10.*) - _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; - esac - ;; - esac - if test yes = "$lt_cv_apple_cc_single_mod"; then - _lt_dar_single_mod='$single_module' - fi - if test yes = "$lt_cv_ld_exported_symbols_list"; then - _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' - else - _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' - fi - if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then - _lt_dsymutil='~$DSYMUTIL $lib || :' - else - _lt_dsymutil= - fi - ;; - esac - -# func_munge_path_list VARIABLE PATH -# ----------------------------------- -# VARIABLE is name of variable containing _space_ separated list of -# directories to be munged by the contents of PATH, which is string -# having a format: -# "DIR[:DIR]:" -# string "DIR[ DIR]" will be prepended to VARIABLE -# ":DIR[:DIR]" -# string "DIR[ DIR]" will be appended to VARIABLE -# "DIRP[:DIRP]::[DIRA:]DIRA" -# string "DIRP[ DIRP]" will be prepended to VARIABLE and string -# "DIRA[ DIRA]" will be appended to VARIABLE -# "DIR[:DIR]" -# VARIABLE will be replaced by "DIR[ DIR]" -func_munge_path_list () -{ - case x$2 in - x) - ;; - *:) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" - ;; - x:*) - eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" - ;; - *::*) - eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" - eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" - ;; - *) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" - ;; - esac -} - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ 
$as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if ${ac_cv_prog_CPP+:} false; then : - $as_echo_n "(cached) " >&6 -else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - break -fi - - done - ac_cv_prog_CPP=$CPP - -fi - CPP=$ac_cv_prog_CPP -else - ac_cv_prog_CPP=$CPP -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 -$as_echo "$CPP" >&6; } -ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. - # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef __STDC__ -# include -#else -# include -#endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.i conftest.$ac_ext - - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break -fi -rm -f conftest.err conftest.i conftest.$ac_ext - -done -# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. -rm -f conftest.i conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - -else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error $? 
"C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details" "$LINENO" 5; } -fi - -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no -fi -rm -f conftest* - -fi - -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif - -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) -int -main () -{ - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext -fi - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. 
-for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -for ac_header in dlfcn.h -do : - ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default -" -if test "x$ac_cv_header_dlfcn_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_DLFCN_H 1 -_ACEOF - -fi - -done - - - - - -# Set options - - - - enable_dlopen=no - - - enable_win32_dll=no - - - # Check whether --enable-shared was given. -if test "${enable_shared+set}" = set; then : - enableval=$enable_shared; p=${PACKAGE-default} - case $enableval in - yes) enable_shared=yes ;; - no) enable_shared=no ;; - *) - enable_shared=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_shared=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - enable_shared=yes -fi - - - - - - - - - - # Check whether --enable-static was given. -if test "${enable_static+set}" = set; then : - enableval=$enable_static; p=${PACKAGE-default} - case $enableval in - yes) enable_static=yes ;; - no) enable_static=no ;; - *) - enable_static=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_static=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - enable_static=yes -fi - - - - - - - - - - -# Check whether --with-pic was given. -if test "${with_pic+set}" = set; then : - withval=$with_pic; lt_p=${PACKAGE-default} - case $withval in - yes|no) pic_mode=$withval ;; - *) - pic_mode=default - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for lt_pkg in $withval; do - IFS=$lt_save_ifs - if test "X$lt_pkg" = "X$lt_p"; then - pic_mode=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - pic_mode=default -fi - - - - - - - - - # Check whether --enable-fast-install was given. -if test "${enable_fast_install+set}" = set; then : - enableval=$enable_fast_install; p=${PACKAGE-default} - case $enableval in - yes) enable_fast_install=yes ;; - no) enable_fast_install=no ;; - *) - enable_fast_install=no - # Look at the argument we got. We use all the common list separators. - lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, - for pkg in $enableval; do - IFS=$lt_save_ifs - if test "X$pkg" = "X$p"; then - enable_fast_install=yes - fi - done - IFS=$lt_save_ifs - ;; - esac -else - enable_fast_install=yes -fi - - - - - - - - - shared_archive_member_spec= -case $host,$enable_shared in -power*-*-aix[5-9]*,yes) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 -$as_echo_n "checking which variant of shared library versioning to provide... " >&6; } - -# Check whether --with-aix-soname was given. -if test "${with_aix_soname+set}" = set; then : - withval=$with_aix_soname; case $withval in - aix|svr4|both) - ;; - *) - as_fn_error $? 
"Unknown argument to --with-aix-soname" "$LINENO" 5 - ;; - esac - lt_cv_with_aix_soname=$with_aix_soname -else - if ${lt_cv_with_aix_soname+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_with_aix_soname=aix -fi - - with_aix_soname=$lt_cv_with_aix_soname -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 -$as_echo "$with_aix_soname" >&6; } - if test aix != "$with_aix_soname"; then - # For the AIX way of multilib, we name the shared archive member - # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', - # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. - # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, - # the AIX toolchain works better with OBJECT_MODE set (default 32). - if test 64 = "${OBJECT_MODE-32}"; then - shared_archive_member_spec=shr_64 - else - shared_archive_member_spec=shr - fi - fi - ;; -*) - with_aix_soname=aix - ;; -esac - - - - - - - - - - -# This can be used to rebuild libtool when needed -LIBTOOL_DEPS=$ltmain - -# Always use our own libtool. -LIBTOOL='$(SHELL) $(top_builddir)/libtool' - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -test -z "$LN_S" && LN_S="ln -s" - - - - - - - - - - - - - - -if test -n "${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 -$as_echo_n "checking for objdir... " >&6; } -if ${lt_cv_objdir+:} false; then : - $as_echo_n "(cached) " >&6 -else - rm -f .libs 2>/dev/null -mkdir .libs 2>/dev/null -if test -d .libs; then - lt_cv_objdir=.libs -else - # MS-DOS does not allow filenames that begin with a dot. - lt_cv_objdir=_libs -fi -rmdir .libs 2>/dev/null -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 -$as_echo "$lt_cv_objdir" >&6; } -objdir=$lt_cv_objdir - - - - - -cat >>confdefs.h <<_ACEOF -#define LT_OBJDIR "$lt_cv_objdir/" -_ACEOF - - - - -case $host_os in -aix3*) - # AIX sometimes has problems with the GCC collect2 program. For some - # reason, if we set the COLLECT_NAMES environment variable, the problems - # vanish in a puff of smoke. - if test set != "${COLLECT_NAMES+set}"; then - COLLECT_NAMES= - export COLLECT_NAMES - fi - ;; -esac - -# Global variables: -ofile=libtool -can_build_shared=yes - -# All known linkers require a '.a' archive for static linking (except MSVC, -# which needs '.lib'). -libext=a - -with_gnu_ld=$lt_cv_prog_gnu_ld - -old_CC=$CC -old_CFLAGS=$CFLAGS - -# Set sane defaults for various variables -test -z "$CC" && CC=cc -test -z "$LTCC" && LTCC=$CC -test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS -test -z "$LD" && LD=ld -test -z "$ac_objext" && ac_objext=o - -func_cc_basename $compiler -cc_basename=$func_cc_basename_result - - -# Only perform the check for file, if the check method requires it -test -z "$MAGIC_CMD" && MAGIC_CMD=file -case $deplibs_check_method in -file_magic*) - if test "$file_magic_cmd" = '$MAGIC_CMD'; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 -$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } -if ${lt_cv_path_MAGIC_CMD+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD=$MAGIC_CMD - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. 
- if test -f "$ac_dir/${ac_tool_prefix}file"; then - lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD=$lt_cv_path_MAGIC_CMD - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS=$lt_save_ifs - MAGIC_CMD=$lt_save_MAGIC_CMD - ;; -esac -fi - -MAGIC_CMD=$lt_cv_path_MAGIC_CMD -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - - - -if test -z "$lt_cv_path_MAGIC_CMD"; then - if test -n "$ac_tool_prefix"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 -$as_echo_n "checking for file... " >&6; } -if ${lt_cv_path_MAGIC_CMD+:} false; then : - $as_echo_n "(cached) " >&6 -else - case $MAGIC_CMD in -[\\/*] | ?:[\\/]*) - lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. - ;; -*) - lt_save_MAGIC_CMD=$MAGIC_CMD - lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR - ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" - for ac_dir in $ac_dummy; do - IFS=$lt_save_ifs - test -z "$ac_dir" && ac_dir=. - if test -f "$ac_dir/file"; then - lt_cv_path_MAGIC_CMD=$ac_dir/"file" - if test -n "$file_magic_test_file"; then - case $deplibs_check_method in - "file_magic "*) - file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` - MAGIC_CMD=$lt_cv_path_MAGIC_CMD - if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | - $EGREP "$file_magic_regex" > /dev/null; then - : - else - cat <<_LT_EOF 1>&2 - -*** Warning: the command libtool uses to detect shared libraries, -*** $file_magic_cmd, produces output that libtool cannot recognize. -*** The result is that libtool may fail to recognize shared libraries -*** as such. This will affect the creation of libtool libraries that -*** depend on shared libraries, but programs linked with such libtool -*** libraries will work regardless of this problem. 
Nevertheless, you -*** may want to report the problem to your system manager and/or to -*** bug-libtool@gnu.org - -_LT_EOF - fi ;; - esac - fi - break - fi - done - IFS=$lt_save_ifs - MAGIC_CMD=$lt_save_MAGIC_CMD - ;; -esac -fi - -MAGIC_CMD=$lt_cv_path_MAGIC_CMD -if test -n "$MAGIC_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 -$as_echo "$MAGIC_CMD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - else - MAGIC_CMD=: - fi -fi - - fi - ;; -esac - -# Use C for the default configuration in the libtool script - -lt_save_CC=$CC -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - -# Source file extension for C test sources. -ac_ext=c - -# Object file extension for compiled C test sources. -objext=o -objext=$objext - -# Code to be used in simple compile tests -lt_simple_compile_test_code="int some_variable = 0;" - -# Code to be used in simple link tests -lt_simple_link_test_code='int main(){return(0);}' - - - - - - - -# If no C compiler was specified, use CC. -LTCC=${LTCC-"$CC"} - -# If no C compiler flags were specified, use CFLAGS. -LTCFLAGS=${LTCFLAGS-"$CFLAGS"} - -# Allow CC to be a program name with arguments. -compiler=$CC - -# Save the default compiler, since it gets overwritten when the other -# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. -compiler_DEFAULT=$CC - -# save warnings/boilerplate of simple test code -ac_outfile=conftest.$ac_objext -echo "$lt_simple_compile_test_code" >conftest.$ac_ext -eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_compiler_boilerplate=`cat conftest.err` -$RM conftest* - -ac_outfile=conftest.$ac_objext -echo "$lt_simple_link_test_code" >conftest.$ac_ext -eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err -_lt_linker_boilerplate=`cat conftest.err` -$RM -r conftest* - - -if test -n "$compiler"; then - -lt_prog_compiler_no_builtin_flag= - -if test yes = "$GCC"; then - case $cc_basename in - nvcc*) - lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; - *) - lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; - esac - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 -$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } -if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_rtti_exceptions=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_rtti_exceptions=yes - fi - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 -$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } - -if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then - lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" -else - : -fi - -fi - - - - - - - lt_prog_compiler_wl= -lt_prog_compiler_pic= -lt_prog_compiler_static= - - - if test yes = "$GCC"; then - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_static='-static' - - case $host_os in - aix*) - # All AIX code is PIC. - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - fi - lt_prog_compiler_pic='-fPIC' - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - lt_prog_compiler_pic='-fPIC' - ;; - m68k) - # FIXME: we need at least 68020 code to build shared libraries, but - # adding the '-m68020' flag to GCC prevents building anything better, - # like '-m68040'. - lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' - ;; - esac - ;; - - beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) - # PIC is the default for these OSes. - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - # Although the cygwin gcc ignores -fPIC, still need this for old-style - # (--disable-auto-import) libraries - lt_prog_compiler_pic='-DDLL_EXPORT' - case $host_os in - os2*) - lt_prog_compiler_static='$wl-static' - ;; - esac - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - ;; - - haiku*) - # PIC is the default for Haiku. - # The "-static" flag exists, but is broken. - lt_prog_compiler_static= - ;; - - hpux*) - # PIC is the default for 64-bit PA HP-UX, but not for 32-bit - # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag - # sets the default TLS model and affects inlining. - case $host_cpu in - hppa*64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - ;; - - interix[3-9]*) - # Interix 3.x gcc -fpic/-fPIC options generate broken code. - # Instead, we relocate shared libraries at runtime. - ;; - - msdosdjgpp*) - # Just because we use GCC doesn't mean we suddenly get shared libraries - # on systems that don't support them. - lt_prog_compiler_can_build_shared=no - enable_shared=no - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. 
- lt_prog_compiler_pic='-fPIC -shared' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic=-Kconform_pic - fi - ;; - - *) - lt_prog_compiler_pic='-fPIC' - ;; - esac - - case $cc_basename in - nvcc*) # Cuda Compiler Driver 2.2 - lt_prog_compiler_wl='-Xlinker ' - if test -n "$lt_prog_compiler_pic"; then - lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" - fi - ;; - esac - else - # PORTME Check for flag to pass linker flags through the system compiler. - case $host_os in - aix*) - lt_prog_compiler_wl='-Wl,' - if test ia64 = "$host_cpu"; then - # AIX 5 now supports IA64 processor - lt_prog_compiler_static='-Bstatic' - else - lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' - fi - ;; - - darwin* | rhapsody*) - # PIC is the default on this platform - # Common symbols not allowed in MH_DYLIB files - lt_prog_compiler_pic='-fno-common' - case $cc_basename in - nagfor*) - # NAG Fortran compiler - lt_prog_compiler_wl='-Wl,-Wl,,' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - esac - ;; - - mingw* | cygwin* | pw32* | os2* | cegcc*) - # This hack is so that the source file can tell whether it is being - # built for inclusion in a dll (and should export symbols for example). - lt_prog_compiler_pic='-DDLL_EXPORT' - case $host_os in - os2*) - lt_prog_compiler_static='$wl-static' - ;; - esac - ;; - - hpux9* | hpux10* | hpux11*) - lt_prog_compiler_wl='-Wl,' - # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but - # not for PA HP-UX. - case $host_cpu in - hppa*64*|ia64*) - # +Z the default - ;; - *) - lt_prog_compiler_pic='+Z' - ;; - esac - # Is there a better lt_prog_compiler_static that works with the bundled CC? - lt_prog_compiler_static='$wl-a ${wl}archive' - ;; - - irix5* | irix6* | nonstopux*) - lt_prog_compiler_wl='-Wl,' - # PIC (with -KPIC) is the default. - lt_prog_compiler_static='-non_shared' - ;; - - linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - case $cc_basename in - # old Intel for x86_64, which still supported -KPIC. - ecc*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-static' - ;; - # flang / f18. f95 an alias for gfortran or flang on Debian - flang* | f18* | f95*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - # icc used to be incompatible with GCC. - # ICC 10 doesn't accept -KPIC any more. - icc* | ifort*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - # Lahey Fortran 8.1. - lf95*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='--shared' - lt_prog_compiler_static='--static' - ;; - nagfor*) - # NAG Fortran compiler - lt_prog_compiler_wl='-Wl,-Wl,,' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group compilers (*not* the Pentium gcc compiler, - # which looks to be a dead project) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' - ;; - ccc*) - lt_prog_compiler_wl='-Wl,' - # All Alpha code is PIC. 
- lt_prog_compiler_static='-non_shared' - ;; - xl* | bgxl* | bgf* | mpixl*) - # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-qpic' - lt_prog_compiler_static='-qstaticlink' - ;; - *) - case `$CC -V 2>&1 | sed 5q` in - *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) - # Sun Fortran 8.3 passes all unrecognized flags to the linker - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='' - ;; - *Sun\ F* | *Sun*Fortran*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Qoption ld ' - ;; - *Sun\ C*) - # Sun C 5.9 - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - lt_prog_compiler_wl='-Wl,' - ;; - *Intel*\ [CF]*Compiler*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fPIC' - lt_prog_compiler_static='-static' - ;; - *Portland\ Group*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-fpic' - lt_prog_compiler_static='-Bstatic' - ;; - esac - ;; - esac - ;; - - newsos6) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - *nto* | *qnx*) - # QNX uses GNU C++, but need to define -shared option too, otherwise - # it will coredump. - lt_prog_compiler_pic='-fPIC -shared' - ;; - - osf3* | osf4* | osf5*) - lt_prog_compiler_wl='-Wl,' - # All OSF/1 code is PIC. - lt_prog_compiler_static='-non_shared' - ;; - - rdos*) - lt_prog_compiler_static='-non_shared' - ;; - - solaris*) - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - case $cc_basename in - f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) - lt_prog_compiler_wl='-Qoption ld ';; - *) - lt_prog_compiler_wl='-Wl,';; - esac - ;; - - sunos4*) - lt_prog_compiler_wl='-Qoption ld ' - lt_prog_compiler_pic='-PIC' - lt_prog_compiler_static='-Bstatic' - ;; - - sysv4 | sysv4.2uw2* | sysv4.3*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - lt_prog_compiler_pic='-Kconform_pic' - lt_prog_compiler_static='-Bstatic' - fi - ;; - - sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_pic='-KPIC' - lt_prog_compiler_static='-Bstatic' - ;; - - unicos*) - lt_prog_compiler_wl='-Wl,' - lt_prog_compiler_can_build_shared=no - ;; - - uts4*) - lt_prog_compiler_pic='-pic' - lt_prog_compiler_static='-Bstatic' - ;; - - *) - lt_prog_compiler_can_build_shared=no - ;; - esac - fi - -case $host_os in - # For platforms that do not support PIC, -DPIC is meaningless: - *djgpp*) - lt_prog_compiler_pic= - ;; - *) - lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" - ;; -esac - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 -$as_echo_n "checking for $compiler option to produce PIC... " >&6; } -if ${lt_cv_prog_compiler_pic+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_pic=$lt_prog_compiler_pic -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 -$as_echo "$lt_cv_prog_compiler_pic" >&6; } -lt_prog_compiler_pic=$lt_cv_prog_compiler_pic - -# -# Check to make sure the PIC flag actually works. -# -if test -n "$lt_prog_compiler_pic"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 -$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } -if ${lt_cv_prog_compiler_pic_works+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_pic_works=no - ac_outfile=conftest.$ac_objext - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - # The option is referenced via a variable to avoid confusing sed. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>conftest.err) - ac_status=$? - cat conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s "$ac_outfile"; then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings other than the usual output. - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_pic_works=yes - fi - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 -$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } - -if test yes = "$lt_cv_prog_compiler_pic_works"; then - case $lt_prog_compiler_pic in - "" | " "*) ;; - *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; - esac -else - lt_prog_compiler_pic= - lt_prog_compiler_can_build_shared=no -fi - -fi - - - - - - - - - - - -# -# Check to make sure the static flag actually works. -# -wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 -$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } -if ${lt_cv_prog_compiler_static_works+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_static_works=no - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS $lt_tmp_static_flag" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler_static_works=yes - fi - else - lt_cv_prog_compiler_static_works=yes - fi - fi - $RM -r conftest* - LDFLAGS=$save_LDFLAGS - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 -$as_echo "$lt_cv_prog_compiler_static_works" >&6; } - -if test yes = "$lt_cv_prog_compiler_static_works"; then - : -else - lt_prog_compiler_static= -fi - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } -if ${lt_cv_prog_compiler_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. - $RM -r conftest - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 -$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } -if ${lt_cv_prog_compiler_c_o+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler_c_o=no - $RM -r conftest 2>/dev/null - mkdir conftest - cd conftest - mkdir out - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - lt_compiler_flag="-o out/conftest2.$ac_objext" - # Insert the option either (1) after the last *FLAGS variable, or - # (2) before a word containing "conftest.", or (3) at the end. - # Note that $ac_compile itself does not contain backslashes and begins - # with a dollar sign (not a hyphen), so the echo should work correctly. - lt_compile=`echo "$ac_compile" | $SED \ - -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ - -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ - -e 's:$: $lt_compiler_flag:'` - (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) - (eval "$lt_compile" 2>out/conftest.err) - ac_status=$? - cat out/conftest.err >&5 - echo "$as_me:$LINENO: \$? = $ac_status" >&5 - if (exit $ac_status) && test -s out/conftest2.$ac_objext - then - # The compiler can only warn and ignore the option if not recognized - # So say no if there are warnings - $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp - $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 - if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then - lt_cv_prog_compiler_c_o=yes - fi - fi - chmod u+w . 2>&5 - $RM conftest* - # SGI C++ compiler will create directory out/ii_files/ for - # template instantiation - test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files - $RM out/* && rmdir out - cd .. 
- $RM -r conftest - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 -$as_echo "$lt_cv_prog_compiler_c_o" >&6; } - - - - -hard_links=nottested -if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then - # do not overwrite the value of need_locks provided by the user - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 -$as_echo_n "checking if we can lock with hard links... " >&6; } - hard_links=yes - $RM conftest* - ln conftest.a conftest.b 2>/dev/null && hard_links=no - touch conftest.a - ln conftest.a conftest.b 2>&5 || hard_links=no - ln conftest.a conftest.b 2>/dev/null && hard_links=no - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 -$as_echo "$hard_links" >&6; } - if test no = "$hard_links"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 -$as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} - need_locks=warn - fi -else - need_locks=no -fi - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 -$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } - - runpath_var= - allow_undefined_flag= - always_export_symbols=no - archive_cmds= - archive_expsym_cmds= - compiler_needs_object=no - enable_shared_with_static_runtimes=no - export_dynamic_flag_spec= - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' - hardcode_automatic=no - hardcode_direct=no - hardcode_direct_absolute=no - hardcode_libdir_flag_spec= - hardcode_libdir_separator= - hardcode_minus_L=no - hardcode_shlibpath_var=unsupported - inherit_rpath=no - link_all_deplibs=unknown - module_cmds= - module_expsym_cmds= - old_archive_from_new_cmds= - old_archive_from_expsyms_cmds= - thread_safe_flag_spec= - whole_archive_flag_spec= - # include_expsyms should be a list of space-separated symbols to be *always* - # included in the symbol list - include_expsyms= - # exclude_expsyms can be an extended regexp of symbols to exclude - # it will be wrapped by ' (' and ')$', so one must not match beginning or - # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', - # as well as any symbol that contains 'd'. - exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' - # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out - # platforms (ab)use it in PIC code, but their linkers get confused if - # the symbol is explicitly referenced. Since portable code cannot - # rely on this symbol name, it's probably fine to never include it in - # preloaded symbol tables. - # Exclude shared library initialization/finalization symbols. - extract_expsyms_cmds= - - case $host_os in - cygwin* | mingw* | pw32* | cegcc*) - # FIXME: the MSVC++ port hasn't been tested in a loooong time - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. - if test yes != "$GCC"; then - with_gnu_ld=no - fi - ;; - interix*) - # we just hope/assume this is gcc and not c89 (= MSVC++) - with_gnu_ld=yes - ;; - openbsd* | bitrig*) - with_gnu_ld=no - ;; - linux* | k*bsd*-gnu | gnu*) - link_all_deplibs=no - ;; - esac - - ld_shlibs=yes - - # On some targets, GNU ld is compatible enough with the native linker - # that we're better off using the native interface for both. 
- lt_use_gnu_ld_interface=no - if test yes = "$with_gnu_ld"; then - case $host_os in - aix*) - # The AIX port of GNU ld has always aspired to compatibility - # with the native linker. However, as the warning in the GNU ld - # block says, versions before 2.19.5* couldn't really create working - # shared libraries, regardless of the interface used. - case `$LD -v 2>&1` in - *\ \(GNU\ Binutils\)\ 2.19.5*) ;; - *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; - *\ \(GNU\ Binutils\)\ [3-9]*) ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - ;; - *) - lt_use_gnu_ld_interface=yes - ;; - esac - fi - - if test yes = "$lt_use_gnu_ld_interface"; then - # If archive_cmds runs LD, not CC, wlarc should be empty - wlarc='$wl' - - # Set some defaults for GNU ld with shared library support. These - # are reset later if shared libraries are not supported. Putting them - # here allows them to be overridden if necessary. - runpath_var=LD_RUN_PATH - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - export_dynamic_flag_spec='$wl--export-dynamic' - # ancient GNU ld didn't support --whole-archive et. al. - if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then - whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' - else - whole_archive_flag_spec= - fi - supports_anon_versioning=no - case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in - *GNU\ gold*) supports_anon_versioning=yes ;; - *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 - *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... - *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... - *\ 2.11.*) ;; # other 2.11 versions - *) supports_anon_versioning=yes ;; - esac - - # See if GNU ld supports shared libraries. - case $host_os in - aix[3-9]*) - # On AIX/PPC, the GNU linker is very broken - if test ia64 != "$host_cpu"; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: the GNU linker, at least up to release 2.19, is reported -*** to be unable to reliably create shared libraries on AIX. -*** Therefore, libtool is disabling shared libraries support. If you -*** really care for shared libraries, you may want to install binutils -*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. -*** You will then need to restart the configuration process. - -_LT_EOF - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - - beos*) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - allow_undefined_flag=unsupported - # Joseph Beckenbach says some releases of gcc - # support --undefined. This deserves some investigation. FIXME - archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - else - ld_shlibs=no - fi - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, - # as there is no search path for DLLs. 
- hardcode_libdir_flag_spec='-L$libdir' - export_dynamic_flag_spec='$wl--export-all-symbols' - allow_undefined_flag=unsupported - always_export_symbols=no - enable_shared_with_static_runtimes=yes - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' - exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' - - if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - # If the export-symbols file already is a .def file, use it as - # is; otherwise, prepend EXPORTS... - archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then - cp $export_symbols $output_objdir/$soname.def; - else - echo EXPORTS > $output_objdir/$soname.def; - cat $export_symbols >> $output_objdir/$soname.def; - fi~ - $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' - else - ld_shlibs=no - fi - ;; - - haiku*) - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - link_all_deplibs=yes - ;; - - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - shrext_cmds=.dll - archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - enable_shared_with_static_runtimes=yes - ;; - - interix[3-9]*) - hardcode_direct=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - export_dynamic_flag_spec='$wl-E' - # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. - # Instead, shared libraries are loaded at an image base (0x10000000 by - # default) and relocated if they conflict, which is a slow very memory - # consuming and fragmenting process. To avoid this, we pick a random, - # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link - # time. 
Moving up from 0x10000000 also allows more sbrk(2) space. - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' - ;; - - gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) - tmp_diet=no - if test linux-dietlibc = "$host_os"; then - case $cc_basename in - diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) - esac - fi - if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ - && test no = "$tmp_diet" - then - tmp_addflag=' $pic_flag' - tmp_sharedflag='-shared' - case $cc_basename,$host_cpu in - pgcc*) # Portland Group C compiler - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag' - ;; - pgf77* | pgf90* | pgf95* | pgfortran*) - # Portland Group f77 and f90 compilers - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - tmp_addflag=' $pic_flag -Mnomain' ;; - ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 - tmp_addflag=' -i_dynamic' ;; - efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 - tmp_addflag=' -i_dynamic -nofor_main' ;; - ifc* | ifort*) # Intel Fortran compiler - tmp_addflag=' -nofor_main' ;; - lf95*) # Lahey Fortran 8.1 - whole_archive_flag_spec= - tmp_sharedflag='--shared' ;; - nagfor*) # NAGFOR 5.3 - tmp_sharedflag='-Wl,-shared' ;; - xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) - tmp_sharedflag='-qmkshrobj' - tmp_addflag= ;; - nvcc*) # Cuda Compiler Driver 2.2 - whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - compiler_needs_object=yes - ;; - esac - case `$CC -V 2>&1 | sed 5q` in - *Sun\ C*) # Sun C 5.9 - whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' - compiler_needs_object=yes - tmp_sharedflag='-G' ;; - *Sun\ F*) # Sun Fortran 8.3 - tmp_sharedflag='-G' ;; - esac - archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - - if test yes = "$supports_anon_versioning"; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' - fi - - case $cc_basename in - tcc*) - export_dynamic_flag_spec='-rdynamic' - ;; - xlf* | bgf* | bgxlf* | mpixlf*) - # IBM XL Fortran 10.1 on PPC cannot create shared libs itself - whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' - 
hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' - if test yes = "$supports_anon_versioning"; then - archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ - cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ - echo "local: *; };" >> $output_objdir/$libname.ver~ - $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' - fi - ;; - esac - else - ld_shlibs=no - fi - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' - wlarc= - else - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - fi - ;; - - solaris*) - if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: The releases 2.8.* of the GNU linker cannot reliably -*** create shared libraries on Solaris systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.9.1 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - - sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) - case `$LD -v 2>&1` in - *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) - ld_shlibs=no - cat <<_LT_EOF 1>&2 - -*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot -*** reliably create shared libraries on SCO systems. Therefore, libtool -*** is disabling shared libraries support. We urge you to upgrade GNU -*** binutils to release 2.16.91.0.3 or newer. Another option is to modify -*** your PATH or compiler configuration so that the native linker is -*** used, and then restart. - -_LT_EOF - ;; - *) - # For security reasons, it is highly recommended that you always - # use absolute paths for naming shared libraries, and exclude the - # DT_RUNPATH tag from executables and libraries. But doing so - # requires that you compile everything twice, which is a pain. 
- if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - ;; - - sunos4*) - archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' - wlarc= - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - *) - if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' - else - ld_shlibs=no - fi - ;; - esac - - if test no = "$ld_shlibs"; then - runpath_var= - hardcode_libdir_flag_spec= - export_dynamic_flag_spec= - whole_archive_flag_spec= - fi - else - # PORTME fill in a description of your system's linker (not GNU ld) - case $host_os in - aix3*) - allow_undefined_flag=unsupported - always_export_symbols=yes - archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' - # Note: this linker hardcodes the directories in LIBPATH if there - # are no directories specified by -L. - hardcode_minus_L=yes - if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then - # Neither direct hardcoding nor static linking is supported with a - # broken collect2. - hardcode_direct=unsupported - fi - ;; - - aix[4-9]*) - if test ia64 = "$host_cpu"; then - # On IA64, the linker does run time linking by default, so we don't - # have to do anything special. - aix_use_runtimelinking=no - exp_sym_flag='-Bexport' - no_entry_flag= - else - # If we're using GNU nm, then we don't want the "-C" option. - # -C means demangle to GNU nm, but means don't demangle to AIX nm. - # Without the "-l" option, or with the "-B" option, AIX nm treats - # weak defined symbols like other global defined symbols, whereas - # GNU nm marks them as "W". - # While the 'weak' keyword is ignored in the Export File, we need - # it in the Import File for the 'aix-soname' feature, so we have - # to replace the "-B" option with "-P" for AIX nm. - if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then - export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' - else - export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' - fi - aix_use_runtimelinking=no - - # Test if we are trying to use run time linking or normal - # AIX style linking. If -brtl is somewhere in LDFLAGS, we - # have runtime linking enabled, and use it for executables. 
- # For shared libraries, we enable/disable runtime linking - # depending on the kind of the shared library created - - # when "with_aix_soname,aix_use_runtimelinking" is: - # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables - # "aix,yes" lib.so shared, rtl:yes, for executables - # lib.a static archive - # "both,no" lib.so.V(shr.o) shared, rtl:yes - # lib.a(lib.so.V) shared, rtl:no, for executables - # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a(lib.so.V) shared, rtl:no - # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables - # lib.a static archive - case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) - for ld_flag in $LDFLAGS; do - if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then - aix_use_runtimelinking=yes - break - fi - done - if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then - # With aix-soname=svr4, we create the lib.so.V shared archives only, - # so we don't have lib.a shared libs to link our executables. - # We have to force runtime linking in this case. - aix_use_runtimelinking=yes - LDFLAGS="$LDFLAGS -Wl,-brtl" - fi - ;; - esac - - exp_sym_flag='-bexport' - no_entry_flag='-bnoentry' - fi - - # When large executables or shared objects are built, AIX ld can - # have problems creating the table of contents. If linking a library - # or program results in "error TOC overflow" add -mminimal-toc to - # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not - # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. - - archive_cmds='' - hardcode_direct=yes - hardcode_direct_absolute=yes - hardcode_libdir_separator=':' - link_all_deplibs=yes - file_list_spec='$wl-f,' - case $with_aix_soname,$aix_use_runtimelinking in - aix,*) ;; # traditional, no import file - svr4,* | *,yes) # use import file - # The Import File defines what to hardcode. - hardcode_direct=no - hardcode_direct_absolute=no - ;; - esac - - if test yes = "$GCC"; then - case $host_os in aix4.[012]|aix4.[012].*) - # We only want to do this on AIX 4.2 and lower, the check - # below for broken collect2 doesn't work under 4.3+ - collect2name=`$CC -print-prog-name=collect2` - if test -f "$collect2name" && - strings "$collect2name" | $GREP resolve_lib_name >/dev/null - then - # We have reworked collect2 - : - else - # We have old collect2 - hardcode_direct=unsupported - # It fails to find uninstalled libraries when the uninstalled - # path is not listed in the libpath. Setting hardcode_minus_L - # to unsupported forces relinking - hardcode_minus_L=yes - hardcode_libdir_flag_spec='-L$libdir' - hardcode_libdir_separator= - fi - ;; - esac - shared_flag='-shared' - if test yes = "$aix_use_runtimelinking"; then - shared_flag="$shared_flag "'$wl-G' - fi - # Need to ensure runtime linking is disabled for the traditional - # shared library, or the linker may eventually find shared libraries - # /with/ Import File - we do not want to mix them. - shared_flag_aix='-shared' - shared_flag_svr4='-shared $wl-G' - else - # not using gcc - if test ia64 = "$host_cpu"; then - # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release - # chokes on -Wl,-G. 
The following line is correct: - shared_flag='-G' - else - if test yes = "$aix_use_runtimelinking"; then - shared_flag='$wl-G' - else - shared_flag='$wl-bM:SRE' - fi - shared_flag_aix='$wl-bM:SRE' - shared_flag_svr4='$wl-G' - fi - fi - - export_dynamic_flag_spec='$wl-bexpall' - # It seems that -bexpall does not export symbols beginning with - # underscore (_), so it is better to generate a list of symbols to export. - always_export_symbols=yes - if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then - # Warning - without using the other runtime loading flags (-brtl), - # -berok will link without error, but may produce a broken library. - allow_undefined_flag='-berok' - # Determine the default libpath from the value encoded in an - # empty executable. - if test set = "${lt_cv_aix_libpath+set}"; then - aix_libpath=$lt_cv_aix_libpath -else - if ${lt_cv_aix_libpath_+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - - lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\([^ ]*\) *$/\1/ - p - } - }' - lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - # Check for a 64-bit object if we didn't find anything. - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=/usr/lib:/lib - fi - -fi - - aix_libpath=$lt_cv_aix_libpath_ -fi - - hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" - archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag - else - if test ia64 = "$host_cpu"; then - hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' - allow_undefined_flag="-z nodefs" - archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" - else - # Determine the default libpath from the value encoded in an - # empty executable. - if test set = "${lt_cv_aix_libpath+set}"; then - aix_libpath=$lt_cv_aix_libpath -else - if ${lt_cv_aix_libpath_+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - - lt_aix_libpath_sed=' - /Import File Strings/,/^$/ { - /^0/ { - s/^0 *\([^ ]*\) *$/\1/ - p - } - }' - lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - # Check for a 64-bit object if we didn't find anything. 
- if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` - fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - if test -z "$lt_cv_aix_libpath_"; then - lt_cv_aix_libpath_=/usr/lib:/lib - fi - -fi - - aix_libpath=$lt_cv_aix_libpath_ -fi - - hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" - # Warning - without using the other run time loading flags, - # -berok will link without error, but may produce a broken library. - no_undefined_flag=' $wl-bernotok' - allow_undefined_flag=' $wl-berok' - if test yes = "$with_gnu_ld"; then - # We only use this code for GNU lds that support --whole-archive. - whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' - else - # Exported symbols can be pulled into shared objects from archives - whole_archive_flag_spec='$convenience' - fi - archive_cmds_need_lc=yes - archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' - # -brtl affects multiple linker settings, -berok does not and is overridden later - compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' - if test svr4 != "$with_aix_soname"; then - # This is similar to how AIX traditionally builds its shared libraries. - archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' - fi - if test aix != "$with_aix_soname"; then - archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' - else - # used by -dlpreopen to get the symbols - archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' - fi - archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' - fi - fi - ;; - - amigaos*) - case $host_cpu in - powerpc) - # see comment about AmigaOS4 .so support - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' - archive_expsym_cmds='' - ;; - m68k) - archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - ;; - esac - ;; - - bsdi[45]*) - export_dynamic_flag_spec=-rdynamic - ;; - - cygwin* | mingw* | pw32* | cegcc*) - # When not using gcc, we currently assume that we are using - # Microsoft Visual C++. 
- # hardcode_libdir_flag_spec is actually meaningless, as there is - # no search path for DLLs. - case $cc_basename in - cl*) - # Native MSVC - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - always_export_symbols=yes - file_list_spec='@' - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. - archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' - archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then - cp "$export_symbols" "$output_objdir/$soname.def"; - echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; - else - $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; - fi~ - $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ - linknames=' - # The linker will not automatically build a static lib if we build a DLL. - # _LT_TAGVAR(old_archive_from_new_cmds, )='true' - enable_shared_with_static_runtimes=yes - exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' - export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' - # Don't use ranlib - old_postinstall_cmds='chmod 644 $oldlib' - postlink_cmds='lt_outputfile="@OUTPUT@"~ - lt_tool_outputfile="@TOOL_OUTPUT@"~ - case $lt_outputfile in - *.exe|*.EXE) ;; - *) - lt_outputfile=$lt_outputfile.exe - lt_tool_outputfile=$lt_tool_outputfile.exe - ;; - esac~ - if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then - $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; - $RM "$lt_outputfile.manifest"; - fi' - ;; - *) - # Assume MSVC wrapper - hardcode_libdir_flag_spec=' ' - allow_undefined_flag=unsupported - # Tell ltmain to make .lib files, not .a files. - libext=lib - # Tell ltmain to make .dll files, not .so files. - shrext_cmds=.dll - # FIXME: Setting linknames here is a bad hack. - archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' - # The linker will automatically build a .lib file if we build a DLL. - old_archive_from_new_cmds='true' - # FIXME: Should let the user specify the lib program. 
- old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' - enable_shared_with_static_runtimes=yes - ;; - esac - ;; - - darwin* | rhapsody*) - - - archive_cmds_need_lc=no - hardcode_direct=no - hardcode_automatic=yes - hardcode_shlibpath_var=unsupported - if test yes = "$lt_cv_ld_force_load"; then - whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' - - else - whole_archive_flag_spec='' - fi - link_all_deplibs=yes - allow_undefined_flag=$_lt_dar_allow_undefined - case $cc_basename in - ifort*|nagfor*) _lt_dar_can_shared=yes ;; - *) _lt_dar_can_shared=$GCC ;; - esac - if test yes = "$_lt_dar_can_shared"; then - output_verbose_link_cmd=func_echo_all - archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" - module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" - archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" - module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" - - else - ld_shlibs=no - fi - - ;; - - dgux*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - - # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor - # support. Future versions do this automatically, but an explicit c++rt0.o - # does not break anything, and helps significantly (at the cost of a little - # extra space). - freebsd2.2*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - # Unfortunately, older versions of FreeBSD 2 do not have this feature. - freebsd2.*) - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - - # FreeBSD 3 and greater uses gcc -shared to do shared libraries. - freebsd* | dragonfly*) - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - hpux9*) - if test yes = "$GCC"; then - archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - else - archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' - fi - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L=yes - export_dynamic_flag_spec='$wl-E' - ;; - - hpux10*) - if test yes,no = "$GCC,$with_gnu_ld"; then - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' - fi - if test no = "$with_gnu_ld"; then - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='$wl-E' - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. - hardcode_minus_L=yes - fi - ;; - - hpux11*) - if test yes,no = "$GCC,$with_gnu_ld"; then - case $host_cpu in - hppa*64*) - archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - else - case $host_cpu in - hppa*64*) - archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - ;; - ia64*) - archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' - ;; - *) - - # Older versions of the 11.00 compiler do not understand -b yet - # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 -$as_echo_n "checking if $CC understands -b... " >&6; } -if ${lt_cv_prog_compiler__b+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_prog_compiler__b=no - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS -b" - echo "$lt_simple_link_test_code" > conftest.$ac_ext - if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then - # The linker can only warn and ignore the option if not recognized - # So say no if there are warnings - if test -s conftest.err; then - # Append any errors to the config.log. - cat conftest.err 1>&5 - $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp - $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 - if diff conftest.exp conftest.er2 >/dev/null; then - lt_cv_prog_compiler__b=yes - fi - else - lt_cv_prog_compiler__b=yes - fi - fi - $RM -r conftest* - LDFLAGS=$save_LDFLAGS - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 -$as_echo "$lt_cv_prog_compiler__b" >&6; } - -if test yes = "$lt_cv_prog_compiler__b"; then - archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' -else - archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' -fi - - ;; - esac - fi - if test no = "$with_gnu_ld"; then - hardcode_libdir_flag_spec='$wl+b $wl$libdir' - hardcode_libdir_separator=: - - case $host_cpu in - hppa*64*|ia64*) - hardcode_direct=no - hardcode_shlibpath_var=no - ;; - *) - hardcode_direct=yes - hardcode_direct_absolute=yes - export_dynamic_flag_spec='$wl-E' - - # hardcode_minus_L: Not really in the search PATH, - # but as the default location of the library. 
- hardcode_minus_L=yes - ;; - esac - fi - ;; - - irix5* | irix6* | nonstopux*) - if test yes = "$GCC"; then - archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - # Try to use the -exported_symbol ld option, if it does not - # work, assume that -exports_file does not work either and - # implicitly export all symbols. - # This should be the same for all languages, so no per-tag cache variable. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 -$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } -if ${lt_cv_irix_exported_symbol+:} false; then : - $as_echo_n "(cached) " >&6 -else - save_LDFLAGS=$LDFLAGS - LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int foo (void) { return 0; } -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - lt_cv_irix_exported_symbol=yes -else - lt_cv_irix_exported_symbol=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 -$as_echo "$lt_cv_irix_exported_symbol" >&6; } - if test yes = "$lt_cv_irix_exported_symbol"; then - archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' - fi - link_all_deplibs=no - else - archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - inherit_rpath=yes - link_all_deplibs=yes - ;; - - linux*) - case $cc_basename in - tcc*) - # Fabrice Bellard et al's Tiny C Compiler - ld_shlibs=yes - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - ;; - esac - ;; - - netbsd* | netbsdelf*-gnu) - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out - else - archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_direct=yes - hardcode_shlibpath_var=no - ;; - - newsos6) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - hardcode_shlibpath_var=no - ;; - - *nto* | *qnx*) - ;; - - openbsd* | bitrig*) - if test -f /usr/libexec/ld.so; then - hardcode_direct=yes - hardcode_shlibpath_var=no - hardcode_direct_absolute=yes - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs 
$compiler_flags $wl-retain-symbols-file,$export_symbols' - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - export_dynamic_flag_spec='$wl-E' - else - archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' - hardcode_libdir_flag_spec='$wl-rpath,$libdir' - fi - else - ld_shlibs=no - fi - ;; - - os2*) - hardcode_libdir_flag_spec='-L$libdir' - hardcode_minus_L=yes - allow_undefined_flag=unsupported - shrext_cmds=.dll - archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ - $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ - $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ - $ECHO EXPORTS >> $output_objdir/$libname.def~ - prefix_cmds="$SED"~ - if test EXPORTS = "`$SED 1q $export_symbols`"; then - prefix_cmds="$prefix_cmds -e 1d"; - fi~ - prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ - cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ - $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ - emximp -o $lib $output_objdir/$libname.def' - old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' - enable_shared_with_static_runtimes=yes - ;; - - osf3*) - if test yes = "$GCC"; then - allow_undefined_flag=' $wl-expect_unresolved $wl\*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - fi - archive_cmds_need_lc='no' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - hardcode_libdir_separator=: - ;; - - osf4* | osf5*) # as osf3* with the addition of -msym flag - if test yes = "$GCC"; then - allow_undefined_flag=' $wl-expect_unresolved $wl\*' - archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' - hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' - else - allow_undefined_flag=' -expect_unresolved \*' - archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' - archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ - $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO 
"-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' - - # Both c and cxx compiler support -rpath directly - hardcode_libdir_flag_spec='-rpath $libdir' - fi - archive_cmds_need_lc='no' - hardcode_libdir_separator=: - ;; - - solaris*) - no_undefined_flag=' -z defs' - if test yes = "$GCC"; then - wlarc='$wl' - archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - else - case `$CC -V 2>&1` in - *"Compilers 5.0"*) - wlarc='' - archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' - ;; - *) - wlarc='$wl' - archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ - $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' - ;; - esac - fi - hardcode_libdir_flag_spec='-R$libdir' - hardcode_shlibpath_var=no - case $host_os in - solaris2.[0-5] | solaris2.[0-5].*) ;; - *) - # The compiler driver will combine and reorder linker options, - # but understands '-z linker_flag'. GCC discards it without '$wl', - # but is careful enough not to reorder. - # Supported since Solaris 2.6 (maybe 2.5.1?) - if test yes = "$GCC"; then - whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' - else - whole_archive_flag_spec='-z allextract$convenience -z defaultextract' - fi - ;; - esac - link_all_deplibs=yes - ;; - - sunos4*) - if test sequent = "$host_vendor"; then - # Use $CC to link under sequent, because it throws in some extra .o - # files that make .init and .fini sections work. - archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' - fi - hardcode_libdir_flag_spec='-L$libdir' - hardcode_direct=yes - hardcode_minus_L=yes - hardcode_shlibpath_var=no - ;; - - sysv4) - case $host_vendor in - sni) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=yes # is this really true??? - ;; - siemens) - ## LD is ld it makes a PLAMLIB - ## CC just makes a GrossModule. 
- archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' - reload_cmds='$CC -r -o $output$reload_objs' - hardcode_direct=no - ;; - motorola) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_direct=no #Motorola manual says yes, but my tests say they lie - ;; - esac - runpath_var='LD_RUN_PATH' - hardcode_shlibpath_var=no - ;; - - sysv4.3*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - export_dynamic_flag_spec='-Bexport' - ;; - - sysv4*MP*) - if test -d /usr/nec; then - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_shlibpath_var=no - runpath_var=LD_RUN_PATH - hardcode_runpath_var=yes - ld_shlibs=yes - fi - ;; - - sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) - no_undefined_flag='$wl-z,text' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - runpath_var='LD_RUN_PATH' - - if test yes = "$GCC"; then - archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - sysv5* | sco3.2v5* | sco5v6*) - # Note: We CANNOT use -z defs as we might desire, because we do not - # link with -lc, and that would cause any symbols used from libc to - # always be unresolved, which means just about no library would - # ever link correctly. If we're not using GNU ld we use -z text - # though, which does catch some bad symbols but isn't as heavy-handed - # as -z defs. - no_undefined_flag='$wl-z,text' - allow_undefined_flag='$wl-z,nodefs' - archive_cmds_need_lc=no - hardcode_shlibpath_var=no - hardcode_libdir_flag_spec='$wl-R,$libdir' - hardcode_libdir_separator=':' - link_all_deplibs=yes - export_dynamic_flag_spec='$wl-Bexport' - runpath_var='LD_RUN_PATH' - - if test yes = "$GCC"; then - archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - else - archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' - fi - ;; - - uts4*) - archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' - hardcode_libdir_flag_spec='-L$libdir' - hardcode_shlibpath_var=no - ;; - - *) - ld_shlibs=no - ;; - esac - - if test sni = "$host_vendor"; then - case $host in - sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) - export_dynamic_flag_spec='$wl-Blargedynsym' - ;; - esac - fi - fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 -$as_echo "$ld_shlibs" >&6; } -test no = "$ld_shlibs" && can_build_shared=no - -with_gnu_ld=$with_gnu_ld - - - - - - - - - - - - - - - -# -# Do we need to explicitly link libc? -# -case "x$archive_cmds_need_lc" in -x|xyes) - # Assume -lc should be added - archive_cmds_need_lc=yes - - if test yes,yes = "$GCC,$enable_shared"; then - case $archive_cmds in - *'~'*) - # FIXME: we may have to deal with multi-command sequences. - ;; - '$CC '*) - # Test whether the compiler implicitly links with -lc since on some - # systems, -lgcc has to come before -lc. 
If gcc already passes -lc - # to ld, don't add -lc before -lgcc. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 -$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } -if ${lt_cv_archive_cmds_need_lc+:} false; then : - $as_echo_n "(cached) " >&6 -else - $RM conftest* - echo "$lt_simple_compile_test_code" > conftest.$ac_ext - - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 - (eval $ac_compile) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } 2>conftest.err; then - soname=conftest - lib=conftest - libobjs=conftest.$ac_objext - deplibs= - wl=$lt_prog_compiler_wl - pic_flag=$lt_prog_compiler_pic - compiler_flags=-v - linker_flags=-v - verstring= - output_objdir=. - libname=conftest - lt_save_allow_undefined_flag=$allow_undefined_flag - allow_undefined_flag= - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 - (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - then - lt_cv_archive_cmds_need_lc=no - else - lt_cv_archive_cmds_need_lc=yes - fi - allow_undefined_flag=$lt_save_allow_undefined_flag - else - cat conftest.err 1>&5 - fi - $RM conftest* - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 -$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } - archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc - ;; - esac - fi - ;; -esac - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 -$as_echo_n "checking dynamic linker characteristics... " >&6; } - -if test yes = "$GCC"; then - case $host_os in - darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; - *) lt_awk_arg='/^libraries:/' ;; - esac - case $host_os in - mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; - *) lt_sed_strip_eq='s|=/|/|g' ;; - esac - lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` - case $lt_search_path_spec in - *\;*) - # if the path contains ";" then we assume it to be the separator - # otherwise default to the standard path separator (i.e. ":") - it is - # assumed that no part of a normal pathname contains ";" but that should - # okay in the real world where ";" in dirpaths is itself problematic. - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` - ;; - *) - lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` - ;; - esac - # Ok, now we have the path, separated by spaces, we can step through it - # and add multilib dir if necessary... - lt_tmp_lt_search_path_spec= - lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` - # ...but if some path component already ends with the multilib dir we assume - # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). 
- case "$lt_multi_os_dir; $lt_search_path_spec " in - "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) - lt_multi_os_dir= - ;; - esac - for lt_sys_path in $lt_search_path_spec; do - if test -d "$lt_sys_path$lt_multi_os_dir"; then - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" - elif test -n "$lt_multi_os_dir"; then - test -d "$lt_sys_path" && \ - lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" - fi - done - lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' -BEGIN {RS = " "; FS = "/|\n";} { - lt_foo = ""; - lt_count = 0; - for (lt_i = NF; lt_i > 0; lt_i--) { - if ($lt_i != "" && $lt_i != ".") { - if ($lt_i == "..") { - lt_count++; - } else { - if (lt_count == 0) { - lt_foo = "/" $lt_i lt_foo; - } else { - lt_count--; - } - } - } - } - if (lt_foo != "") { lt_freq[lt_foo]++; } - if (lt_freq[lt_foo] == 1) { print lt_foo; } -}'` - # AWK program above erroneously prepends '/' to C:/dos/paths - # for these hosts. - case $host_os in - mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ - $SED 's|/\([A-Za-z]:\)|\1|g'` ;; - esac - sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` -else - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" -fi -library_names_spec= -libname_spec='lib$name' -soname_spec= -shrext_cmds=.so -postinstall_cmds= -postuninstall_cmds= -finish_cmds= -finish_eval= -shlibpath_var= -shlibpath_overrides_runpath=unknown -version_type=none -dynamic_linker="$host_os ld.so" -sys_lib_dlsearch_path_spec="/lib /usr/lib" -need_lib_prefix=unknown -hardcode_into_libs=no - -# when you set need_version to no, make sure it does not cause -set_version -# flags to be left without arguments -need_version=unknown - - - -case $host_os in -aix3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname.a' - shlibpath_var=LIBPATH - - # AIX 3 has no versioning support, so we append a major version to the name. - soname_spec='$libname$release$shared_ext$major' - ;; - -aix[4-9]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - hardcode_into_libs=yes - if test ia64 = "$host_cpu"; then - # AIX 5 supports IA64 - library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - else - # With GCC up to 2.95.x, collect2 would create an import file - # for dependence libraries. The import file would start with - # the line '#! .'. This would cause the generated library to - # depend on '.', always an invalid library. This was fixed in - # development snapshots of GCC prior to 3.0. - case $host_os in - aix4 | aix4.[01] | aix4.[01].*) - if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' - echo ' yes ' - echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then - : - else - can_build_shared=no - fi - ;; - esac - # Using Import Files as archive members, it is possible to support - # filename-based versioning of shared library archives on AIX. While - # this would work for both with and without runtime linking, it will - # prevent static linking of such archives. So we do filename-based - # shared library versioning with .so extension only, which is used - # when both runtime linking and shared linking is enabled. 
- # Unfortunately, runtime linking may impact performance, so we do - # not want this to be the default eventually. Also, we use the - # versioned .so libs for executables only if there is the -brtl - # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. - # To allow for filename-based versioning support, we need to create - # libNAME.so.V as an archive file, containing: - # *) an Import File, referring to the versioned filename of the - # archive as well as the shared archive member, telling the - # bitwidth (32 or 64) of that shared object, and providing the - # list of exported symbols of that shared object, eventually - # decorated with the 'weak' keyword - # *) the shared object with the F_LOADONLY flag set, to really avoid - # it being seen by the linker. - # At run time we better use the real file rather than another symlink, - # but for link time we create the symlink libNAME.so -> libNAME.so.V - - case $with_aix_soname,$aix_use_runtimelinking in - # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct - # soname into executable. Probably we can add versioning support to - # collect2, so additional links can be useful in future. - aix,yes) # traditional libtool - dynamic_linker='AIX unversionable lib.so' - # If using run time linking (on AIX 4.2 or later) use lib.so - # instead of lib.a to let people know that these are not - # typical AIX shared libraries. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - aix,no) # traditional AIX only - dynamic_linker='AIX lib.a(lib.so.V)' - # We preserve .a as extension for shared libraries through AIX4.2 - # and later when we are not doing run time linking. - library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - ;; - svr4,*) # full svr4 only - dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # We do not specify a path in Import Files, so LIBPATH fires. - shlibpath_overrides_runpath=yes - ;; - *,yes) # both, prefer svr4 - dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" - library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' - # unpreferred sharedlib libNAME.a needs extra handling - postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' - postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' - # We do not specify a path in Import Files, so LIBPATH fires. 
- shlibpath_overrides_runpath=yes - ;; - *,no) # both, prefer aix - dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" - library_names_spec='$libname$release.a $libname.a' - soname_spec='$libname$release$shared_ext$major' - # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling - postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' - postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' - ;; - esac - shlibpath_var=LIBPATH - fi - ;; - -amigaos*) - case $host_cpu in - powerpc) - # Since July 2007 AmigaOS4 officially supports .so libraries. - # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - ;; - m68k) - library_names_spec='$libname.ixlibrary $libname.a' - # Create ${libname}_ixlibrary.a entries in /sys/libs. - finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' - ;; - esac - ;; - -beos*) - library_names_spec='$libname$shared_ext' - dynamic_linker="$host_os ld.so" - shlibpath_var=LIBRARY_PATH - ;; - -bsdi[45]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" - sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" - # the default ld.so.conf also contains /usr/contrib/lib and - # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow - # libtool to hard-code these into programs - ;; - -cygwin* | mingw* | pw32* | cegcc*) - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no - - case $GCC,$cc_basename in - yes,*) - # gcc - library_names_spec='$libname.dll.a' - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - - case $host_os in - cygwin*) - # Cygwin DLLs use 'cyg' prefix rather than 'lib' - soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" - ;; - mingw* | cegcc*) - # MinGW DLLs use traditional 'lib' prefix - soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - ;; - pw32*) - # pw32 DLLs use 'pw' prefix rather than 'lib' - library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - ;; - esac - dynamic_linker='Win32 ld.exe' - ;; - - *,cl*) - # Native MSVC - libname_spec='$name' - soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' - library_names_spec='$libname.dll.lib' - - case $build_os in - mingw*) - sys_lib_search_path_spec= - lt_save_ifs=$IFS - IFS=';' - for lt_path in $LIB - do - IFS=$lt_save_ifs - # Let DOS variable expansion print the short 8.3 style file name. - lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` - sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" - done - IFS=$lt_save_ifs - # Convert to MSYS style. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` - ;; - cygwin*) - # Convert to unix form, then to dos form, then back to unix form - # but this time dos style (no spaces!) so that the unix form looks - # like /cygdrive/c/PROGRA~1:/cygdr... - sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` - sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` - sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - ;; - *) - sys_lib_search_path_spec=$LIB - if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then - # It is most probably a Windows format PATH. - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` - else - sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` - fi - # FIXME: find the short name or the path components, as spaces are - # common. (e.g. "Program Files" -> "PROGRA~1") - ;; - esac - - # DLL is installed to $(libdir)/../bin by postinstall_cmds - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - shlibpath_overrides_runpath=yes - dynamic_linker='Win32 link.exe' - ;; - - *) - # Assume MSVC wrapper - library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' - dynamic_linker='Win32 ld.exe' - ;; - esac - # FIXME: first we should search . 
and the directory the executable is in - shlibpath_var=PATH - ;; - -darwin* | rhapsody*) - dynamic_linker="$host_os dyld" - version_type=darwin - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' - soname_spec='$libname$release$major$shared_ext' - shlibpath_overrides_runpath=yes - shlibpath_var=DYLD_LIBRARY_PATH - shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' - - sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" - sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' - ;; - -dgux*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -freebsd* | dragonfly*) - # DragonFly does not have aout. When/if they implement a new - # versioning mechanism, adjust this. - if test -x /usr/bin/objformat; then - objformat=`/usr/bin/objformat` - else - case $host_os in - freebsd[23].*) objformat=aout ;; - *) objformat=elf ;; - esac - fi - version_type=freebsd-$objformat - case $version_type in - freebsd-elf*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - need_version=no - need_lib_prefix=no - ;; - freebsd-*) - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - need_version=yes - ;; - esac - shlibpath_var=LD_LIBRARY_PATH - case $host_os in - freebsd2.*) - shlibpath_overrides_runpath=yes - ;; - freebsd3.[01]* | freebsdelf3.[01]*) - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ - freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - *) # from 4.6 on, and DragonFly - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - esac - ;; - -haiku*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - dynamic_linker="$host_os runtime_loader" - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LIBRARY_PATH - shlibpath_overrides_runpath=no - sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' - hardcode_into_libs=yes - ;; - -hpux9* | hpux10* | hpux11*) - # Give a soname corresponding to the major version so that dld.sl refuses to - # link against other versions. - version_type=sunos - need_lib_prefix=no - need_version=no - case $host_cpu in - ia64*) - shrext_cmds='.so' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.so" - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
- library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - if test 32 = "$HPUX_IA64_MODE"; then - sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" - sys_lib_dlsearch_path_spec=/usr/lib/hpux32 - else - sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" - sys_lib_dlsearch_path_spec=/usr/lib/hpux64 - fi - ;; - hppa*64*) - shrext_cmds='.sl' - hardcode_into_libs=yes - dynamic_linker="$host_os dld.sl" - shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH - shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - *) - shrext_cmds='.sl' - dynamic_linker="$host_os dld.sl" - shlibpath_var=SHLIB_PATH - shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - ;; - esac - # HP-UX runs *really* slowly unless shared libraries are mode 555, ... - postinstall_cmds='chmod 555 $lib' - # or fails outright, so override atomically: - install_override_mode=555 - ;; - -interix[3-9]*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -irix5* | irix6* | nonstopux*) - case $host_os in - nonstopux*) version_type=nonstopux ;; - *) - if test yes = "$lt_cv_prog_gnu_ld"; then - version_type=linux # correct to gnu/linux during the next big refactor - else - version_type=irix - fi ;; - esac - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' - case $host_os in - irix5* | nonstopux*) - libsuff= shlibsuff= - ;; - *) - case $LD in # libtool.m4 will add one of these switches to LD - *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") - libsuff= shlibsuff= libmagic=32-bit;; - *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") - libsuff=32 shlibsuff=N32 libmagic=N32;; - *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") - libsuff=64 shlibsuff=64 libmagic=64-bit;; - *) libsuff= shlibsuff= libmagic=never-match;; - esac - ;; - esac - shlibpath_var=LD_LIBRARY${shlibsuff}_PATH - shlibpath_overrides_runpath=no - sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" - sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" - hardcode_into_libs=yes - ;; - -# No shared lib support for Linux oldld, aout, or coff. -linux*oldld* | linux*aout* | linux*coff*) - dynamic_linker=no - ;; - -linux*android*) - version_type=none # Android doesn't support versioned libraries. 
- need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext' - soname_spec='$libname$release$shared_ext' - finish_cmds= - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - - dynamic_linker='Android linker' - # Don't embed -rpath directories since the linker doesn't support them. - hardcode_libdir_flag_spec='-L$libdir' - ;; - -# This must be glibc/ELF. -linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - - # Some binutils ld are patched to set DT_RUNPATH - if ${lt_cv_shlibpath_overrides_runpath+:} false; then : - $as_echo_n "(cached) " >&6 -else - lt_cv_shlibpath_overrides_runpath=no - save_LDFLAGS=$LDFLAGS - save_libdir=$libdir - eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ - LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : - lt_cv_shlibpath_overrides_runpath=yes -fi -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - LDFLAGS=$save_LDFLAGS - libdir=$save_libdir - -fi - - shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath - - # This implies no fast_install, which is unacceptable. - # Some rework will be needed to allow for fast_install - # before this can be enabled. - hardcode_into_libs=yes - - # Ideally, we could use ldconfig to report *all* directores which are - # searched for libraries, however this is still not possible. Aside from not - # being certain /sbin/ldconfig is available, command - # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, - # even though it is searched at run-time. Try to do the best guess by - # appending ld.so.conf contents (and includes) to the search path. - if test -f /etc/ld.so.conf; then - lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` - sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" - fi - - # We used to test for /lib/ld.so.1 and disable shared libraries on - # powerpc, because MkLinux only supported shared libraries with the - # GNU dynamic linker. Since this was broken with cross compilers, - # most powerpc-linux boxes support dynamic linking these days and - # people can always --disable-shared, the test was removed, and we - # assume the GNU/Linux dynamic linker is in use. 
- dynamic_linker='GNU/Linux ld.so' - ;; - -netbsdelf*-gnu) - version_type=linux - need_lib_prefix=no - need_version=no - library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' - soname_spec='${libname}${release}${shared_ext}$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='NetBSD ld.elf_so' - ;; - -netbsd*) - version_type=sunos - need_lib_prefix=no - need_version=no - if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - dynamic_linker='NetBSD (a.out) ld.so' - else - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - dynamic_linker='NetBSD ld.elf_so' - fi - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - ;; - -newsos6) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -*nto* | *qnx*) - version_type=qnx - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - dynamic_linker='ldqnx.so' - ;; - -openbsd* | bitrig*) - version_type=sunos - sys_lib_dlsearch_path_spec=/usr/lib - need_lib_prefix=no - if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then - need_version=no - else - need_version=yes - fi - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - ;; - -os2*) - libname_spec='$name' - version_type=windows - shrext_cmds=.dll - need_version=no - need_lib_prefix=no - # OS/2 can only load a DLL with a base name of 8 characters or less. - soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; - v=$($ECHO $release$versuffix | tr -d .-); - n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); - $ECHO $n$v`$shared_ext' - library_names_spec='${libname}_dll.$libext' - dynamic_linker='OS/2 ld.exe' - shlibpath_var=BEGINLIBPATH - sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - postinstall_cmds='base_file=`basename \$file`~ - dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ - dldir=$destdir/`dirname \$dlpath`~ - test -d \$dldir || mkdir -p \$dldir~ - $install_prog $dir/$dlname \$dldir/$dlname~ - chmod a+x \$dldir/$dlname~ - if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then - eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; - fi' - postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ - dlpath=$dir/\$dldll~ - $RM \$dlpath' - ;; - -osf3* | osf4* | osf5*) - version_type=osf - need_lib_prefix=no - need_version=no - soname_spec='$libname$release$shared_ext$major' - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" - sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec - ;; - -rdos*) - dynamic_linker=no - ;; - -solaris*) - version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - # ldd complains unless libraries are executable - postinstall_cmds='chmod +x $lib' - ;; - -sunos4*) - version_type=sunos - library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' - finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - if test yes = "$with_gnu_ld"; then - need_lib_prefix=no - fi - need_version=yes - ;; - -sysv4 | sysv4.3*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - case $host_vendor in - sni) - shlibpath_overrides_runpath=no - need_lib_prefix=no - runpath_var=LD_RUN_PATH - ;; - siemens) - need_lib_prefix=no - ;; - motorola) - need_lib_prefix=no - need_version=no - shlibpath_overrides_runpath=no - sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' - ;; - esac - ;; - -sysv4*MP*) - if test -d /usr/nec; then - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' - soname_spec='$libname$shared_ext.$major' - shlibpath_var=LD_LIBRARY_PATH - fi - ;; - -sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) - version_type=sco - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=yes - hardcode_into_libs=yes - if test yes = "$with_gnu_ld"; then - sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' - else - sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' - case $host_os in - sco3.2v5*) - sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" - ;; - esac - fi - sys_lib_dlsearch_path_spec='/usr/lib' - ;; - -tpf*) - # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
- version_type=linux # correct to gnu/linux during the next big refactor - need_lib_prefix=no - need_version=no - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - shlibpath_var=LD_LIBRARY_PATH - shlibpath_overrides_runpath=no - hardcode_into_libs=yes - ;; - -uts4*) - version_type=linux # correct to gnu/linux during the next big refactor - library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' - soname_spec='$libname$release$shared_ext$major' - shlibpath_var=LD_LIBRARY_PATH - ;; - -*) - dynamic_linker=no - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 -$as_echo "$dynamic_linker" >&6; } -test no = "$dynamic_linker" && can_build_shared=no - -variables_saved_for_relink="PATH $shlibpath_var $runpath_var" -if test yes = "$GCC"; then - variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" -fi - -if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then - sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec -fi - -if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then - sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec -fi - -# remember unaugmented sys_lib_dlsearch_path content for libtool script decls... -configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec - -# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code -func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" - -# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool -configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 -$as_echo_n "checking how to hardcode library paths into programs... " >&6; } -hardcode_action= -if test -n "$hardcode_libdir_flag_spec" || - test -n "$runpath_var" || - test yes = "$hardcode_automatic"; then - - # We can hardcode non-existent directories. - if test no != "$hardcode_direct" && - # If the only mechanism to avoid hardcoding is shlibpath_var, we - # have to relink, otherwise we might link with an installed library - # when we should be linking with a yet-to-be-installed one - ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && - test no != "$hardcode_minus_L"; then - # Linking always hardcodes the temporary library directory. - hardcode_action=relink - else - # We can link without hardcoding, and we can hardcode nonexisting dirs. - hardcode_action=immediate - fi -else - # We cannot hardcode anything, or else we can only hardcode existing - # directories. 
- hardcode_action=unsupported -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 -$as_echo "$hardcode_action" >&6; } - -if test relink = "$hardcode_action" || - test yes = "$inherit_rpath"; then - # Fast installation is not supported - enable_fast_install=no -elif test yes = "$shlibpath_overrides_runpath" || - test no = "$enable_shared"; then - # Fast installation is not necessary - enable_fast_install=needless -fi - - - - - - - if test yes != "$enable_dlopen"; then - enable_dlopen=unknown - enable_dlopen_self=unknown - enable_dlopen_self_static=unknown -else - lt_cv_dlopen=no - lt_cv_dlopen_libs= - - case $host_os in - beos*) - lt_cv_dlopen=load_add_on - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - ;; - - mingw* | pw32* | cegcc*) - lt_cv_dlopen=LoadLibrary - lt_cv_dlopen_libs= - ;; - - cygwin*) - lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= - ;; - - darwin*) - # if libdl is installed we need to link against it - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... " >&6; } -if ${ac_cv_lib_dl_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl -else - - lt_cv_dlopen=dyld - lt_cv_dlopen_libs= - lt_cv_dlopen_self=yes - -fi - - ;; - - tpf*) - # Don't try to run any link tests for TPF. We know it's impossible - # because TPF is a cross-compiler, and we know how we open DSOs. - lt_cv_dlopen=dlopen - lt_cv_dlopen_libs= - lt_cv_dlopen_self=no - ;; - - *) - ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" -if test "x$ac_cv_func_shl_load" = xyes; then : - lt_cv_dlopen=shl_load -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 -$as_echo_n "checking for shl_load in -ldld... " >&6; } -if ${ac_cv_lib_dld_shl_load+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); -int -main () -{ -return shl_load (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_shl_load=yes -else - ac_cv_lib_dld_shl_load=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 -$as_echo "$ac_cv_lib_dld_shl_load" >&6; } -if test "x$ac_cv_lib_dld_shl_load" = xyes; then : - lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld -else - ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" -if test "x$ac_cv_func_dlopen" = xyes; then : - lt_cv_dlopen=dlopen -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 -$as_echo_n "checking for dlopen in -ldl... " >&6; } -if ${ac_cv_lib_dl_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dl_dlopen=yes -else - ac_cv_lib_dl_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 -$as_echo "$ac_cv_lib_dl_dlopen" >&6; } -if test "x$ac_cv_lib_dl_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 -$as_echo_n "checking for dlopen in -lsvld... " >&6; } -if ${ac_cv_lib_svld_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsvld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_svld_dlopen=yes -else - ac_cv_lib_svld_dlopen=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 -$as_echo "$ac_cv_lib_svld_dlopen" >&6; } -if test "x$ac_cv_lib_svld_dlopen" = xyes; then : - lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 -$as_echo_n "checking for dld_link in -ldld... " >&6; } -if ${ac_cv_lib_dld_dld_link+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ldld $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char dld_link (); -int -main () -{ -return dld_link (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_dld_dld_link=yes -else - ac_cv_lib_dld_dld_link=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 -$as_echo "$ac_cv_lib_dld_dld_link" >&6; } -if test "x$ac_cv_lib_dld_dld_link" = xyes; then : - lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld -fi - - -fi - - -fi - - -fi - - -fi - - -fi - - ;; - esac - - if test no = "$lt_cv_dlopen"; then - enable_dlopen=no - else - enable_dlopen=yes - fi - - case $lt_cv_dlopen in - dlopen) - save_CPPFLAGS=$CPPFLAGS - test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" - - save_LDFLAGS=$LDFLAGS - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" - - save_LIBS=$LIBS - LIBS="$lt_cv_dlopen_libs $LIBS" - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 -$as_echo_n "checking whether a program can dlopen itself... " >&6; } -if ${lt_cv_dlopen_self+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test yes = "$cross_compiling"; then : - lt_cv_dlopen_self=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line $LINENO "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H -#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -/* When -fvisibility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -int fnord () __attribute__((visibility("default"))); -#endif - -int fnord () { return 42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self=no - fi -fi -rm -fr conftest* - - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 -$as_echo "$lt_cv_dlopen_self" >&6; } - - if test yes = "$lt_cv_dlopen_self"; then - wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 -$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } -if ${lt_cv_dlopen_self_static+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test yes = "$cross_compiling"; then : - lt_cv_dlopen_self_static=cross -else - lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 - lt_status=$lt_dlunknown - cat > conftest.$ac_ext <<_LT_EOF -#line $LINENO "configure" -#include "confdefs.h" - -#if HAVE_DLFCN_H -#include -#endif - -#include - -#ifdef RTLD_GLOBAL -# define LT_DLGLOBAL RTLD_GLOBAL -#else -# ifdef DL_GLOBAL -# define LT_DLGLOBAL DL_GLOBAL -# else -# define LT_DLGLOBAL 0 -# endif -#endif - -/* We may have to define LT_DLLAZY_OR_NOW in the command line if we - find out it does not work in some platform. */ -#ifndef LT_DLLAZY_OR_NOW -# ifdef RTLD_LAZY -# define LT_DLLAZY_OR_NOW RTLD_LAZY -# else -# ifdef DL_LAZY -# define LT_DLLAZY_OR_NOW DL_LAZY -# else -# ifdef RTLD_NOW -# define LT_DLLAZY_OR_NOW RTLD_NOW -# else -# ifdef DL_NOW -# define LT_DLLAZY_OR_NOW DL_NOW -# else -# define LT_DLLAZY_OR_NOW 0 -# endif -# endif -# endif -# endif -#endif - -/* When -fvisibility=hidden is used, assume the code has been annotated - correspondingly for the symbols needed. */ -#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) -int fnord () __attribute__((visibility("default"))); -#endif - -int fnord () { return 42; } -int main () -{ - void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); - int status = $lt_dlunknown; - - if (self) - { - if (dlsym (self,"fnord")) status = $lt_dlno_uscore; - else - { - if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; - else puts (dlerror ()); - } - /* dlclose (self); */ - } - else - puts (dlerror ()); - - return status; -} -_LT_EOF - if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 - (eval $ac_link) 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then - (./conftest; exit; ) >&5 2>/dev/null - lt_status=$? 
- case x$lt_status in - x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; - x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; - esac - else : - # compilation failed - lt_cv_dlopen_self_static=no - fi -fi -rm -fr conftest* - - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 -$as_echo "$lt_cv_dlopen_self_static" >&6; } - fi - - CPPFLAGS=$save_CPPFLAGS - LDFLAGS=$save_LDFLAGS - LIBS=$save_LIBS - ;; - esac - - case $lt_cv_dlopen_self in - yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; - *) enable_dlopen_self=unknown ;; - esac - - case $lt_cv_dlopen_self_static in - yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; - *) enable_dlopen_self_static=unknown ;; - esac -fi - - - - - - - - - - - - - - - - - -striplib= -old_striplib= -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 -$as_echo_n "checking whether stripping libraries is possible... " >&6; } -if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then - test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" - test -z "$striplib" && striplib="$STRIP --strip-unneeded" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else -# FIXME - insert some real tests, host_os isn't really good enough - case $host_os in - darwin*) - if test -n "$STRIP"; then - striplib="$STRIP -x" - old_striplib="$STRIP -S" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - fi - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ;; - esac -fi - - - - - - - - - - - - - # Report what library types will actually be built - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 -$as_echo_n "checking if libtool supports shared libraries... " >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 -$as_echo "$can_build_shared" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 -$as_echo_n "checking whether to build shared libraries... " >&6; } - test no = "$can_build_shared" && enable_shared=no - - # On AIX, shared libraries and static libraries use the same namespace, and - # are all built from PIC. - case $host_os in - aix3*) - test yes = "$enable_shared" && enable_static=no - if test -n "$RANLIB"; then - archive_cmds="$archive_cmds~\$RANLIB \$lib" - postinstall_cmds='$RANLIB $lib' - fi - ;; - - aix[4-9]*) - if test ia64 != "$host_cpu"; then - case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in - yes,aix,yes) ;; # shared object as lib.so file only - yes,svr4,*) ;; # shared object as lib.so archive member only - yes,*) enable_static=no ;; # shared object in lib.a archive as well - esac - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 -$as_echo "$enable_shared" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 -$as_echo_n "checking whether to build static libraries... " >&6; } - # Make sure either enable_shared or enable_static is yes. 
- test yes = "$enable_shared" || enable_static=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 -$as_echo "$enable_static" >&6; } - - - - -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - -CC=$lt_save_CC - - - - - - - - - - - - - - - - ac_config_commands="$ac_config_commands libtool" - - - - -# Only expand once: - - -# Find a good install program. We prefer a C program (faster), -# so one script is as good as another. But avoid the broken or -# incompatible versions: -# SysV /etc/install, /usr/sbin/install -# SunOS /usr/etc/install -# IRIX /sbin/install -# AIX /bin/install -# AmigaOS /C/install, which installs bootblocks on floppy discs -# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag -# AFS /usr/afsws/bin/install, which mishandles nonexistent args -# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" -# OS/2's system install, which has a completely different semantic -# ./install, which can be erroneously created by make from ./install.sh. -# Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 -$as_echo_n "checking for a BSD-compatible install... " >&6; } -if test -z "$INSTALL"; then -if ${ac_cv_path_install+:} false; then : - $as_echo_n "(cached) " >&6 -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ - /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ - /usr/ucb/* ) ;; - *) - # OSF1 and SCO ODT 3.0 have their own names for install. - # Don't use installbsd from OSF since it installs stuff as root - # by default. - for ac_prog in ginstall scoinst install; do - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then - if test $ac_prog = install && - grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # AIX install. It has an incompatible calling convention. - : - elif test $ac_prog = install && - grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then - # program-specific install script used by HP pwplus--don't use. - : - else - rm -rf conftest.one conftest.two conftest.dir - echo one > conftest.one - echo two > conftest.two - mkdir conftest.dir - if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && - test -s conftest.one && test -s conftest.two && - test -s conftest.dir/conftest.one && - test -s conftest.dir/conftest.two - then - ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" - break 3 - fi - fi - fi - done - done - ;; -esac - - done -IFS=$as_save_IFS - -rm -rf conftest.one conftest.two conftest.dir - -fi - if test "${ac_cv_path_install+set}" = set; then - INSTALL=$ac_cv_path_install - else - # As a last resort, use the slow shell script. Don't cache a - # value for INSTALL within a source directory, because that will - # break other packages using the cache if that directory is - # removed, or if the value is a relative name. 
- INSTALL=$ac_install_sh - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 -$as_echo "$INSTALL" >&6; } - -# Use test -z because SunOS4 sh mishandles braces in ${var-val}. -# It thinks the first close brace ends the variable substitution. -test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' - -test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' - -test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' - - -######### -# Enable large file support (if special flags are necessary) -# -# Check whether --enable-largefile was given. -if test "${enable_largefile+set}" = set; then : - enableval=$enable_largefile; -fi - -if test "$enable_largefile" != no; then - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 -$as_echo_n "checking for special C compiler options needed for large files... " >&6; } -if ${ac_cv_sys_largefile_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_sys_largefile_CC=no - if test "$GCC" != yes; then - ac_save_CC=$CC - while :; do - # IRIX 6.2 and later do not support large files by default, - # so use the C compiler's -n32 option if that helps. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ - - ; - return 0; -} -_ACEOF - if ac_fn_c_try_compile "$LINENO"; then : - break -fi -rm -f core conftest.err conftest.$ac_objext - CC="$CC -n32" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_largefile_CC=' -n32'; break -fi -rm -f core conftest.err conftest.$ac_objext - break - done - CC=$ac_save_CC - rm -f conftest.$ac_ext - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 -$as_echo "$ac_cv_sys_largefile_CC" >&6; } - if test "$ac_cv_sys_largefile_CC" != no; then - CC=$CC$ac_cv_sys_largefile_CC - fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 -$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } -if ${ac_cv_sys_file_offset_bits+:} false; then : - $as_echo_n "(cached) " >&6 -else - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_file_offset_bits=no; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#define _FILE_OFFSET_BITS 64 -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. 
*/ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_file_offset_bits=64; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_cv_sys_file_offset_bits=unknown - break -done -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 -$as_echo "$ac_cv_sys_file_offset_bits" >&6; } -case $ac_cv_sys_file_offset_bits in #( - no | unknown) ;; - *) -cat >>confdefs.h <<_ACEOF -#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits -_ACEOF -;; -esac -rm -rf conftest* - if test $ac_cv_sys_file_offset_bits = unknown; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 -$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } -if ${ac_cv_sys_large_files+:} false; then : - $as_echo_n "(cached) " >&6 -else - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_large_files=no; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#define _LARGE_FILES 1 -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 
1 : -1]; -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_large_files=1; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_cv_sys_large_files=unknown - break -done -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 -$as_echo "$ac_cv_sys_large_files" >&6; } -case $ac_cv_sys_large_files in #( - no | unknown) ;; - *) -cat >>confdefs.h <<_ACEOF -#define _LARGE_FILES $ac_cv_sys_large_files -_ACEOF -;; -esac -rm -rf conftest* - fi - - -fi - - -######### -# Check for needed/wanted data types -ac_fn_c_check_type "$LINENO" "int8_t" "ac_cv_type_int8_t" "$ac_includes_default" -if test "x$ac_cv_type_int8_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_INT8_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "int16_t" "ac_cv_type_int16_t" "$ac_includes_default" -if test "x$ac_cv_type_int16_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_INT16_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "int32_t" "ac_cv_type_int32_t" "$ac_includes_default" -if test "x$ac_cv_type_int32_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_INT32_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "int64_t" "ac_cv_type_int64_t" "$ac_includes_default" -if test "x$ac_cv_type_int64_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_INT64_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default" -if test "x$ac_cv_type_intptr_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_INTPTR_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "uint8_t" "ac_cv_type_uint8_t" "$ac_includes_default" -if test "x$ac_cv_type_uint8_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_UINT8_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "uint16_t" "ac_cv_type_uint16_t" "$ac_includes_default" -if test "x$ac_cv_type_uint16_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_UINT16_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "uint32_t" "ac_cv_type_uint32_t" "$ac_includes_default" -if test "x$ac_cv_type_uint32_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_UINT32_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "uint64_t" "ac_cv_type_uint64_t" "$ac_includes_default" -if test "x$ac_cv_type_uint64_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_UINT64_T 1 -_ACEOF - - -fi -ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" -if test "x$ac_cv_type_uintptr_t" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_UINTPTR_T 1 -_ACEOF - - -fi - - -######### -# Check for needed/wanted headers -for ac_header in sys/types.h stdlib.h stdint.h inttypes.h malloc.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -######### -# Figure out whether or not we have these functions -# -for ac_func in fdatasync gmtime_r isnan localtime_r localtime_s malloc_usable_size strchrnul usleep utime pread pread64 pwrite pwrite64 -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - -######### -# By 
default, we use the amalgamation (this may be changed below...) -# -USE_AMALGAMATION=1 - -######### -# See whether we can run specific tclsh versions known to work well; -# if not, then we fall back to plain tclsh. -# TODO: try other versions before falling back? -# -for ac_prog in tclsh8.7 tclsh8.6 tclsh8.5 tclsh -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_TCLSH_CMD+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$TCLSH_CMD"; then - ac_cv_prog_TCLSH_CMD="$TCLSH_CMD" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_TCLSH_CMD="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -TCLSH_CMD=$ac_cv_prog_TCLSH_CMD -if test -n "$TCLSH_CMD"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $TCLSH_CMD" >&5 -$as_echo "$TCLSH_CMD" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$TCLSH_CMD" && break -done -test -n "$TCLSH_CMD" || TCLSH_CMD="none" - -if test "$TCLSH_CMD" = "none"; then - # If we can't find a local tclsh, then building the amalgamation will fail. - # We act as though --disable-amalgamation has been used. - echo "Warning: can't find tclsh - defaulting to non-amalgamation build." - USE_AMALGAMATION=0 - TCLSH_CMD="tclsh" -fi - - - -if test "x${TCLLIBDIR+set}" != "xset" ; then - TCLLIBDIR='$(libdir)' - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` ; do - if test -d $i ; then - TCLLIBDIR=$i - break - fi - done - TCLLIBDIR="${TCLLIBDIR}/sqlite3" -fi - -######### -# Set up an appropriate program prefix -# -if test "$program_prefix" = "NONE"; then - program_prefix="" -fi - - -VERSION=`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'` -{ $as_echo "$as_me:${as_lineno-$LINENO}: Version set to $VERSION" >&5 -$as_echo "$as_me: Version set to $VERSION" >&6;} - -RELEASE=`cat $srcdir/VERSION` -{ $as_echo "$as_me:${as_lineno-$LINENO}: Release set to $RELEASE" >&5 -$as_echo "$as_me: Release set to $RELEASE" >&6;} - - -######### -# Locate a compiler for the build machine. This compiler should -# generate command-line programs that run on the build machine. -# -if test x"$cross_compiling" = xno; then - BUILD_CC=$CC - BUILD_CFLAGS=$CFLAGS -else - if test "${BUILD_CC+set}" != set; then - for ac_prog in gcc cc cl -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_BUILD_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$BUILD_CC"; then - ac_cv_prog_BUILD_CC="$BUILD_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_BUILD_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - -fi -fi -BUILD_CC=$ac_cv_prog_BUILD_CC -if test -n "$BUILD_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $BUILD_CC" >&5 -$as_echo "$BUILD_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - test -n "$BUILD_CC" && break -done - - fi - if test "${BUILD_CFLAGS+set}" != set; then - BUILD_CFLAGS="-g" - fi -fi - - -########## -# Do we want to support multithreaded use of sqlite -# -# Check whether --enable-threadsafe was given. -if test "${enable_threadsafe+set}" = set; then : - enableval=$enable_threadsafe; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support threadsafe operation" >&5 -$as_echo_n "checking whether to support threadsafe operation... " >&6; } -if test "$enable_threadsafe" = "no"; then - SQLITE_THREADSAFE=0 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -else - SQLITE_THREADSAFE=1 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi - - -if test "$SQLITE_THREADSAFE" = "1"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_create" >&5 -$as_echo_n "checking for library containing pthread_create... " >&6; } -if ${ac_cv_search_pthread_create+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char pthread_create (); -int -main () -{ -return pthread_create (); - ; - return 0; -} -_ACEOF -for ac_lib in '' pthread; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_pthread_create=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_pthread_create+:} false; then : - break -fi -done -if ${ac_cv_search_pthread_create+:} false; then : - -else - ac_cv_search_pthread_create=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_create" >&5 -$as_echo "$ac_cv_search_pthread_create" >&6; } -ac_res=$ac_cv_search_pthread_create -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing pthread_mutexattr_init" >&5 -$as_echo_n "checking for library containing pthread_mutexattr_init... " >&6; } -if ${ac_cv_search_pthread_mutexattr_init+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char pthread_mutexattr_init (); -int -main () -{ -return pthread_mutexattr_init (); - ; - return 0; -} -_ACEOF -for ac_lib in '' pthread; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_pthread_mutexattr_init=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_pthread_mutexattr_init+:} false; then : - break -fi -done -if ${ac_cv_search_pthread_mutexattr_init+:} false; then : - -else - ac_cv_search_pthread_mutexattr_init=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_pthread_mutexattr_init" >&5 -$as_echo "$ac_cv_search_pthread_mutexattr_init" >&6; } -ac_res=$ac_cv_search_pthread_mutexattr_init -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -fi - -########## -# Which crypto library do we use -# - -# Check whether --with-crypto-lib was given. -if test "${with_crypto_lib+set}" = set; then : - withval=$with_crypto_lib; crypto_lib=$withval -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for crypto library to use" >&5 -$as_echo_n "checking for crypto library to use... " >&6; } -if test "$crypto_lib" = "none"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 -$as_echo "none" >&6; } -else - if test "$crypto_lib" = "commoncrypto"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_CC" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_CC" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: commoncrypto" >&5 -$as_echo "commoncrypto" >&6; } - else - if test "$crypto_lib" = "libtomcrypt"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_LIBTOMCRYPT" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_LIBTOMCRYPT" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: libtomcrypt" >&5 -$as_echo "libtomcrypt" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for register_cipher in -ltomcrypt" >&5 -$as_echo_n "checking for register_cipher in -ltomcrypt... " >&6; } -if ${ac_cv_lib_tomcrypt_register_cipher+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-ltomcrypt $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char register_cipher (); -int -main () -{ -return register_cipher (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_tomcrypt_register_cipher=yes -else - ac_cv_lib_tomcrypt_register_cipher=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_tomcrypt_register_cipher" >&5 -$as_echo "$ac_cv_lib_tomcrypt_register_cipher" >&6; } -if test "x$ac_cv_lib_tomcrypt_register_cipher" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBTOMCRYPT 1 -_ACEOF - - LIBS="-ltomcrypt $LIBS" - -else - as_fn_error $? "Library crypto not found. 
Install libtomcrypt!\"" "$LINENO" 5 -fi - - else - if test "$crypto_lib" = "nss"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_NSS" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_NSS" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: nss3" >&5 -$as_echo "nss3" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PK11_Decrypt in -lnss3" >&5 -$as_echo_n "checking for PK11_Decrypt in -lnss3... " >&6; } -if ${ac_cv_lib_nss3_PK11_Decrypt+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lnss3 $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char PK11_Decrypt (); -int -main () -{ -return PK11_Decrypt (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_nss3_PK11_Decrypt=yes -else - ac_cv_lib_nss3_PK11_Decrypt=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nss3_PK11_Decrypt" >&5 -$as_echo "$ac_cv_lib_nss3_PK11_Decrypt" >&6; } -if test "x$ac_cv_lib_nss3_PK11_Decrypt" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBNSS3 1 -_ACEOF - - LIBS="-lnss3 $LIBS" - -else - as_fn_error $? "Library crypto not found. Install nss!\"" "$LINENO" 5 -fi - - else - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_OPENSSL" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_OPENSSL" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: openssl" >&5 -$as_echo "openssl" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for HMAC_Init_ex in -lcrypto" >&5 -$as_echo_n "checking for HMAC_Init_ex in -lcrypto... " >&6; } -if ${ac_cv_lib_crypto_HMAC_Init_ex+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lcrypto $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char HMAC_Init_ex (); -int -main () -{ -return HMAC_Init_ex (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_crypto_HMAC_Init_ex=yes -else - ac_cv_lib_crypto_HMAC_Init_ex=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_HMAC_Init_ex" >&5 -$as_echo "$ac_cv_lib_crypto_HMAC_Init_ex" >&6; } -if test "x$ac_cv_lib_crypto_HMAC_Init_ex" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBCRYPTO 1 -_ACEOF - - LIBS="-lcrypto $LIBS" - -else - as_fn_error $? "Library crypto not found. Install openssl!\"" "$LINENO" 5 -fi - - fi - fi - fi -fi - -########## -# Do we want to allow a connection created in one thread to be used -# in another thread. This does not work on many Linux systems (ex: RedHat 9) -# due to bugs in the threading implementations. This is thus off by default. -# -# Check whether --enable-cross-thread-connections was given. 
-if test "${enable_cross_thread_connections+set}" = set; then : - enableval=$enable_cross_thread_connections; -else - enable_xthreadconnect=no -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to allow connections to be shared across threads" >&5 -$as_echo_n "checking whether to allow connections to be shared across threads... " >&6; } -if test "$enable_xthreadconnect" = "no"; then - XTHREADCONNECT='' - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -else - XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi - - -########## -# Do we want to support release -# -# Check whether --enable-releasemode was given. -if test "${enable_releasemode+set}" = set; then : - enableval=$enable_releasemode; -else - enable_releasemode=no -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support shared library linked as release mode or not" >&5 -$as_echo_n "checking whether to support shared library linked as release mode or not... " >&6; } -if test "$enable_releasemode" = "no"; then - ALLOWRELEASE="" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -else - ALLOWRELEASE="-release `cat $srcdir/VERSION`" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -fi - - -########## -# Do we want temporary databases in memory -# -# Check whether --enable-tempstore was given. -if test "${enable_tempstore+set}" = set; then : - enableval=$enable_tempstore; -else - enable_tempstore=no -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to use an in-ram database for temporary tables" >&5 -$as_echo_n "checking whether to use an in-ram database for temporary tables... " >&6; } -case "$enable_tempstore" in - never ) - TEMP_STORE=0 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: never" >&5 -$as_echo "never" >&6; } - ;; - no ) - TEMP_STORE=1 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ;; - yes ) - TEMP_STORE=2 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - ;; - always ) - TEMP_STORE=3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: always" >&5 -$as_echo "always" >&6; } - ;; - * ) - TEMP_STORE=1 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } - ;; -esac - - - -########### -# Lots of things are different if we are compiling for Windows using -# the CYGWIN environment. So check for that special case and handle -# things accordingly. -# -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if executables have the .exe suffix" >&5 -$as_echo_n "checking if executables have the .exe suffix... 
" >&6; } -if test "$config_BUILD_EXEEXT" = ".exe"; then - CYGWIN=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unknown" >&5 -$as_echo "unknown" >&6; } -fi -if test "$CYGWIN" != "yes"; then - -case $host_os in - *cygwin* ) CYGWIN=yes;; - * ) CYGWIN=no;; -esac - -fi -if test "$CYGWIN" = "yes"; then - BUILD_EXEEXT=.exe -else - BUILD_EXEEXT=$EXEEXT -fi -if test x"$cross_compiling" = xno; then - TARGET_EXEEXT=$BUILD_EXEEXT -else - TARGET_EXEEXT=$config_TARGET_EXEEXT -fi -if test "$TARGET_EXEEXT" = ".exe"; then - SQLITE_OS_UNIX=0 - SQLITE_OS_WIN=1 - CFLAGS="$CFLAGS -DSQLITE_OS_WIN=1" -else - SQLITE_OS_UNIX=1 - SQLITE_OS_WIN=0 - CFLAGS="$CFLAGS -DSQLITE_OS_UNIX=1" -fi - - - - - - -########## -# Figure out all the parameters needed to compile against Tcl. -# -# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG -# macros in the in the tcl.m4 file of the standard TCL distribution. -# Those macros could not be used directly since we have to make some -# minor changes to accomodate systems that do not have TCL installed. -# -# Check whether --enable-tcl was given. -if test "${enable_tcl+set}" = set; then : - enableval=$enable_tcl; use_tcl=$enableval -else - use_tcl=yes -fi - -if test "${use_tcl}" = "yes" ; then - -# Check whether --with-tcl was given. -if test "${with_tcl+set}" = set; then : - withval=$with_tcl; with_tclconfig=${withval} -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for Tcl configuration" >&5 -$as_echo_n "checking for Tcl configuration... " >&6; } - if ${ac_cv_c_tclconfig+:} false; then : - $as_echo_n "(cached) " >&6 -else - - # First check to see if --with-tcl was specified. - if test x"${with_tclconfig}" != x ; then - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` - else - as_fn_error $? "${with_tclconfig} directory doesn't contain tclConfig.sh" "$LINENO" 5 - fi - fi - - # Start autosearch by asking tclsh - if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # On ubuntu 14.10, $auto_path on tclsh is not quite correct. - # So try again after applying corrections. - if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD} | sed 's,/tcltk/tcl,/tcl,g'` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # Recent versions of Xcode on Macs hid the tclConfig.sh file - # in a strange place. 
- if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX*.sdk/usr/lib - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # then check for a private Tcl installation - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ../tcl \ - `ls -dr ../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../tcl[8-9].[0-9]* 2>/dev/null` \ - ../../tcl \ - `ls -dr ../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../../tcl[8-9].[0-9]* 2>/dev/null` \ - ../../../tcl \ - `ls -dr ../../../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ../../../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ../../../tcl[8-9].[0-9]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi - - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - `ls -d ${libdir} 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i; pwd)` - break - fi - done - fi - - # check in a few other private locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ${srcdir}/../tcl \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9].[0-9]* 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9] 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[8-9].[0-9]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi - -fi - - - if test x"${ac_cv_c_tclconfig}" = x ; then - use_tcl=no - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Can't find Tcl configuration definitions" >&5 -$as_echo "$as_me: WARNING: Can't find Tcl configuration definitions" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&5 -$as_echo "$as_me: WARNING: *** Without Tcl the regression tests cannot be executed ***" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&5 -$as_echo "$as_me: WARNING: *** Consider using --with-tcl=... to define location of Tcl ***" >&2;} - else - TCL_BIN_DIR=${ac_cv_c_tclconfig} - { $as_echo "$as_me:${as_lineno-$LINENO}: result: found $TCL_BIN_DIR/tclConfig.sh" >&5 -$as_echo "found $TCL_BIN_DIR/tclConfig.sh" >&6; } - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for existence of $TCL_BIN_DIR/tclConfig.sh" >&5 -$as_echo_n "checking for existence of $TCL_BIN_DIR/tclConfig.sh... " >&6; } - if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: loading" >&5 -$as_echo "loading" >&6; } - . $TCL_BIN_DIR/tclConfig.sh - else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: file not found" >&5 -$as_echo "file not found" >&6; } - fi - - # - # If the TCL_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. - # For example, the variable TCL_LIB_SPEC will be set to the value - # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC - # instead of TCL_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. 
- # - - if test -f $TCL_BIN_DIR/Makefile ; then - TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} - TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} - TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} - fi - - # - # eval is required to do the TCL_DBGX substitution - # - - eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" - eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" - - eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" - eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" - eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" - - - - - - - - - - - - - - - fi -fi -if test "${use_tcl}" = "no" ; then - HAVE_TCL="" -else - HAVE_TCL=1 -fi - - -########## -# Figure out what C libraries are required to compile programs -# that use "readline()" library. -# -TARGET_READLINE_LIBS="" -TARGET_READLINE_INC="" -TARGET_HAVE_READLINE=0 -TARGET_HAVE_EDITLINE=0 -# Check whether --enable-editline was given. -if test "${enable_editline+set}" = set; then : - enableval=$enable_editline; with_editline=$enableval -else - with_editline=auto -fi - -# Check whether --enable-readline was given. -if test "${enable_readline+set}" = set; then : - enableval=$enable_readline; with_readline=$enableval -else - with_readline=auto -fi - - -if test x"$with_editline" != xno; then - sLIBS=$LIBS - LIBS="" - TARGET_HAVE_EDITLINE=1 - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing readline" >&5 -$as_echo_n "checking for library containing readline... " >&6; } -if ${ac_cv_search_readline+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char readline (); -int -main () -{ -return readline (); - ; - return 0; -} -_ACEOF -for ac_lib in '' edit; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_readline=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_readline+:} false; then : - break -fi -done -if ${ac_cv_search_readline+:} false; then : - -else - ac_cv_search_readline=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_readline" >&5 -$as_echo "$ac_cv_search_readline" >&6; } -ac_res=$ac_cv_search_readline -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - with_readline=no -else - TARGET_HAVE_EDITLINE=0 -fi - - TARGET_READLINE_LIBS=$LIBS - LIBS=$sLIBS -fi -if test x"$with_readline" != xno; then - found="yes" - - -# Check whether --with-readline-lib was given. -if test "${with_readline_lib+set}" = set; then : - withval=$with_readline_lib; with_readline_lib=$withval -else - with_readline_lib="auto" -fi - - if test "x$with_readline_lib" = xauto; then - save_LIBS="$LIBS" - LIBS="" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing tgetent" >&5 -$as_echo_n "checking for library containing tgetent... " >&6; } -if ${ac_cv_search_tgetent+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. 
- Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char tgetent (); -int -main () -{ -return tgetent (); - ; - return 0; -} -_ACEOF -for ac_lib in '' readline ncurses curses termcap; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_tgetent=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_tgetent+:} false; then : - break -fi -done -if ${ac_cv_search_tgetent+:} false; then : - -else - ac_cv_search_tgetent=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_tgetent" >&5 -$as_echo "$ac_cv_search_tgetent" >&6; } -ac_res=$ac_cv_search_tgetent -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - term_LIBS="$LIBS" -else - term_LIBS="" -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for readline in -lreadline" >&5 -$as_echo_n "checking for readline in -lreadline... " >&6; } -if ${ac_cv_lib_readline_readline+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lreadline $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char readline (); -int -main () -{ -return readline (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_readline_readline=yes -else - ac_cv_lib_readline_readline=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_readline_readline" >&5 -$as_echo "$ac_cv_lib_readline_readline" >&6; } -if test "x$ac_cv_lib_readline_readline" = xyes; then : - TARGET_READLINE_LIBS="-lreadline" -else - found="no" -fi - - TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" - LIBS="$save_LIBS" - else - TARGET_READLINE_LIBS="$with_readline_lib" - fi - - -# Check whether --with-readline-inc was given. -if test "${with_readline_inc+set}" = set; then : - withval=$with_readline_inc; with_readline_inc=$withval -else - with_readline_inc="auto" -fi - - if test "x$with_readline_inc" = xauto; then - ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_h" = xyes; then : - found="yes" -else - - found="no" - if test "$cross_compiling" != yes; then - for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do - for subdir in include include/readline; do - as_ac_File=`$as_echo "ac_cv_file_$dir/$subdir/readline.h" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $dir/$subdir/readline.h" >&5 -$as_echo_n "checking for $dir/$subdir/readline.h... " >&6; } -if eval \${$as_ac_File+:} false; then : - $as_echo_n "(cached) " >&6 -else - test "$cross_compiling" = yes && - as_fn_error $? 
"cannot check for file existence when cross compiling" "$LINENO" 5 -if test -r "$dir/$subdir/readline.h"; then - eval "$as_ac_File=yes" -else - eval "$as_ac_File=no" -fi -fi -eval ac_res=\$$as_ac_File - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -if eval test \"x\$"$as_ac_File"\" = x"yes"; then : - found=yes -fi - - if test "$found" = "yes"; then - TARGET_READLINE_INC="-I$dir/$subdir" - break - fi - done - test "$found" = "yes" && break - done - fi - -fi - - - else - TARGET_READLINE_INC="$with_readline_inc" - fi - - if test x"$found" = xno; then - TARGET_READLINE_LIBS="" - TARGET_READLINE_INC="" - TARGET_HAVE_READLINE=0 - else - TARGET_HAVE_READLINE=1 - fi -fi - - - - - - -########## -# Figure out what C libraries are required to compile programs -# that use "fdatasync()" function. -# -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 -$as_echo_n "checking for library containing fdatasync... " >&6; } -if ${ac_cv_search_fdatasync+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char fdatasync (); -int -main () -{ -return fdatasync (); - ; - return 0; -} -_ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_fdatasync=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_fdatasync+:} false; then : - break -fi -done -if ${ac_cv_search_fdatasync+:} false; then : - -else - ac_cv_search_fdatasync=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 -$as_echo "$ac_cv_search_fdatasync" >&6; } -ac_res=$ac_cv_search_fdatasync -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - - -######### -# check for debug enabled -# Check whether --enable-debug was given. -if test "${enable_debug+set}" = set; then : - enableval=$enable_debug; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build type" >&5 -$as_echo_n "checking build type... " >&6; } -if test "${enable_debug}" = "yes" ; then - TARGET_DEBUG="-DSQLITE_DEBUG=1 -DSQLITE_ENABLE_SELECTTRACE -DSQLITE_ENABLE_WHERETRACE -O0" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: debug" >&5 -$as_echo "debug" >&6; } -else - TARGET_DEBUG="-DNDEBUG" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: release" >&5 -$as_echo "release" >&6; } -fi - - -######### -# See whether we should use the amalgamation to build -# Check whether --enable-amalgamation was given. -if test "${enable_amalgamation+set}" = set; then : - enableval=$enable_amalgamation; -fi - -if test "${enable_amalgamation}" = "no" ; then - USE_AMALGAMATION=0 -fi - - -######### -# By default, amalgamation sqlite3.c will have #line directives. -# This is a build option not shown by ./configure --help -# To control it, use configure option: amalgamation_line_macros=? -# where ? is no to suppress #line directives or yes to create them. 
-AMALGAMATION_LINE_MACROS=--linemacros=1 - - -if test "${amalgamation_line_macros+set}" = set; then : - enableval=$amalgamation_line_macros; -fi -if test "${amalgamation_line_macros}" = "yes" ; then - AMALGAMATION_LINE_MACROS=--linemacros=1 -fi -if test "${amalgamation_line_macros}" = "no" ; then - AMALGAMATION_LINE_MACROS=--linemacros=0 -fi - -######### -# Look for zlib. Only needed by extensions and by the sqlite3.exe shell -for ac_header in zlib.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" -if test "x$ac_cv_header_zlib_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_ZLIB_H 1 -_ACEOF - -fi - -done - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing deflate" >&5 -$as_echo_n "checking for library containing deflate... " >&6; } -if ${ac_cv_search_deflate+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char deflate (); -int -main () -{ -return deflate (); - ; - return 0; -} -_ACEOF -for ac_lib in '' z; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_deflate=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_deflate+:} false; then : - break -fi -done -if ${ac_cv_search_deflate+:} false; then : - -else - ac_cv_search_deflate=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_deflate" >&5 -$as_echo "$ac_cv_search_deflate" >&6; } -ac_res=$ac_cv_search_deflate -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - HAVE_ZLIB="-DSQLITE_HAVE_ZLIB=1" -else - HAVE_ZLIB="" -fi - - - -######### -# See whether we should allow loadable extensions -# Check whether --enable-load-extension was given. -if test "${enable_load_extension+set}" = set; then : - enableval=$enable_load_extension; -else - enable_load_extension=yes -fi - -if test "${enable_load_extension}" = "yes" ; then - OPT_FEATURE_FLAGS="" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 -$as_echo_n "checking for library containing dlopen... " >&6; } -if ${ac_cv_search_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); -int -main () -{ -return dlopen (); - ; - return 0; -} -_ACEOF -for ac_lib in '' dl; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_dlopen=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_dlopen+:} false; then : - break -fi -done -if ${ac_cv_search_dlopen+:} false; then : - -else - ac_cv_search_dlopen=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 -$as_echo "$ac_cv_search_dlopen" >&6; } -ac_res=$ac_cv_search_dlopen -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -else - OPT_FEATURE_FLAGS="-DSQLITE_OMIT_LOAD_EXTENSION=1" -fi - -########## -# Do we want to support math functions -# -# Check whether --enable-math was given. -if test "${enable_math+set}" = set; then : - enableval=$enable_math; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support math functions" >&5 -$as_echo_n "checking whether to support math functions... " >&6; } -if test "$enable_math" = "no"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MATH_FUNCTIONS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ceil" >&5 -$as_echo_n "checking for library containing ceil... " >&6; } -if ${ac_cv_search_ceil+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char ceil (); -int -main () -{ -return ceil (); - ; - return 0; -} -_ACEOF -for ac_lib in '' m; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_ceil=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_ceil+:} false; then : - break -fi -done -if ${ac_cv_search_ceil+:} false; then : - -else - ac_cv_search_ceil=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ceil" >&5 -$as_echo "$ac_cv_search_ceil" >&6; } -ac_res=$ac_cv_search_ceil -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -fi - - -######## -# The --enable-all argument is short-hand to enable -# multiple extensions. -# Check whether --enable-all was given. -if test "${enable_all+set}" = set; then : - enableval=$enable_all; -fi - - -########## -# Do we want to support memsys3 and/or memsys5 -# -# Check whether --enable-memsys5 was given. -if test "${enable_memsys5+set}" = set; then : - enableval=$enable_memsys5; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support MEMSYS5" >&5 -$as_echo_n "checking whether to support MEMSYS5... 
" >&6; } -if test "${enable_memsys5}" = "yes"; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MEMSYS5" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -# Check whether --enable-memsys3 was given. -if test "${enable_memsys3+set}" = set; then : - enableval=$enable_memsys3; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support MEMSYS3" >&5 -$as_echo_n "checking whether to support MEMSYS3... " >&6; } -if test "${enable_memsys3}" = "yes" -a "${enable_memsys5}" = "no"; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MEMSYS3" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable Full Text Search extensions -# Check whether --enable-fts3 was given. -if test "${enable_fts3+set}" = set; then : - enableval=$enable_fts3; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support FTS3" >&5 -$as_echo_n "checking whether to support FTS3... " >&6; } -if test "${enable_fts3}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS3" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -# Check whether --enable-fts4 was given. -if test "${enable_fts4+set}" = set; then : - enableval=$enable_fts4; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support FTS4" >&5 -$as_echo_n "checking whether to support FTS4... " >&6; } -if test "${enable_fts4}" = "yes" -o "${enable_all}" = "yes" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS4" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 -$as_echo_n "checking for library containing log... " >&6; } -if ${ac_cv_search_log+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char log (); -int -main () -{ -return log (); - ; - return 0; -} -_ACEOF -for ac_lib in '' m; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_log=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_log+:} false; then : - break -fi -done -if ${ac_cv_search_log+:} false; then : - -else - ac_cv_search_log=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 -$as_echo "$ac_cv_search_log" >&6; } -ac_res=$ac_cv_search_log -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi -# Check whether --enable-fts5 was given. 
-if test "${enable_fts5+set}" = set; then : - enableval=$enable_fts5; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support FTS5" >&5 -$as_echo_n "checking whether to support FTS5... " >&6; } -if test "${enable_fts5}" = "yes" -o "${enable_all}" = "yes" ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS5" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing log" >&5 -$as_echo_n "checking for library containing log... " >&6; } -if ${ac_cv_search_log+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char log (); -int -main () -{ -return log (); - ; - return 0; -} -_ACEOF -for ac_lib in '' m; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_log=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_log+:} false; then : - break -fi -done -if ${ac_cv_search_log+:} false; then : - -else - ac_cv_search_log=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_log" >&5 -$as_echo "$ac_cv_search_log" >&6; } -ac_res=$ac_cv_search_log -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -fi - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable JSON1 -# Check whether --enable-json1 was given. -if test "${enable_json1+set}" = set; then : - enableval=$enable_json1; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support JSON" >&5 -$as_echo_n "checking whether to support JSON... " >&6; } -if test "${enable_json1}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_JSON1" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable the LIMIT clause on UPDATE and DELETE -# statements. -# Check whether --enable-update-limit was given. -if test "${enable_update_limit+set}" = set; then : - enableval=$enable_update_limit; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support LIMIT on UPDATE and DELETE statements" >&5 -$as_echo_n "checking whether to support LIMIT on UPDATE and DELETE statements... " >&6; } -if test "${enable_update_limit}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable GEOPOLY -# Check whether --enable-geopoly was given. 
-if test "${enable_geopoly+set}" = set; then : - enableval=$enable_geopoly; enable_geopoly=yes -else - enable_geopoly=no -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support GEOPOLY" >&5 -$as_echo_n "checking whether to support GEOPOLY... " >&6; } -if test "${enable_geopoly}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_GEOPOLY" - enable_rtree=yes - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable RTREE -# Check whether --enable-rtree was given. -if test "${enable_rtree+set}" = set; then : - enableval=$enable_rtree; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support RTREE" >&5 -$as_echo_n "checking whether to support RTREE... " >&6; } -if test "${enable_rtree}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_RTREE" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# See whether we should enable the SESSION extension -# Check whether --enable-session was given. -if test "${enable_session+set}" = set; then : - enableval=$enable_session; -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to support SESSION" >&5 -$as_echo_n "checking whether to support SESSION... " >&6; } -if test "${enable_session}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_SESSION" - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_PREUPDATE_HOOK" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - -######### -# attempt to duplicate any OMITS and ENABLES into the ${OPT_FEATURE_FLAGS} parameter -for option in $CFLAGS $CPPFLAGS -do - case $option in - -DSQLITE_OMIT*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; - -DSQLITE_ENABLE*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; - esac -done - - - -# attempt to remove any OMITS and ENABLES from the $(CFLAGS) parameter -ac_temp_CFLAGS="" -for option in $CFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_CFLAGS="$ac_temp_CFLAGS $option";; - esac -done -CFLAGS=$ac_temp_CFLAGS - - -# attempt to remove any OMITS and ENABLES from the $(CPPFLAGS) parameter -ac_temp_CPPFLAGS="" -for option in $CPPFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_CPPFLAGS="$ac_temp_CPPFLAGS $option";; - esac -done -CPPFLAGS=$ac_temp_CPPFLAGS - - -# attempt to remove any OMITS and ENABLES from the $(BUILD_CFLAGS) parameter -ac_temp_BUILD_CFLAGS="" -for option in $BUILD_CFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_BUILD_CFLAGS="$ac_temp_BUILD_CFLAGS $option";; - esac -done -BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS - - -######### -# See whether we should use GCOV -# Check whether --enable-gcov was given. 
-if test "${enable_gcov+set}" = set; then : - enableval=$enable_gcov; -fi - -if test "${use_gcov}" = "yes" ; then - USE_GCOV=1 -else - USE_GCOV=0 -fi - - -######### -# Enable/disabled amalagamation line macros -######## -AMALGAMATION_LINE_MACROS=--linemacros=0 -if test "${amalgamation_line_macros}" = "yes" ; then - AMALGAMATION_LINE_MACROS=--linemacros=1 -fi -if test "${amalgamation_line_macros}" = "no" ; then - AMALGAMATION_LINE_MACROS=--linemacros=0 -fi - - -######### -# Output the config header -ac_config_headers="$ac_config_headers config.h" - - -######### -# Generate the output files. -# - -ac_config_files="$ac_config_files Makefile sqlcipher.pc" - -cat >confcache <<\_ACEOF -# This file is a shell script that caches the results of configure -# tests run on this system so they can be shared between configure -# scripts and configure runs, see configure's option --config-cache. -# It is not useful on other systems. If it contains results you don't -# want to keep, you may remove or edit it. -# -# config.status only pays attention to the cache file if you give it -# the --recheck option to rerun configure. -# -# `ac_cv_env_foo' variables (set or unset) will be overridden when -# loading this file, other *unset* `ac_cv_foo' will be assigned the -# following values. - -_ACEOF - -# The following way of writing the cache mishandles newlines in values, -# but we know of no workaround that is simple, portable, and efficient. -# So, we kill variables containing newlines. -# Ultrix sh set writes to stderr and can't be redirected directly, -# and sets the high bit in the cache file unless we assign to the vars. -( - for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do - eval ac_val=\$$ac_var - case $ac_val in #( - *${as_nl}*) - case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 -$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; - esac - case $ac_var in #( - _ | IFS | as_nl) ;; #( - BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; - esac ;; - esac - done - - (set) 2>&1 | - case $as_nl`(ac_space=' '; set) 2>&1` in #( - *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. - sed -n \ - "s/'/'\\\\''/g; - s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" - ;; #( - *) - # `set' quotes correctly as required by POSIX, so do not add quotes. - sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" - ;; - esac | - sort -) | - sed ' - /^ac_cv_env_/b end - t clear - :clear - s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ - t end - s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ - :end' >>confcache -if diff "$cache_file" confcache >/dev/null 2>&1; then :; else - if test -w "$cache_file"; then - if test "x$cache_file" != "x/dev/null"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 -$as_echo "$as_me: updating cache $cache_file" >&6;} - if test ! 
-f "$cache_file" || test -h "$cache_file"; then - cat confcache >"$cache_file" - else - case $cache_file in #( - */* | ?:*) - mv -f confcache "$cache_file"$$ && - mv -f "$cache_file"$$ "$cache_file" ;; #( - *) - mv -f confcache "$cache_file" ;; - esac - fi - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 -$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} - fi -fi -rm -f confcache - -test "x$prefix" = xNONE && prefix=$ac_default_prefix -# Let make expand exec_prefix. -test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' - -DEFS=-DHAVE_CONFIG_H - -ac_libobjs= -ac_ltlibobjs= -U= -for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue - # 1. Remove the extension, and $U if already installed. - ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' - ac_i=`$as_echo "$ac_i" | sed "$ac_script"` - # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR - # will be set to the directory where LIBOBJS objects are built. - as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' -done -LIBOBJS=$ac_libobjs - -LTLIBOBJS=$ac_ltlibobjs - - - -: "${CONFIG_STATUS=./config.status}" -ac_write_fail=0 -ac_clean_files_save=$ac_clean_files -ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 -$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 -#! $SHELL -# Generated by $as_me. -# Run this file to recreate the current configuration. -# Compiler output produced by configure, useful for debugging -# configure, is in config.log if it exists. - -debug=false -ac_cs_recheck=false -ac_cs_silent=false - -SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## - -# Be more Bourne compatible -DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which - # is contrary to our usage. Disable this feature. - alias -g '${1+"$@"}'='"$@"' - setopt NO_GLOB_SUBST -else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi - - -as_nl=' -' -export as_nl -# Printing a long string crashes Solaris 7 /usr/bin/printf. -as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo -as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. 
-if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='printf %s\n' - as_echo_n='printf %s' -else - if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then - as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' - as_echo_n='/usr/ucb/echo -n' - else - as_echo_body='eval expr "X$1" : "X\\(.*\\)"' - as_echo_n_body='eval - arg=$1; - case $arg in #( - *"$as_nl"*) - expr "X$arg" : "X\\(.*\\)$as_nl"; - arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; - esac; - expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" - ' - export as_echo_n_body - as_echo_n='sh -c $as_echo_n_body as_echo' - fi - export as_echo_body - as_echo='sh -c $as_echo_body as_echo' -fi - -# The user is always right. -if test "${PATH_SEPARATOR+set}" != set; then - PATH_SEPARATOR=: - (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { - (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || - PATH_SEPARATOR=';' - } -fi - - -# IFS -# We need space, tab and new line, in precisely that order. Quoting is -# there to prevent editors from complaining about space-tab. -# (If _AS_PATH_WALK were called with IFS unset, it would disable word -# splitting by setting IFS to empty value.) -IFS=" "" $as_nl" - -# Find who we are. Look in the path if we contain no directory separator. -as_myself= -case $0 in #(( - *[\\/]* ) as_myself=$0 ;; - *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done -IFS=$as_save_IFS - - ;; -esac -# We did not find ourselves, most probably we were run as `sh COMMAND' -# in which case we are not to be found in the path. -if test "x$as_myself" = x; then - as_myself=$0 -fi -if test ! -f "$as_myself"; then - $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 -fi - -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : -done -PS1='$ ' -PS2='> ' -PS4='+ ' - -# NLS nuisances. -LC_ALL=C -export LC_ALL -LANGUAGE=C -export LANGUAGE - -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error STATUS ERROR [LINENO LOG_FD] -# ---------------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with STATUS, using 1 if that was 0. -as_fn_error () -{ - as_status=$1; test $as_status -eq 0 && as_status=1 - if test "$4"; then - as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 - fi - $as_echo "$as_me: error: $2" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. 
-as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - -if expr a : '\(a\)' >/dev/null 2>&1 && - test "X`expr 00001 : '.*\(...\)'`" = X001; then - as_expr=expr -else - as_expr=false -fi - -if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then - as_basename=basename -else - as_basename=false -fi - -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi - -as_me=`$as_basename -- "$0" || -$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ - X"$0" : 'X\(//\)$' \| \ - X"$0" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X/"$0" | - sed '/^.*\/\([^/][^/]*\)\/*$/{ - s//\1/ - q - } - /^X\/\(\/\/\)$/{ - s//\1/ - q - } - /^X\/\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits - -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file -else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null -fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -pR'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || - as_ln_s='cp -pR' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -pR' - fi -else - as_ln_s='cp -pR' -fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null - - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. 
-as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' -else - test -d ./-p && rmdir ./-p - as_mkdir_p=false -fi - - -# as_fn_executable_p FILE -# ----------------------- -# Test if FILE is an executable regular file. -as_fn_executable_p () -{ - test -f "$1" && test -x "$1" -} # as_fn_executable_p -as_test_x='test -x' -as_executable_p=as_fn_executable_p - -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" - -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" - - -exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to -# report actual input values of CONFIG_FILES etc. instead of their -# values after options handling. -ac_log=" -This file was extended by sqlcipher $as_me 3.37.2, which was -generated by GNU Autoconf 2.69. Invocation command line was - - CONFIG_FILES = $CONFIG_FILES - CONFIG_HEADERS = $CONFIG_HEADERS - CONFIG_LINKS = $CONFIG_LINKS - CONFIG_COMMANDS = $CONFIG_COMMANDS - $ $0 $@ - -on `(hostname || uname -n) 2>/dev/null | sed 1q` -" - -_ACEOF - -case $ac_config_files in *" -"*) set x $ac_config_files; shift; ac_config_files=$*;; -esac - -case $ac_config_headers in *" -"*) set x $ac_config_headers; shift; ac_config_headers=$*;; -esac - - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# Files that config.status was made for. -config_files="$ac_config_files" -config_headers="$ac_config_headers" -config_commands="$ac_config_commands" - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. - -Usage: $0 [OPTION]... [TAG]... 
- - -h, --help print this help, then exit - -V, --version print version number and configuration settings, then exit - --config print configuration, then exit - -q, --quiet, --silent - do not print progress messages - -d, --debug don't remove temporary files - --recheck update $as_me by reconfiguring in the same conditions - --file=FILE[:TEMPLATE] - instantiate the configuration file FILE - --header=FILE[:TEMPLATE] - instantiate the configuration header FILE - -Configuration files: -$config_files - -Configuration headers: -$config_headers - -Configuration commands: -$config_commands - -Report bugs to the package provider." - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" -ac_cs_version="\\ -sqlcipher config.status 3.37.2 -configured by $0, generated by GNU Autoconf 2.69, - with options \\"\$ac_cs_config\\" - -Copyright (C) 2012 Free Software Foundation, Inc. -This config.status script is free software; the Free Software Foundation -gives unlimited permission to copy, distribute and modify it." - -ac_pwd='$ac_pwd' -srcdir='$srcdir' -INSTALL='$INSTALL' -AWK='$AWK' -test -n "\$AWK" || AWK=awk -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# The default lists apply if the user does not specify any file. -ac_need_defaults=: -while test $# != 0 -do - case $1 in - --*=?*) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` - ac_shift=: - ;; - --*=) - ac_option=`expr "X$1" : 'X\([^=]*\)='` - ac_optarg= - ac_shift=: - ;; - *) - ac_option=$1 - ac_optarg=$2 - ac_shift=shift - ;; - esac - - case $ac_option in - # Handling of the options. - -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) - ac_cs_recheck=: ;; - --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) - $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; - --debug | --debu | --deb | --de | --d | -d ) - debug=: ;; - --file | --fil | --fi | --f ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - '') as_fn_error $? "missing file argument" ;; - esac - as_fn_append CONFIG_FILES " '$ac_optarg'" - ac_need_defaults=false;; - --header | --heade | --head | --hea ) - $ac_shift - case $ac_optarg in - *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; - esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" - ac_need_defaults=false;; - --he | --h) - # Conflict between --help and --header - as_fn_error $? "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; - --help | --hel | -h ) - $as_echo "$ac_cs_usage"; exit ;; - -q | -quiet | --quiet | --quie | --qui | --qu | --q \ - | -silent | --silent | --silen | --sile | --sil | --si | --s) - ac_cs_silent=: ;; - - # This is an error. - -*) as_fn_error $? "unrecognized option: \`$1' -Try \`$0 --help' for more information." 
;; - - *) as_fn_append ac_config_targets " $1" - ac_need_defaults=false ;; - - esac - shift -done - -ac_configure_extra_args= - -if $ac_cs_silent; then - exec 6>/dev/null - ac_configure_extra_args="$ac_configure_extra_args --silent" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -if \$ac_cs_recheck; then - set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion - shift - \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 - CONFIG_SHELL='$SHELL' - export CONFIG_SHELL - exec "\$@" -fi - -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -exec 5>>config.log -{ - echo - sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX -## Running $as_me. ## -_ASBOX - $as_echo "$ac_log" -} >&5 - -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -# -# INIT-COMMANDS -# - - -# The HP-UX ksh and POSIX shell print the target directory to stdout -# if CDPATH is set. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -sed_quote_subst='$sed_quote_subst' -double_quote_subst='$double_quote_subst' -delay_variable_subst='$delay_variable_subst' -macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' -macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' -enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' -enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' -pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' -enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' -shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' -SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' -ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' -PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' -host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' -host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' -host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' -build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' -build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' -build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' -SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' -Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' -GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' -EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' -FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' -LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' -NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' -LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' -max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' -ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' -exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' -lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' -lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' -lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' -lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' -lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' -reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' -reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' -OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' 
-deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' -file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' -file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' -want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' -DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' -sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' -AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' -AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' -archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' -STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' -RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' -old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' -old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' -old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' -lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' -CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' -CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' -compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' -GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' -lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' -nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' -lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' -lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' -objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' -MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' -lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' -lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' -need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' -MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' -DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' -NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' -LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' -OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' -OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' -libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' 
-shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' -extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' -archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' -enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' -export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' -whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' -compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' -old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' -old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' -archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' -archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' -module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' -module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' -with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' -allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' -no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' -hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' -hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' -hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' -hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' -hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' -hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' -hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' -inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' -link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' -always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' -export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' -exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' -include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' -prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' -postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' -file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' -variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' -need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' -need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' -version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' -runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' -shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' -shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' -libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' -library_names_spec='`$ECHO "$library_names_spec" | $SED 
"$delay_single_quote_subst"`' -soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' -install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' -postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' -postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' -finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' -finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' -hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' -sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' -configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' -configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' -hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' -enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' -enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' -enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' -old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' -striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' - -LTCC='$LTCC' -LTCFLAGS='$LTCFLAGS' -compiler='$compiler_DEFAULT' - -# A function that is used when there is no print builtin or printf. -func_fallback_echo () -{ - eval 'cat <<_LTECHO_EOF -\$1 -_LTECHO_EOF' -} - -# Quote evaled strings. -for var in SHELL \ -ECHO \ -PATH_SEPARATOR \ -SED \ -GREP \ -EGREP \ -FGREP \ -LD \ -NM \ -LN_S \ -lt_SP2NL \ -lt_NL2SP \ -reload_flag \ -OBJDUMP \ -deplibs_check_method \ -file_magic_cmd \ -file_magic_glob \ -want_nocaseglob \ -DLLTOOL \ -sharedlib_from_linklib_cmd \ -AR \ -AR_FLAGS \ -archiver_list_spec \ -STRIP \ -RANLIB \ -CC \ -CFLAGS \ -compiler \ -lt_cv_sys_global_symbol_pipe \ -lt_cv_sys_global_symbol_to_cdecl \ -lt_cv_sys_global_symbol_to_import \ -lt_cv_sys_global_symbol_to_c_name_address \ -lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ -lt_cv_nm_interface \ -nm_file_list_spec \ -lt_cv_truncate_bin \ -lt_prog_compiler_no_builtin_flag \ -lt_prog_compiler_pic \ -lt_prog_compiler_wl \ -lt_prog_compiler_static \ -lt_cv_prog_compiler_c_o \ -need_locks \ -MANIFEST_TOOL \ -DSYMUTIL \ -NMEDIT \ -LIPO \ -OTOOL \ -OTOOL64 \ -shrext_cmds \ -export_dynamic_flag_spec \ -whole_archive_flag_spec \ -compiler_needs_object \ -with_gnu_ld \ -allow_undefined_flag \ -no_undefined_flag \ -hardcode_libdir_flag_spec \ -hardcode_libdir_separator \ -exclude_expsyms \ -include_expsyms \ -file_list_spec \ -variables_saved_for_relink \ -libname_spec \ -library_names_spec \ -soname_spec \ -install_override_mode \ -finish_eval \ -old_striplib \ -striplib; do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[\\\\\\\`\\"\\\$]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -# Double-quote double-evaled strings. 
-for var in reload_cmds \ -old_postinstall_cmds \ -old_postuninstall_cmds \ -old_archive_cmds \ -extract_expsyms_cmds \ -old_archive_from_new_cmds \ -old_archive_from_expsyms_cmds \ -archive_cmds \ -archive_expsym_cmds \ -module_cmds \ -module_expsym_cmds \ -export_symbols_cmds \ -prelink_cmds \ -postlink_cmds \ -postinstall_cmds \ -postuninstall_cmds \ -finish_cmds \ -sys_lib_search_path_spec \ -configure_time_dlsearch_path \ -configure_time_lt_sys_library_path; do - case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in - *[\\\\\\\`\\"\\\$]*) - eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes - ;; - *) - eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" - ;; - esac -done - -ac_aux_dir='$ac_aux_dir' - -# See if we are running on zsh, and set the options that allow our -# commands through without removal of \ escapes INIT. -if test -n "\${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST -fi - - - PACKAGE='$PACKAGE' - VERSION='$VERSION' - RM='$RM' - ofile='$ofile' - - - - -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - -# Handling of arguments. -for ac_config_target in $ac_config_targets -do - case $ac_config_target in - "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; - "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;; - "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; - "sqlcipher.pc") CONFIG_FILES="$CONFIG_FILES sqlcipher.pc" ;; - - *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; - esac -done - - -# If the user did not use the arguments to specify the items to instantiate, -# then the envvar interface is used. Set only those that are not. -# We use the long form for the default assignment because of an extremely -# bizarre bug on SunOS 4.1.3. -if $ac_need_defaults; then - test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files - test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers - test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands -fi - -# Have a temporary directory for convenience. Make it in the build tree -# simply because there is no reason against having it here, and in addition, -# creating and moving files from /tmp can sometimes cause problems. -# Hook for its removal unless debugging. -# Note that there is a small window in which the directory will not be cleaned: -# after its creation but before its name has been assigned to `$tmp'. -$debug || -{ - tmp= ac_tmp= - trap 'exit_status=$? - : "${ac_tmp:=$tmp}" - { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status -' 0 - trap 'as_fn_exit 1' 1 2 13 15 -} -# Create a (secure) tmp directory for tmp files. - -{ - tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && - test -d "$tmp" -} || -{ - tmp=./conf$$-$RANDOM - (umask 077 && mkdir "$tmp") -} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 -ac_tmp=$tmp - -# Set up the scripts for CONFIG_FILES section. -# No need to generate them if there are no CONFIG_FILES. -# This happens for instance with `./config.status config.h'. -if test -n "$CONFIG_FILES"; then - - -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. 
-if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi -ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` -if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\\r' -else - ac_cs_awk_cr=$ac_cr -fi - -echo 'BEGIN {' >"$ac_tmp/subs1.awk" && -_ACEOF - - -{ - echo "cat >conf$$subs.awk <<_ACEOF" && - echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && - echo "_ACEOF" -} >conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 -ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` -ac_delim='%!_!# ' -for ac_last_try in false false false false false :; do - . ./conf$$subs.sh || - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - - ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` - if test $ac_delim_n = $ac_delim_num; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done -rm -f conf$$subs.sh - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && -_ACEOF -sed -n ' -h -s/^/S["/; s/!.*/"]=/ -p -g -s/^[^!]*!// -:repl -t repl -s/'"$ac_delim"'$// -t delim -:nl -h -s/\(.\{148\}\)..*/\1/ -t more1 -s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ -p -n -b repl -:more1 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t nl -:delim -h -s/\(.\{148\}\)..*/\1/ -t more2 -s/["\\]/\\&/g; s/^/"/; s/$/"/ -p -b -:more2 -s/["\\]/\\&/g; s/^/"/; s/$/"\\/ -p -g -s/.\{148\}// -t delim -' >$CONFIG_STATUS || ac_write_fail=1 -rm -f conf$$subs.awk -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -_ACAWK -cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && - for (key in S) S_is_set[key] = 1 - FS = "" - -} -{ - line = $ 0 - nfields = split(line, field, "@") - substed = 0 - len = length(field[1]) - for (i = 2; i < nfields; i++) { - key = field[i] - keylen = length(key) - if (S_is_set[key]) { - value = S[key] - line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) - len += length(value) + length(field[++i]) - substed = 1 - } else - len += 1 + keylen - } - - print line -} - -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then - sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" -else - cat -fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ - || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 -_ACEOF - -# VPATH may cause trouble with some makes, so we remove sole $(srcdir), -# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and -# trailing colons and then remove the whole line if VPATH becomes empty -# (actually we leave an empty line to preserve line numbers). -if test "x$srcdir" = x.; then - ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ -h -s/// -s/^/:/ -s/[ ]*$/:/ -s/:\$(srcdir):/:/g -s/:\${srcdir}:/:/g -s/:@srcdir@:/:/g -s/^:*// -s/:*$// -x -s/\(=[ ]*\).*/\1/ -G -s/\n// -s/^[^=]*=[ ]*$// -}' -fi - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -fi # test -n "$CONFIG_FILES" - -# Set up the scripts for CONFIG_HEADERS section. -# No need to generate them if there are no CONFIG_HEADERS. -# This happens for instance with `./config.status Makefile'. -if test -n "$CONFIG_HEADERS"; then -cat >"$ac_tmp/defines.awk" <<\_ACAWK || -BEGIN { -_ACEOF - -# Transform confdefs.h into an awk script `defines.awk', embedded as -# here-document in config.status, that substitutes the proper values into -# config.h.in to produce config.h. - -# Create a delimiter string that does not exist in confdefs.h, to ease -# handling of long lines. 
-ac_delim='%!_!# ' -for ac_last_try in false false :; do - ac_tt=`sed -n "/$ac_delim/p" confdefs.h` - if test -z "$ac_tt"; then - break - elif $ac_last_try; then - as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 - else - ac_delim="$ac_delim!$ac_delim _$ac_delim!! " - fi -done - -# For the awk script, D is an array of macro values keyed by name, -# likewise P contains macro parameters if any. Preserve backslash -# newline sequences. - -ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* -sed -n ' -s/.\{148\}/&'"$ac_delim"'/g -t rset -:rset -s/^[ ]*#[ ]*define[ ][ ]*/ / -t def -d -:def -s/\\$// -t bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3"/p -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p -d -:bsnl -s/["\\]/\\&/g -s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ -D["\1"]=" \3\\\\\\n"\\/p -t cont -s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p -t cont -d -:cont -n -s/.\{148\}/&'"$ac_delim"'/g -t clear -:clear -s/\\$// -t bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/"/p -d -:bsnlc -s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p -b cont -' >$CONFIG_STATUS || ac_write_fail=1 - -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - for (key in D) D_is_set[key] = 1 - FS = "" -} -/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { - line = \$ 0 - split(line, arg, " ") - if (arg[1] == "#") { - defundef = arg[2] - mac1 = arg[3] - } else { - defundef = substr(arg[1], 2) - mac1 = arg[2] - } - split(mac1, mac2, "(") #) - macro = mac2[1] - prefix = substr(line, 1, index(line, defundef) - 1) - if (D_is_set[macro]) { - # Preserve the white space surrounding the "#". - print prefix "define", macro P[macro] D[macro] - next - } else { - # Replace #undef with comments. This is necessary, for example, - # in the case of _POSIX_SOURCE, which is predefined and required - # on some systems where configure will not decide to define it. - if (defundef == "undef") { - print "/*", prefix defundef, macro, "*/" - next - } - } -} -{ print } -_ACAWK -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 -fi # test -n "$CONFIG_HEADERS" - - -eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" -shift -for ac_tag -do - case $ac_tag in - :[FHLC]) ac_mode=$ac_tag; continue;; - esac - case $ac_mode$ac_tag in - :[FHL]*:*);; - :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; - :[FH]-) ac_tag=-:-;; - :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; - esac - ac_save_IFS=$IFS - IFS=: - set x $ac_tag - IFS=$ac_save_IFS - shift - ac_file=$1 - shift - - case $ac_mode in - :L) ac_source=$1;; - :[FH]) - ac_file_inputs= - for ac_f - do - case $ac_f in - -) ac_f="$ac_tmp/stdin";; - *) # Look for the file first in the build tree, then in the source tree - # (if the path is not absolute). The absolute path cannot be DOS-style, - # because $ac_f cannot contain `:'. - test -f "$ac_f" || - case $ac_f in - [\\/$]*) false;; - *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; - esac || - as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; - esac - case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" - done - - # Let's still pretend it is `configure' which instantiates (i.e., don't - # use $as_me), people would be surprised to read: - # /* config.h. Generated by config.status. */ - configure_input='Generated from '` - $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' - `' by configure.' 
- if test x"$ac_file" != x-; then - configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 -$as_echo "$as_me: creating $ac_file" >&6;} - fi - # Neutralize special characters interpreted by sed in replacement strings. - case $configure_input in #( - *\&* | *\|* | *\\* ) - ac_sed_conf_input=`$as_echo "$configure_input" | - sed 's/[\\\\&|]/\\\\&/g'`;; #( - *) ac_sed_conf_input=$configure_input;; - esac - - case $ac_tag in - *:-:* | *:-) cat >"$ac_tmp/stdin" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; - esac - ;; - esac - - ac_dir=`$as_dirname -- "$ac_file" || -$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$ac_file" : 'X\(//\)[^/]' \| \ - X"$ac_file" : 'X\(//\)$' \| \ - X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$ac_file" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p - ac_builddir=. - -case "$ac_dir" in -.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; -*) - ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` - # A ".." for each directory in $ac_dir_suffix. - ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` - case $ac_top_builddir_sub in - "") ac_top_builddir_sub=. ac_top_build_prefix= ;; - *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; - esac ;; -esac -ac_abs_top_builddir=$ac_pwd -ac_abs_builddir=$ac_pwd$ac_dir_suffix -# for backward compatibility: -ac_top_builddir=$ac_top_build_prefix - -case $srcdir in - .) # We are building in place. - ac_srcdir=. - ac_top_srcdir=$ac_top_builddir_sub - ac_abs_top_srcdir=$ac_pwd ;; - [\\/]* | ?:[\\/]* ) # Absolute name. - ac_srcdir=$srcdir$ac_dir_suffix; - ac_top_srcdir=$srcdir - ac_abs_top_srcdir=$srcdir ;; - *) # Relative name. - ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix - ac_top_srcdir=$ac_top_build_prefix$srcdir - ac_abs_top_srcdir=$ac_pwd/$srcdir ;; -esac -ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix - - - case $ac_mode in - :F) - # - # CONFIG_FILE - # - - case $INSTALL in - [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; - *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; - esac -_ACEOF - -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# If the template does not know about datarootdir, expand it. -# FIXME: This hack should be removed a few years after 2.60. -ac_datarootdir_hack=; ac_datarootdir_seen= -ac_sed_dataroot=' -/datarootdir/ { - p - q -} -/@datadir@/p -/@docdir@/p -/@infodir@/p -/@localedir@/p -/@mandir@/p' -case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in -*datarootdir*) ac_datarootdir_seen=yes;; -*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 -$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} -_ACEOF -cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 - ac_datarootdir_hack=' - s&@datadir@&$datadir&g - s&@docdir@&$docdir&g - s&@infodir@&$infodir&g - s&@localedir@&$localedir&g - s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; -esac -_ACEOF - -# Neutralize VPATH when `$srcdir' = `.'. -# Shell code in configure.ac might set extrasub. -# FIXME: do we really want to maintain this feature? 
-cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_sed_extra="$ac_vpsub -$extrasub -_ACEOF -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -:t -/@[a-zA-Z_][a-zA-Z_0-9]*@/!b -s|@configure_input@|$ac_sed_conf_input|;t t -s&@top_builddir@&$ac_top_builddir_sub&;t t -s&@top_build_prefix@&$ac_top_build_prefix&;t t -s&@srcdir@&$ac_srcdir&;t t -s&@abs_srcdir@&$ac_abs_srcdir&;t t -s&@top_srcdir@&$ac_top_srcdir&;t t -s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t -s&@builddir@&$ac_builddir&;t t -s&@abs_builddir@&$ac_abs_builddir&;t t -s&@abs_top_builddir@&$ac_abs_top_builddir&;t t -s&@INSTALL@&$ac_INSTALL&;t t -$ac_datarootdir_hack -" -eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ - >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - -test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && - { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && - { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ - "$ac_tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&5 -$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' -which seems to be undefined. Please make sure it is defined" >&2;} - - rm -f "$ac_tmp/stdin" - case $ac_file in - -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; - *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; - esac \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - ;; - :H) - # - # CONFIG_HEADER - # - if test x"$ac_file" != x-; then - { - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" - } >"$ac_tmp/config.h" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 -$as_echo "$as_me: $ac_file is unchanged" >&6;} - else - rm -f "$ac_file" - mv "$ac_tmp/config.h" "$ac_file" \ - || as_fn_error $? "could not create $ac_file" "$LINENO" 5 - fi - else - $as_echo "/* $configure_input */" \ - && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error $? "could not create -" "$LINENO" 5 - fi - ;; - - :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 -$as_echo "$as_me: executing $ac_file commands" >&6;} - ;; - esac - - - case $ac_file$ac_mode in - "libtool":C) - - # See if we are running on zsh, and set the options that allow our - # commands through without removal of \ escapes. - if test -n "${ZSH_VERSION+set}"; then - setopt NO_GLOB_SUBST - fi - - cfgfile=${ofile}T - trap "$RM \"$cfgfile\"; exit 1" 1 2 15 - $RM "$cfgfile" - - cat <<_LT_EOF >> "$cfgfile" -#! $SHELL -# Generated automatically by $as_me ($PACKAGE) $VERSION -# NOTE: Changes made to this file will be lost: look at ltmain.sh. - -# Provide generalized library-building support services. -# Written by Gordon Matzigkeit, 1996 - -# Copyright (C) 2014 Free Software Foundation, Inc. -# This is free software; see the source for copying conditions. There is NO -# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - -# GNU Libtool is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of of the License, or -# (at your option) any later version. 
-# -# As a special exception to the GNU General Public License, if you -# distribute this file as part of a program or library that is built -# using GNU Libtool, you may include this file under the same -# distribution terms that you use for the rest of that program. -# -# GNU Libtool is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - - -# The names of the tagged configurations supported by this script. -available_tags='' - -# Configured defaults for sys_lib_dlsearch_path munging. -: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} - -# ### BEGIN LIBTOOL CONFIG - -# Which release of libtool.m4 was used? -macro_version=$macro_version -macro_revision=$macro_revision - -# Whether or not to build shared libraries. -build_libtool_libs=$enable_shared - -# Whether or not to build static libraries. -build_old_libs=$enable_static - -# What type of objects to build. -pic_mode=$pic_mode - -# Whether or not to optimize for fast installation. -fast_install=$enable_fast_install - -# Shared archive member basename,for filename based shared library versioning on AIX. -shared_archive_member_spec=$shared_archive_member_spec - -# Shell to use when invoking shell scripts. -SHELL=$lt_SHELL - -# An echo program that protects backslashes. -ECHO=$lt_ECHO - -# The PATH separator for the build system. -PATH_SEPARATOR=$lt_PATH_SEPARATOR - -# The host system. -host_alias=$host_alias -host=$host -host_os=$host_os - -# The build system. -build_alias=$build_alias -build=$build -build_os=$build_os - -# A sed program that does not truncate output. -SED=$lt_SED - -# Sed that helps us avoid accidentally triggering echo(1) options like -n. -Xsed="\$SED -e 1s/^X//" - -# A grep program that handles long lines. -GREP=$lt_GREP - -# An ERE matcher. -EGREP=$lt_EGREP - -# A literal string matcher. -FGREP=$lt_FGREP - -# A BSD- or MS-compatible name lister. -NM=$lt_NM - -# Whether we need soft or hard links. -LN_S=$lt_LN_S - -# What is the maximum length of a command? -max_cmd_len=$max_cmd_len - -# Object file suffix (normally "o"). -objext=$ac_objext - -# Executable file suffix (normally ""). -exeext=$exeext - -# whether the shell understands "unset". -lt_unset=$lt_unset - -# turn spaces into newlines. -SP2NL=$lt_lt_SP2NL - -# turn newlines into spaces. -NL2SP=$lt_lt_NL2SP - -# convert \$build file names to \$host format. -to_host_file_cmd=$lt_cv_to_host_file_cmd - -# convert \$build files to toolchain format. -to_tool_file_cmd=$lt_cv_to_tool_file_cmd - -# An object symbol dumper. -OBJDUMP=$lt_OBJDUMP - -# Method to check whether dependent libraries are shared objects. -deplibs_check_method=$lt_deplibs_check_method - -# Command to use when deplibs_check_method = "file_magic". -file_magic_cmd=$lt_file_magic_cmd - -# How to find potential files when deplibs_check_method = "file_magic". -file_magic_glob=$lt_file_magic_glob - -# Find potential files using nocaseglob when deplibs_check_method = "file_magic". -want_nocaseglob=$lt_want_nocaseglob - -# DLL creation program. -DLLTOOL=$lt_DLLTOOL - -# Command to associate shared and link libraries. -sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd - -# The archiver. -AR=$lt_AR - -# Flags to create an archive. -AR_FLAGS=$lt_AR_FLAGS - -# How to feed a file listing to the archiver. 
-archiver_list_spec=$lt_archiver_list_spec - -# A symbol stripping program. -STRIP=$lt_STRIP - -# Commands used to install an old-style archive. -RANLIB=$lt_RANLIB -old_postinstall_cmds=$lt_old_postinstall_cmds -old_postuninstall_cmds=$lt_old_postuninstall_cmds - -# Whether to use a lock for old archive extraction. -lock_old_archive_extraction=$lock_old_archive_extraction - -# A C compiler. -LTCC=$lt_CC - -# LTCC compiler flags. -LTCFLAGS=$lt_CFLAGS - -# Take the output of nm and produce a listing of raw symbols and C names. -global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe - -# Transform the output of nm in a proper C declaration. -global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl - -# Transform the output of nm into a list of symbols to manually relocate. -global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import - -# Transform the output of nm in a C name address pair. -global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address - -# Transform the output of nm in a C name address pair when lib prefix is needed. -global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix - -# The name lister interface. -nm_interface=$lt_lt_cv_nm_interface - -# Specify filename containing input files for \$NM. -nm_file_list_spec=$lt_nm_file_list_spec - -# The root where to search for dependent libraries,and where our libraries should be installed. -lt_sysroot=$lt_sysroot - -# Command to truncate a binary pipe. -lt_truncate_bin=$lt_lt_cv_truncate_bin - -# The name of the directory that contains temporary libtool files. -objdir=$objdir - -# Used to examine libraries when file_magic_cmd begins with "file". -MAGIC_CMD=$MAGIC_CMD - -# Must we lock files when doing compilation? -need_locks=$lt_need_locks - -# Manifest tool. -MANIFEST_TOOL=$lt_MANIFEST_TOOL - -# Tool to manipulate archived DWARF debug symbol files on Mac OS X. -DSYMUTIL=$lt_DSYMUTIL - -# Tool to change global to local symbols on Mac OS X. -NMEDIT=$lt_NMEDIT - -# Tool to manipulate fat objects and archives on Mac OS X. -LIPO=$lt_LIPO - -# ldd/readelf like tool for Mach-O binaries on Mac OS X. -OTOOL=$lt_OTOOL - -# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. -OTOOL64=$lt_OTOOL64 - -# Old archive suffix (normally "a"). -libext=$libext - -# Shared library suffix (normally ".so"). -shrext_cmds=$lt_shrext_cmds - -# The commands to extract the exported symbol list from a shared archive. -extract_expsyms_cmds=$lt_extract_expsyms_cmds - -# Variables whose values should be saved in libtool wrapper scripts and -# restored at link time. -variables_saved_for_relink=$lt_variables_saved_for_relink - -# Do we need the "lib" prefix for modules? -need_lib_prefix=$need_lib_prefix - -# Do we need a version for libraries? -need_version=$need_version - -# Library versioning type. -version_type=$version_type - -# Shared library runtime path variable. -runpath_var=$runpath_var - -# Shared library path variable. -shlibpath_var=$shlibpath_var - -# Is shlibpath searched before the hard-coded library search path? -shlibpath_overrides_runpath=$shlibpath_overrides_runpath - -# Format of library name prefix. -libname_spec=$lt_libname_spec - -# List of archive names. First name is the real one, the rest are links. -# The last name is the one that the linker finds with -lNAME -library_names_spec=$lt_library_names_spec - -# The coded name of the library, if different from the real name. -soname_spec=$lt_soname_spec - -# Permission mode override for installation of shared libraries. 
-install_override_mode=$lt_install_override_mode - -# Command to use after installation of a shared archive. -postinstall_cmds=$lt_postinstall_cmds - -# Command to use after uninstallation of a shared archive. -postuninstall_cmds=$lt_postuninstall_cmds - -# Commands used to finish a libtool library installation in a directory. -finish_cmds=$lt_finish_cmds - -# As "finish_cmds", except a single script fragment to be evaled but -# not shown. -finish_eval=$lt_finish_eval - -# Whether we should hardcode library paths into libraries. -hardcode_into_libs=$hardcode_into_libs - -# Compile-time system search path for libraries. -sys_lib_search_path_spec=$lt_sys_lib_search_path_spec - -# Detected run-time system search path for libraries. -sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path - -# Explicit LT_SYS_LIBRARY_PATH set during ./configure time. -configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path - -# Whether dlopen is supported. -dlopen_support=$enable_dlopen - -# Whether dlopen of programs is supported. -dlopen_self=$enable_dlopen_self - -# Whether dlopen of statically linked programs is supported. -dlopen_self_static=$enable_dlopen_self_static - -# Commands to strip libraries. -old_striplib=$lt_old_striplib -striplib=$lt_striplib - - -# The linker used to build libraries. -LD=$lt_LD - -# How to create reloadable object files. -reload_flag=$lt_reload_flag -reload_cmds=$lt_reload_cmds - -# Commands used to build an old-style archive. -old_archive_cmds=$lt_old_archive_cmds - -# A language specific compiler. -CC=$lt_compiler - -# Is the compiler the GNU compiler? -with_gcc=$GCC - -# Compiler flag to turn off builtin functions. -no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag - -# Additional compiler flags for building library objects. -pic_flag=$lt_lt_prog_compiler_pic - -# How to pass a linker flag through the compiler. -wl=$lt_lt_prog_compiler_wl - -# Compiler flag to prevent dynamic linking. -link_static_flag=$lt_lt_prog_compiler_static - -# Does compiler simultaneously support -c and -o options? -compiler_c_o=$lt_lt_cv_prog_compiler_c_o - -# Whether or not to add -lc for building shared libraries. -build_libtool_need_lc=$archive_cmds_need_lc - -# Whether or not to disallow shared libs when runtime libs are static. -allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes - -# Compiler flag to allow reflexive dlopens. -export_dynamic_flag_spec=$lt_export_dynamic_flag_spec - -# Compiler flag to generate shared objects directly from archives. -whole_archive_flag_spec=$lt_whole_archive_flag_spec - -# Whether the compiler copes with passing no objects directly. -compiler_needs_object=$lt_compiler_needs_object - -# Create an old-style archive from a shared archive. -old_archive_from_new_cmds=$lt_old_archive_from_new_cmds - -# Create a temporary old-style archive to link instead of a shared archive. -old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds - -# Commands used to build a shared archive. -archive_cmds=$lt_archive_cmds -archive_expsym_cmds=$lt_archive_expsym_cmds - -# Commands used to build a loadable module if different from building -# a shared archive. -module_cmds=$lt_module_cmds -module_expsym_cmds=$lt_module_expsym_cmds - -# Whether we are building with GNU ld or not. -with_gnu_ld=$lt_with_gnu_ld - -# Flag that allows shared libraries with undefined symbols to be built. -allow_undefined_flag=$lt_allow_undefined_flag - -# Flag that enforces no undefined symbols. 
-no_undefined_flag=$lt_no_undefined_flag - -# Flag to hardcode \$libdir into a binary during linking. -# This must work even if \$libdir does not exist -hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec - -# Whether we need a single "-rpath" flag with a separated argument. -hardcode_libdir_separator=$lt_hardcode_libdir_separator - -# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes -# DIR into the resulting binary. -hardcode_direct=$hardcode_direct - -# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes -# DIR into the resulting binary and the resulting library dependency is -# "absolute",i.e impossible to change by setting \$shlibpath_var if the -# library is relocated. -hardcode_direct_absolute=$hardcode_direct_absolute - -# Set to "yes" if using the -LDIR flag during linking hardcodes DIR -# into the resulting binary. -hardcode_minus_L=$hardcode_minus_L - -# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR -# into the resulting binary. -hardcode_shlibpath_var=$hardcode_shlibpath_var - -# Set to "yes" if building a shared library automatically hardcodes DIR -# into the library and all subsequent libraries and executables linked -# against it. -hardcode_automatic=$hardcode_automatic - -# Set to yes if linker adds runtime paths of dependent libraries -# to runtime path list. -inherit_rpath=$inherit_rpath - -# Whether libtool must link a program against all its dependency libraries. -link_all_deplibs=$link_all_deplibs - -# Set to "yes" if exported symbols are required. -always_export_symbols=$always_export_symbols - -# The commands to list exported symbols. -export_symbols_cmds=$lt_export_symbols_cmds - -# Symbols that should not be listed in the preloaded symbols. -exclude_expsyms=$lt_exclude_expsyms - -# Symbols that must always be exported. -include_expsyms=$lt_include_expsyms - -# Commands necessary for linking programs (against libraries) with templates. -prelink_cmds=$lt_prelink_cmds - -# Commands necessary for finishing linking programs. -postlink_cmds=$lt_postlink_cmds - -# Specify filename containing input files. -file_list_spec=$lt_file_list_spec - -# How to hardcode a shared library path into an executable. -hardcode_action=$hardcode_action - -# ### END LIBTOOL CONFIG - -_LT_EOF - - cat <<'_LT_EOF' >> "$cfgfile" - -# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE - -# func_munge_path_list VARIABLE PATH -# ----------------------------------- -# VARIABLE is name of variable containing _space_ separated list of -# directories to be munged by the contents of PATH, which is string -# having a format: -# "DIR[:DIR]:" -# string "DIR[ DIR]" will be prepended to VARIABLE -# ":DIR[:DIR]" -# string "DIR[ DIR]" will be appended to VARIABLE -# "DIRP[:DIRP]::[DIRA:]DIRA" -# string "DIRP[ DIRP]" will be prepended to VARIABLE and string -# "DIRA[ DIRA]" will be appended to VARIABLE -# "DIR[:DIR]" -# VARIABLE will be replaced by "DIR[ DIR]" -func_munge_path_list () -{ - case x$2 in - x) - ;; - *:) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" - ;; - x:*) - eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" - ;; - *::*) - eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" - eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" - ;; - *) - eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" - ;; - esac -} - - -# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. 
-func_cc_basename () -{ - for cc_temp in $*""; do - case $cc_temp in - compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; - distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; - \-*) ;; - *) break;; - esac - done - func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` -} - - -# ### END FUNCTIONS SHARED WITH CONFIGURE - -_LT_EOF - - case $host_os in - aix3*) - cat <<\_LT_EOF >> "$cfgfile" -# AIX sometimes has problems with the GCC collect2 program. For some -# reason, if we set the COLLECT_NAMES environment variable, the problems -# vanish in a puff of smoke. -if test set != "${COLLECT_NAMES+set}"; then - COLLECT_NAMES= - export COLLECT_NAMES -fi -_LT_EOF - ;; - esac - - -ltmain=$ac_aux_dir/ltmain.sh - - - # We use sed instead of cat because bash on DJGPP gets confused if - # if finds mixed CR/LF and LF-only lines. Since sed operates in - # text mode, it properly converts lines to CR/LF. This bash problem - # is reportedly fixed, but why not run on old versions too? - sed '$q' "$ltmain" >> "$cfgfile" \ - || (rm -f "$cfgfile"; exit 1) - - mv -f "$cfgfile" "$ofile" || - (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") - chmod +x "$ofile" - - ;; - - esac -done # for ac_tag - - -as_fn_exit 0 -_ACEOF -ac_clean_files=$ac_clean_files_save - -test $ac_write_fail = 0 || - as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 - - -# configure is writing to config.log, and then calls config.status. -# config.status does its own redirection, appending to config.log. -# Unfortunately, on DOS this fails, as config.log is still kept open -# by configure, so config.status won't be able to write to it; its -# output is simply discarded. So we exec the FD to /dev/null, -# effectively closing config.log, so it can be properly (re)opened and -# appended to by config.status. When coming back to configure, we -# need to make the FD available again. -if test "$no_create" != yes; then - ac_cs_success=: - ac_config_status_args= - test "$silent" = yes && - ac_config_status_args="$ac_config_status_args --quiet" - exec 5>/dev/null - $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false - exec 5>>config.log - # Use ||, not &&, to avoid exiting from the if with $? = 1, which - # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit 1 -fi -if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 -$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} -fi - +#!/bin/sh +dir="`dirname "$0"`/autosetup" +#@@INITCHECK@@# +WRAPPER="$0"; export WRAPPER; exec "`"$dir/autosetup-find-tclsh"`" "$dir/autosetup" "$@" diff --git a/configure.ac b/configure.ac deleted file mode 100644 index bb105aea6c..0000000000 --- a/configure.ac +++ /dev/null @@ -1,890 +0,0 @@ -# -# The build process allows for using a cross-compiler. But the default -# action is to target the same platform that we are running on. The -# configure script needs to discover the following properties of the -# build and target systems: -# -# srcdir -# -# The is the name of the directory that contains the -# "configure" shell script. All source files are -# located relative to this directory. -# -# bindir -# -# The name of the directory where executables should be -# written by the "install" target of the makefile. -# -# program_prefix -# -# Add this prefix to the names of all executables that run -# on the target machine. 
Default: "" -# -# ENABLE_SHARED -# -# True if shared libraries should be generated. -# -# BUILD_CC -# -# The name of a command that is used to convert C -# source files into executables that run on the build -# platform. -# -# BUILD_CFLAGS -# -# Switches that the build compiler needs in order to construct -# command-line programs. -# -# BUILD_LIBS -# -# Libraries that the build compiler needs in order to construct -# command-line programs. -# -# BUILD_EXEEXT -# -# The filename extension for executables on the build -# platform. "" for Unix and ".exe" for Windows. -# -# TCL_* -# -# Lots of values are read in from the tclConfig.sh script, -# if that script is available. This values are used for -# constructing and installing the TCL extension. -# -# TARGET_READLINE_LIBS -# -# This is the library directives passed to the target linker -# that cause the executable to link against the readline library. -# This might be a switch like "-lreadline" or pathnames of library -# file like "../../src/libreadline.a". -# -# TARGET_READLINE_INC -# -# This variables define the directory that contain header -# files for the readline library. If the compiler is able -# to find on its own, then this can be blank. -# -# TARGET_EXEEXT -# -# The filename extension for executables on the -# target platform. "" for Unix and ".exe" for windows. -# -# This configure.in file is easy to reuse on other projects. Just -# change the argument to AC_INIT(). And disable any features that -# you don't need (for example BLT) by erasing or commenting out -# the corresponding code. -# -AC_INIT(sqlcipher, m4_esyscmd([cat VERSION | tr -d '\n'])) - -dnl Make sure the local VERSION file matches this configure script -sqlite_version_sanity_check=`cat $srcdir/VERSION | tr -d '\n'` -if test "$PACKAGE_VERSION" != "$sqlite_version_sanity_check" ; then -AC_MSG_ERROR([configure script is out of date: - configure \$PACKAGE_VERSION = $PACKAGE_VERSION - top level VERSION file = $sqlite_version_sanity_check -please regen with autoconf]) -fi - -######### -# Programs needed -# -AC_PROG_LIBTOOL -AC_PROG_INSTALL - -######### -# Enable large file support (if special flags are necessary) -# -AC_SYS_LARGEFILE - -######### -# Check for needed/wanted data types -AC_CHECK_TYPES([int8_t, int16_t, int32_t, int64_t, intptr_t, uint8_t, - uint16_t, uint32_t, uint64_t, uintptr_t]) - -######### -# Check for needed/wanted headers -AC_CHECK_HEADERS([sys/types.h stdlib.h stdint.h inttypes.h malloc.h]) - -######### -# Figure out whether or not we have these functions -# -AC_CHECK_FUNCS([fdatasync gmtime_r isnan localtime_r localtime_s malloc_usable_size strchrnul usleep utime pread pread64 pwrite pwrite64]) - -######### -# By default, we use the amalgamation (this may be changed below...) -# -USE_AMALGAMATION=1 - -######### -# See whether we can run specific tclsh versions known to work well; -# if not, then we fall back to plain tclsh. -# TODO: try other versions before falling back? -# -AC_CHECK_PROGS(TCLSH_CMD, [tclsh8.7 tclsh8.6 tclsh8.5 tclsh], none) -if test "$TCLSH_CMD" = "none"; then - # If we can't find a local tclsh, then building the amalgamation will fail. - # We act as though --disable-amalgamation has been used. - echo "Warning: can't find tclsh - defaulting to non-amalgamation build." 
- USE_AMALGAMATION=0 - TCLSH_CMD="tclsh" -fi -AC_SUBST(TCLSH_CMD) - -AC_ARG_VAR([TCLLIBDIR], [Where to install tcl plugin]) -if test "x${TCLLIBDIR+set}" != "xset" ; then - TCLLIBDIR='$(libdir)' - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` ; do - if test -d $i ; then - TCLLIBDIR=$i - break - fi - done - TCLLIBDIR="${TCLLIBDIR}/sqlite3" -fi - -######### -# Set up an appropriate program prefix -# -if test "$program_prefix" = "NONE"; then - program_prefix="" -fi -AC_SUBST(program_prefix) - -VERSION=[`cat $srcdir/VERSION | sed 's/^\([0-9]*\.*[0-9]*\).*/\1/'`] -AC_MSG_NOTICE(Version set to $VERSION) -AC_SUBST(VERSION) -RELEASE=`cat $srcdir/VERSION` -AC_MSG_NOTICE(Release set to $RELEASE) -AC_SUBST(RELEASE) - -######### -# Locate a compiler for the build machine. This compiler should -# generate command-line programs that run on the build machine. -# -if test x"$cross_compiling" = xno; then - BUILD_CC=$CC - BUILD_CFLAGS=$CFLAGS -else - if test "${BUILD_CC+set}" != set; then - AC_CHECK_PROGS(BUILD_CC, gcc cc cl) - fi - if test "${BUILD_CFLAGS+set}" != set; then - BUILD_CFLAGS="-g" - fi -fi -AC_SUBST(BUILD_CC) - -########## -# Do we want to support multithreaded use of sqlite -# -AC_ARG_ENABLE(threadsafe, -AC_HELP_STRING([--disable-threadsafe],[Disable mutexing])) -AC_MSG_CHECKING([whether to support threadsafe operation]) -if test "$enable_threadsafe" = "no"; then - SQLITE_THREADSAFE=0 - AC_MSG_RESULT([no]) -else - SQLITE_THREADSAFE=1 - AC_MSG_RESULT([yes]) -fi -AC_SUBST(SQLITE_THREADSAFE) - -if test "$SQLITE_THREADSAFE" = "1"; then - AC_SEARCH_LIBS(pthread_create, pthread) - AC_SEARCH_LIBS(pthread_mutexattr_init, pthread) -fi - -########## -# Which crypto library do we use -# -AC_ARG_WITH([crypto-lib], -AC_HELP_STRING([--with-crypto-lib],[Specify which crypto library to use]), -crypto_lib=$withval) -AC_MSG_CHECKING([for crypto library to use]) -if test "$crypto_lib" = "none"; then - AC_MSG_RESULT([none]) -else - if test "$crypto_lib" = "commoncrypto"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_CC" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_CC" - AC_MSG_RESULT([commoncrypto]) - else - if test "$crypto_lib" = "libtomcrypt"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_LIBTOMCRYPT" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_LIBTOMCRYPT" - AC_MSG_RESULT([libtomcrypt]) - AC_CHECK_LIB([tomcrypt], [register_cipher], , - AC_MSG_ERROR([Library crypto not found. Install libtomcrypt!"])) - else - if test "$crypto_lib" = "nss"; then - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_NSS" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_NSS" - AC_MSG_RESULT([nss3]) - AC_CHECK_LIB([nss3], [PK11_Decrypt], , - AC_MSG_ERROR([Library crypto not found. Install nss!"])) - else - CFLAGS="$CFLAGS -DSQLCIPHER_CRYPTO_OPENSSL" - BUILD_CFLAGS="$BUILD_CFLAGS -DSQLCIPHER_CRYPTO_OPENSSL" - AC_MSG_RESULT([openssl]) - AC_CHECK_LIB([crypto], [HMAC_Init_ex], , - AC_MSG_ERROR([Library crypto not found. Install openssl!"])) - fi - fi - fi -fi - -########## -# Do we want to allow a connection created in one thread to be used -# in another thread. This does not work on many Linux systems (ex: RedHat 9) -# due to bugs in the threading implementations. This is thus off by default. 
-# -AC_ARG_ENABLE(cross-thread-connections, -AC_HELP_STRING([--enable-cross-thread-connections],[Allow connection sharing across threads]),,enable_xthreadconnect=no) -AC_MSG_CHECKING([whether to allow connections to be shared across threads]) -if test "$enable_xthreadconnect" = "no"; then - XTHREADCONNECT='' - AC_MSG_RESULT([no]) -else - XTHREADCONNECT='-DSQLITE_ALLOW_XTHREAD_CONNECT=1' - AC_MSG_RESULT([yes]) -fi -AC_SUBST(XTHREADCONNECT) - -########## -# Do we want to support release -# -AC_ARG_ENABLE(releasemode, -AC_HELP_STRING([--enable-releasemode],[Support libtool link to release mode]),,enable_releasemode=no) -AC_MSG_CHECKING([whether to support shared library linked as release mode or not]) -if test "$enable_releasemode" = "no"; then - ALLOWRELEASE="" - AC_MSG_RESULT([no]) -else - ALLOWRELEASE="-release `cat $srcdir/VERSION`" - AC_MSG_RESULT([yes]) -fi -AC_SUBST(ALLOWRELEASE) - -########## -# Do we want temporary databases in memory -# -AC_ARG_ENABLE(tempstore, -AC_HELP_STRING([--enable-tempstore],[Use an in-ram database for temporary tables (never,no,yes,always)]),,enable_tempstore=no) -AC_MSG_CHECKING([whether to use an in-ram database for temporary tables]) -case "$enable_tempstore" in - never ) - TEMP_STORE=0 - AC_MSG_RESULT([never]) - ;; - no ) - TEMP_STORE=1 - AC_MSG_RESULT([no]) - ;; - yes ) - TEMP_STORE=2 - AC_MSG_RESULT([yes]) - ;; - always ) - TEMP_STORE=3 - AC_MSG_RESULT([always]) - ;; - * ) - TEMP_STORE=1 - AC_MSG_RESULT([no]) - ;; -esac - -AC_SUBST(TEMP_STORE) - -########### -# Lots of things are different if we are compiling for Windows using -# the CYGWIN environment. So check for that special case and handle -# things accordingly. -# -AC_MSG_CHECKING([if executables have the .exe suffix]) -if test "$config_BUILD_EXEEXT" = ".exe"; then - CYGWIN=yes - AC_MSG_RESULT(yes) -else - AC_MSG_RESULT(unknown) -fi -if test "$CYGWIN" != "yes"; then - AC_CYGWIN -fi -if test "$CYGWIN" = "yes"; then - BUILD_EXEEXT=.exe -else - BUILD_EXEEXT=$EXEEXT -fi -if test x"$cross_compiling" = xno; then - TARGET_EXEEXT=$BUILD_EXEEXT -else - TARGET_EXEEXT=$config_TARGET_EXEEXT -fi -if test "$TARGET_EXEEXT" = ".exe"; then - SQLITE_OS_UNIX=0 - SQLITE_OS_WIN=1 - CFLAGS="$CFLAGS -DSQLITE_OS_WIN=1" -else - SQLITE_OS_UNIX=1 - SQLITE_OS_WIN=0 - CFLAGS="$CFLAGS -DSQLITE_OS_UNIX=1" -fi - -AC_SUBST(BUILD_EXEEXT) -AC_SUBST(SQLITE_OS_UNIX) -AC_SUBST(SQLITE_OS_WIN) -AC_SUBST(TARGET_EXEEXT) - -########## -# Figure out all the parameters needed to compile against Tcl. -# -# This code is derived from the SC_PATH_TCLCONFIG and SC_LOAD_TCLCONFIG -# macros in the in the tcl.m4 file of the standard TCL distribution. -# Those macros could not be used directly since we have to make some -# minor changes to accomodate systems that do not have TCL installed. -# -AC_ARG_ENABLE(tcl, AC_HELP_STRING([--disable-tcl],[do not build TCL extension]), - [use_tcl=$enableval],[use_tcl=yes]) -if test "${use_tcl}" = "yes" ; then - AC_ARG_WITH(tcl, AC_HELP_STRING([--with-tcl=DIR],[directory containing tcl configuration (tclConfig.sh)]), with_tclconfig=${withval}) - AC_MSG_CHECKING([for Tcl configuration]) - AC_CACHE_VAL(ac_cv_c_tclconfig,[ - # First check to see if --with-tcl was specified. 
- if test x"${with_tclconfig}" != x ; then - if test -f "${with_tclconfig}/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)` - else - AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh]) - fi - fi - - # Start autosearch by asking tclsh - if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD}` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # On ubuntu 14.10, $auto_path on tclsh is not quite correct. - # So try again after applying corrections. - if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in `echo 'puts stdout $auto_path' | ${TCLSH_CMD} | sed 's,/tcltk/tcl,/tcl,g'` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # Recent versions of Xcode on Macs hid the tclConfig.sh file - # in a strange place. - if test x"${ac_cv_c_tclconfig}" = x ; then - if test x"$cross_compiling" = xno; then - for i in /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX*.sdk/usr/lib - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig="$i" - break - fi - done - fi - fi - - # then check for a private Tcl installation - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ../tcl \ - `ls -dr ../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../tcl \ - `ls -dr ../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \ - ../../../tcl \ - `ls -dr ../../../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ../../../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi - - # check in a few common install locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - `ls -d ${libdir} 2>/dev/null` \ - `ls -d /usr/local/lib 2>/dev/null` \ - `ls -d /usr/contrib/lib 2>/dev/null` \ - `ls -d /usr/lib 2>/dev/null` - do - if test -f "$i/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i; pwd)` - break - fi - done - fi - - # check in a few other private locations - if test x"${ac_cv_c_tclconfig}" = x ; then - for i in \ - ${srcdir}/../tcl \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]].[[0-9]]* 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]] 2>/dev/null` \ - `ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` - do - if test -f "$i/unix/tclConfig.sh" ; then - ac_cv_c_tclconfig=`(cd $i/unix; pwd)` - break - fi - done - fi - ]) - - if test x"${ac_cv_c_tclconfig}" = x ; then - use_tcl=no - AC_MSG_WARN(Can't find Tcl configuration definitions) - AC_MSG_WARN(*** Without Tcl the regression tests cannot be executed ***) - AC_MSG_WARN(*** Consider using --with-tcl=... to define location of Tcl ***) - else - TCL_BIN_DIR=${ac_cv_c_tclconfig} - AC_MSG_RESULT(found $TCL_BIN_DIR/tclConfig.sh) - - AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh]) - if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then - AC_MSG_RESULT([loading]) - . $TCL_BIN_DIR/tclConfig.sh - else - AC_MSG_RESULT([file not found]) - fi - - # - # If the TCL_BIN_DIR is the build directory (not the install directory), - # then set the common variable name to the value of the build variables. 
- # For example, the variable TCL_LIB_SPEC will be set to the value - # of TCL_BUILD_LIB_SPEC. An extension should make use of TCL_LIB_SPEC - # instead of TCL_BUILD_LIB_SPEC since it will work with both an - # installed and uninstalled version of Tcl. - # - - if test -f $TCL_BIN_DIR/Makefile ; then - TCL_LIB_SPEC=${TCL_BUILD_LIB_SPEC} - TCL_STUB_LIB_SPEC=${TCL_BUILD_STUB_LIB_SPEC} - TCL_STUB_LIB_PATH=${TCL_BUILD_STUB_LIB_PATH} - fi - - # - # eval is required to do the TCL_DBGX substitution - # - - eval "TCL_LIB_FILE=\"${TCL_LIB_FILE}\"" - eval "TCL_LIB_FLAG=\"${TCL_LIB_FLAG}\"" - eval "TCL_LIB_SPEC=\"${TCL_LIB_SPEC}\"" - - eval "TCL_STUB_LIB_FILE=\"${TCL_STUB_LIB_FILE}\"" - eval "TCL_STUB_LIB_FLAG=\"${TCL_STUB_LIB_FLAG}\"" - eval "TCL_STUB_LIB_SPEC=\"${TCL_STUB_LIB_SPEC}\"" - - AC_SUBST(TCL_VERSION) - AC_SUBST(TCL_BIN_DIR) - AC_SUBST(TCL_SRC_DIR) - AC_SUBST(TCL_INCLUDE_SPEC) - - AC_SUBST(TCL_LIB_FILE) - AC_SUBST(TCL_LIB_FLAG) - AC_SUBST(TCL_LIB_SPEC) - - AC_SUBST(TCL_STUB_LIB_FILE) - AC_SUBST(TCL_STUB_LIB_FLAG) - AC_SUBST(TCL_STUB_LIB_SPEC) - AC_SUBST(TCL_SHLIB_SUFFIX) - fi -fi -if test "${use_tcl}" = "no" ; then - HAVE_TCL="" -else - HAVE_TCL=1 -fi -AC_SUBST(HAVE_TCL) - -########## -# Figure out what C libraries are required to compile programs -# that use "readline()" library. -# -TARGET_READLINE_LIBS="" -TARGET_READLINE_INC="" -TARGET_HAVE_READLINE=0 -TARGET_HAVE_EDITLINE=0 -AC_ARG_ENABLE([editline], - [AC_HELP_STRING([--enable-editline],[enable BSD editline support])], - [with_editline=$enableval], - [with_editline=auto]) -AC_ARG_ENABLE([readline], - [AC_HELP_STRING([--disable-readline],[disable readline support])], - [with_readline=$enableval], - [with_readline=auto]) - -if test x"$with_editline" != xno; then - sLIBS=$LIBS - LIBS="" - TARGET_HAVE_EDITLINE=1 - AC_SEARCH_LIBS(readline,edit,[with_readline=no],[TARGET_HAVE_EDITLINE=0]) - TARGET_READLINE_LIBS=$LIBS - LIBS=$sLIBS -fi -if test x"$with_readline" != xno; then - found="yes" - - AC_ARG_WITH([readline-lib], - [AC_HELP_STRING([--with-readline-lib],[specify readline library])], - [with_readline_lib=$withval], - [with_readline_lib="auto"]) - if test "x$with_readline_lib" = xauto; then - save_LIBS="$LIBS" - LIBS="" - AC_SEARCH_LIBS(tgetent, [readline ncurses curses termcap], [term_LIBS="$LIBS"], [term_LIBS=""]) - AC_CHECK_LIB([readline], [readline], [TARGET_READLINE_LIBS="-lreadline"], [found="no"]) - TARGET_READLINE_LIBS="$TARGET_READLINE_LIBS $term_LIBS" - LIBS="$save_LIBS" - else - TARGET_READLINE_LIBS="$with_readline_lib" - fi - - AC_ARG_WITH([readline-inc], - [AC_HELP_STRING([--with-readline-inc],[specify readline include paths])], - [with_readline_inc=$withval], - [with_readline_inc="auto"]) - if test "x$with_readline_inc" = xauto; then - AC_CHECK_HEADER(readline.h, [found="yes"], [ - found="no" - if test "$cross_compiling" != yes; then - for dir in /usr /usr/local /usr/local/readline /usr/contrib /mingw; do - for subdir in include include/readline; do - AC_CHECK_FILE($dir/$subdir/readline.h, found=yes) - if test "$found" = "yes"; then - TARGET_READLINE_INC="-I$dir/$subdir" - break - fi - done - test "$found" = "yes" && break - done - fi - ]) - else - TARGET_READLINE_INC="$with_readline_inc" - fi - - if test x"$found" = xno; then - TARGET_READLINE_LIBS="" - TARGET_READLINE_INC="" - TARGET_HAVE_READLINE=0 - else - TARGET_HAVE_READLINE=1 - fi -fi - -AC_SUBST(TARGET_READLINE_LIBS) -AC_SUBST(TARGET_READLINE_INC) -AC_SUBST(TARGET_HAVE_READLINE) -AC_SUBST(TARGET_HAVE_EDITLINE) - -########## -# Figure out what C libraries are required 
to compile programs -# that use "fdatasync()" function. -# -AC_SEARCH_LIBS(fdatasync, [rt]) - -######### -# check for debug enabled -AC_ARG_ENABLE(debug, AC_HELP_STRING([--enable-debug],[enable debugging & verbose explain])) -AC_MSG_CHECKING([build type]) -if test "${enable_debug}" = "yes" ; then - TARGET_DEBUG="-DSQLITE_DEBUG=1 -DSQLITE_ENABLE_SELECTTRACE -DSQLITE_ENABLE_WHERETRACE -O0" - AC_MSG_RESULT([debug]) -else - TARGET_DEBUG="-DNDEBUG" - AC_MSG_RESULT([release]) -fi -AC_SUBST(TARGET_DEBUG) - -######### -# See whether we should use the amalgamation to build -AC_ARG_ENABLE(amalgamation, AC_HELP_STRING([--disable-amalgamation], - [Disable the amalgamation and instead build all files separately])) -if test "${enable_amalgamation}" = "no" ; then - USE_AMALGAMATION=0 -fi -AC_SUBST(USE_AMALGAMATION) - -######### -# By default, amalgamation sqlite3.c will have #line directives. -# This is a build option not shown by ./configure --help -# To control it, use configure option: amalgamation_line_macros=? -# where ? is no to suppress #line directives or yes to create them. -AMALGAMATION_LINE_MACROS=--linemacros=1 -AC_ARG_VAR(amalgamation_line_macros,) -AC_SUBST(AMALGAMATION_LINE_MACROS) -if test "${amalgamation_line_macros+set}" = set; then : - enableval=$amalgamation_line_macros; -fi -if test "${amalgamation_line_macros}" = "yes" ; then - AMALGAMATION_LINE_MACROS=--linemacros=1 -fi -if test "${amalgamation_line_macros}" = "no" ; then - AMALGAMATION_LINE_MACROS=--linemacros=0 -fi - -######### -# Look for zlib. Only needed by extensions and by the sqlite3.exe shell -AC_CHECK_HEADERS(zlib.h) -AC_SEARCH_LIBS(deflate, z, [HAVE_ZLIB="-DSQLITE_HAVE_ZLIB=1"], [HAVE_ZLIB=""]) -AC_SUBST(HAVE_ZLIB) - -######### -# See whether we should allow loadable extensions -AC_ARG_ENABLE(load-extension, AC_HELP_STRING([--disable-load-extension], - [Disable loading of external extensions]),,[enable_load_extension=yes]) -if test "${enable_load_extension}" = "yes" ; then - OPT_FEATURE_FLAGS="" - AC_SEARCH_LIBS(dlopen, dl) -else - OPT_FEATURE_FLAGS="-DSQLITE_OMIT_LOAD_EXTENSION=1" -fi - -########## -# Do we want to support math functions -# -AC_ARG_ENABLE(math, -AC_HELP_STRING([--disable-math],[Disable math functions])) -AC_MSG_CHECKING([whether to support math functions]) -if test "$enable_math" = "no"; then - AC_MSG_RESULT([no]) -else - AC_MSG_RESULT([yes]) - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MATH_FUNCTIONS" - AC_SEARCH_LIBS(ceil, m) -fi - - -######## -# The --enable-all argument is short-hand to enable -# multiple extensions. 
-AC_ARG_ENABLE(all, AC_HELP_STRING([--enable-all], - [Enable FTS4, FTS5, Geopoly, JSON, RTree, Sessions])) - -########## -# Do we want to support memsys3 and/or memsys5 -# -AC_ARG_ENABLE(memsys5, - AC_HELP_STRING([--enable-memsys5],[Enable MEMSYS5])) -AC_MSG_CHECKING([whether to support MEMSYS5]) -if test "${enable_memsys5}" = "yes"; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MEMSYS5" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi -AC_ARG_ENABLE(memsys3, - AC_HELP_STRING([--enable-memsys3],[Enable MEMSYS3])) -AC_MSG_CHECKING([whether to support MEMSYS3]) -if test "${enable_memsys3}" = "yes" -a "${enable_memsys5}" = "no"; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_MEMSYS3" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable Full Text Search extensions -AC_ARG_ENABLE(fts3, AC_HELP_STRING([--enable-fts3], - [Enable the FTS3 extension])) -AC_MSG_CHECKING([whether to support FTS3]) -if test "${enable_fts3}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS3" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi -AC_ARG_ENABLE(fts4, AC_HELP_STRING([--enable-fts4], - [Enable the FTS4 extension])) -AC_MSG_CHECKING([whether to support FTS4]) -if test "${enable_fts4}" = "yes" -o "${enable_all}" = "yes" ; then - AC_MSG_RESULT([yes]) - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS4" - AC_SEARCH_LIBS([log],[m]) -else - AC_MSG_RESULT([no]) -fi -AC_ARG_ENABLE(fts5, AC_HELP_STRING([--enable-fts5], - [Enable the FTS5 extension])) -AC_MSG_CHECKING([whether to support FTS5]) -if test "${enable_fts5}" = "yes" -o "${enable_all}" = "yes" ; then - AC_MSG_RESULT([yes]) - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_FTS5" - AC_SEARCH_LIBS([log],[m]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable JSON1 -AC_ARG_ENABLE(json1, AC_HELP_STRING([--enable-json1],[Enable the JSON1 extension])) -AC_MSG_CHECKING([whether to support JSON]) -if test "${enable_json1}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_JSON1" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable the LIMIT clause on UPDATE and DELETE -# statements. 
-AC_ARG_ENABLE(update-limit, AC_HELP_STRING([--enable-update-limit], - [Enable the UPDATE/DELETE LIMIT clause])) -AC_MSG_CHECKING([whether to support LIMIT on UPDATE and DELETE statements]) -if test "${enable_update_limit}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_UPDATE_DELETE_LIMIT" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable GEOPOLY -AC_ARG_ENABLE(geopoly, AC_HELP_STRING([--enable-geopoly], - [Enable the GEOPOLY extension]), - [enable_geopoly=yes],[enable_geopoly=no]) -AC_MSG_CHECKING([whether to support GEOPOLY]) -if test "${enable_geopoly}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_GEOPOLY" - enable_rtree=yes - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable RTREE -AC_ARG_ENABLE(rtree, AC_HELP_STRING([--enable-rtree], - [Enable the RTREE extension])) -AC_MSG_CHECKING([whether to support RTREE]) -if test "${enable_rtree}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_RTREE" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# See whether we should enable the SESSION extension -AC_ARG_ENABLE(session, AC_HELP_STRING([--enable-session], - [Enable the SESSION extension])) -AC_MSG_CHECKING([whether to support SESSION]) -if test "${enable_session}" = "yes" -o "${enable_all}" = "yes" ; then - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_SESSION" - OPT_FEATURE_FLAGS="${OPT_FEATURE_FLAGS} -DSQLITE_ENABLE_PREUPDATE_HOOK" - AC_MSG_RESULT([yes]) -else - AC_MSG_RESULT([no]) -fi - -######### -# attempt to duplicate any OMITS and ENABLES into the ${OPT_FEATURE_FLAGS} parameter -for option in $CFLAGS $CPPFLAGS -do - case $option in - -DSQLITE_OMIT*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; - -DSQLITE_ENABLE*) OPT_FEATURE_FLAGS="$OPT_FEATURE_FLAGS $option";; - esac -done -AC_SUBST(OPT_FEATURE_FLAGS) - - -# attempt to remove any OMITS and ENABLES from the $(CFLAGS) parameter -ac_temp_CFLAGS="" -for option in $CFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_CFLAGS="$ac_temp_CFLAGS $option";; - esac -done -CFLAGS=$ac_temp_CFLAGS - - -# attempt to remove any OMITS and ENABLES from the $(CPPFLAGS) parameter -ac_temp_CPPFLAGS="" -for option in $CPPFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_CPPFLAGS="$ac_temp_CPPFLAGS $option";; - esac -done -CPPFLAGS=$ac_temp_CPPFLAGS - - -# attempt to remove any OMITS and ENABLES from the $(BUILD_CFLAGS) parameter -ac_temp_BUILD_CFLAGS="" -for option in $BUILD_CFLAGS -do - case $option in - -DSQLITE_OMIT*) ;; - -DSQLITE_ENABLE*) ;; - *) ac_temp_BUILD_CFLAGS="$ac_temp_BUILD_CFLAGS $option";; - esac -done -BUILD_CFLAGS=$ac_temp_BUILD_CFLAGS - - -######### -# See whether we should use GCOV -AC_ARG_ENABLE(gcov, AC_HELP_STRING([--enable-gcov], - [Enable coverage testing using gcov])) -if test "${use_gcov}" = "yes" ; then - USE_GCOV=1 -else - USE_GCOV=0 -fi -AC_SUBST(USE_GCOV) - -######### -# Enable/disabled amalagamation line macros -######## -AMALGAMATION_LINE_MACROS=--linemacros=0 -if test "${amalgamation_line_macros}" = "yes" ; then - AMALGAMATION_LINE_MACROS=--linemacros=1 -fi -if test "${amalgamation_line_macros}" = "no" ; then - AMALGAMATION_LINE_MACROS=--linemacros=0 -fi -AC_SUBST(AMALGAMATION_LINE_MACROS) - -######### -# Output the config header -AC_CONFIG_HEADERS(config.h) - -######### -# Generate the output files. 
-# -AC_SUBST(BUILD_CFLAGS) -AC_OUTPUT([ -Makefile -sqlcipher.pc -]) diff --git a/contrib/sqlitecon.tcl b/contrib/sqlitecon.tcl index b5dbcafc2a..78463a1ffa 100644 --- a/contrib/sqlitecon.tcl +++ b/contrib/sqlitecon.tcl @@ -567,7 +567,7 @@ proc sqlitecon::Cut w { } } -# Do a paste opeation. +# Do a paste operation. # proc sqlitecon::Paste w { if {[sqlitecon::canCut $w]==1} { diff --git a/doc/compile-for-unix.md b/doc/compile-for-unix.md new file mode 100644 index 0000000000..ce76b97bae --- /dev/null +++ b/doc/compile-for-unix.md @@ -0,0 +1,70 @@ +# Notes On Compiling SQLite On All Kinds Of Unix + +Here are step-by-step instructions on how to build SQLite from +canonical source on any modern machine that isn't Windows. These +notes are tested (on 2024-10-11) on Ubuntu and on MacOS, but they +are general and should work on most any modern unix platform. +See the companion document ([](./compile-for-windows.md>)) for +guidance on building for Windows. + + 1. Install a C-compiler. GCC or Clang both work fine. If you are + reading this document, you've probably already done that. + + 2. *(Optional):* Install TCL development libraries. In this note, + we'll do a private install in the $HOME/local directory, + but you can make adjustments to install TCL wherever you like. + This document assumes you are working with TCL version 9.0. + See also the [](./tcl-extension-testing.md) document that contains + more details on compiling Tcl for use with SQLite. +
    +
  1. Get the TCL source archive, perhaps from + + or . +
  2. Untar the source archive. CD into the "unix/" subfolder + of the source tree. +
  3. Run: `mkdir $HOME/local` +
  4. Run: `./configure --prefix=$HOME/local` +
  5. Run: `make install` +
+

+ As of 2024-10-25, TCL is no longer required for many
+ common build targets, such as "sqlite3.c" or the "sqlite3"
+ command-line tool. So you can skip this step if that is all
+ you want to build. TCL is still required to run "make test"
+ and similar, or to build the TCL extension, of course.
+
+ 4. Download the SQLite source tree and unpack it. CD into the
+ toplevel directory of the source tree.
+
+ 5. Run: `./configure --enable-all --with-tclsh=$HOME/local/bin/tclsh9.0`
+
+ You do not need to use --with-tclsh if the tclsh you want to use is the
+ first one on your PATH or if you are building without TCL.
+
+ 6. Run the "`Makefile`" makefile with an appropriate target.
+    Examples:
+

    +
  • `make sqlite3.c` +
  • `make sqlite3` +
  • `make sqldiff` +
  • `make sqlite3_rsync` +
+

None of the targets above require TCL. TCL is needed + for the following targets: +

    +
  • `make tclextension-install` +
  • `make devtest` +
  • `make releasetest` +
  • `make sqlite3_analyzer` +
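Whichever of the targets above you build, a quick way to confirm that the output is usable is a few-line C program compiled against the generated amalgamation or the built library. This is only an illustrative sketch, not part of the official build: the file name `smoketest.c` and the compile command in the comment are assumptions about a typical Linux or macOS setup.

```c
/* smoketest.c -- minimal sanity check of a freshly built amalgamation.
** Hypothetical build command (adjust paths and libraries to your system):
**   gcc -I. smoketest.c sqlite3.c -lpthread -ldl -lm -o smoketest && ./smoketest
*/
#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db = 0;
  printf("compiled against %s\n", SQLITE_VERSION);
  printf("running library  %s\n", sqlite3_libversion());
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ){
    fprintf(stderr, "open failed: %s\n", sqlite3_errmsg(db));
    sqlite3_close(db);
    return 1;
  }
  sqlite3_close(db);
  printf("in-memory open/close OK\n");
  return 0;
}
```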
+ + It is not required that you run the "tclextension-install" target prior to + running tests. However, the tests will run more smoothly if you do. + The version of SQLite used for the TCL extension does *not* need to + correspond to the version of SQLite under test. So you can install the + SQLite TCL extension once, and then use it to test many different versions + of SQLite. + + + 7. For a debugging build of the CLI, where the ".treetrace" and ".wheretrace" + commands work, add the the --with-debug argument to configure. diff --git a/doc/compile-for-windows.md b/doc/compile-for-windows.md new file mode 100644 index 0000000000..0e59c83fed --- /dev/null +++ b/doc/compile-for-windows.md @@ -0,0 +1,182 @@ +# Notes On Compiling SQLite On Windows 11 + +Below are step-by-step instructions on how to build SQLite from +canonical source on a new Windows 11 PC, as of 2025-10-31. +See [](./compile-for-unix.md) for a similar guide for unix-like +systems, including MacOS. + + 1. Install Microsoft Visual Studio. The free "community edition" + will work fine. Do a standard install for C++ development. + SQLite only needs the + "cl" compiler and the "nmake" build tool. +
  • Note: + VS2015 or later is required for the procedures below to + all work. You *might* be able to get the build to work with + earlier versions of MSVC, but in that case the TCL installation + of step 3 will be required, since the "jimsh0.c" program of + Autosetup that is used as a substitute for "tclsh.exe" won't + compile with versions of Visual Studio prior to VS2015. In any + event, building SQLite from canonical source code on Windows + is not supported for earlier versions of Visual Studio.
+ + 2. Under the "Start" menu, find "All Apps" then go to "Visual Studio 20XX" + and find "x64 Native Tools Command Prompt for VS 20XX". Pin that + application to your task bar, as you will use it a lot. Bring up + an instance of this command prompt and do all of the subsequent steps + in that "x64 Native Tools" command prompt. (Or use "x86" if you want + a 32-bit build. Or use "ARM64" if you want to do a build for Windows + on ARM.) The subsequent steps will not work in a vanilla + DOS prompt. Nor will they work in PowerShell. + + 3. *(Optional):* Install TCL development libraries. + This note assumes that you will + install the TCL development libraries in the "`c:\Tcl`" directory. + Make adjustments + if you want TCL installed somewhere else. SQLite needs both the + "tclsh90.exe" command-line tool as part of the build process, and + the "tcl90.lib" and "tclstub.lib" libraries in order to run tests. + This document assumes you are working with TCL version 9.0. + See [](./tcl-extension-testing.md#windows) for guidance on how + to compile TCL version 8.6 for use with SQLite. +
    +
  1. Get the TCL source archive, perhaps from + + or . +
  2. Untar or unzip the source archive. CD into the "win/" subfolder + of the source tree. +
  3. Run: `nmake /f makefile.vc INSTALLDIR=c:\Tcl release` +
  4. Run: `nmake /f makefile.vc INSTALLDIR=c:\Tcl install`
    + Notes: +
      +
    1. The previous two `nmake` commands must be run separately. +
    2. Also, the INSTALLDIR=... argument is required on both. +
    +
  5. Optional: CD to `c:\Tcl\bin` and make a copy of + `tclsh90.exe` over into just `tclsh.exe`. +
  6. Optional: + Add `c:\Tcl\bin` to your %PATH%. To do this, go to Settings + and search for "path". Select "edit environment variables for + your account" and modify your default PATH accordingly. + You will need to close and reopen your command prompts after + making this change. +
+
+ As of 2024-10-25, TCL is no longer required for many
+ common build targets, such as "sqlite3.c" or the "sqlite3.exe"
+ command-line tool. So you can skip this step if that is all
+ you want to build. TCL is still required to run "make test"
+ and similar, or to build the TCL extension, of course.
+
+ 4. Download the SQLite source tree and unpack it. CD into the
+ toplevel directory of the source tree.
+
+ 5. Run the "`Makefile.msc`" makefile with an appropriate target.
+    Examples:
+
    +
  • `nmake /f makefile.msc` +
  • `nmake /f makefile.msc sqlite3.c` +
  • `nmake /f makefile.msc sqlite3.exe` +
  • `nmake /f makefile.msc sqldiff.exe` +
  • `nmake /f makefile.msc sqlite3_rsync.exe` +
+

No TCL is required for the nmake targets above. But for the ones + that follow, you will need a TCL installation, as described in step 3 + above. If you install TCL in some directory other than C:\\Tcl, then + you will also need to add the "TCLDIR=<dir>" option on the + nmake command line to tell nmake where your TCL is installed. +

    +
  • `nmake /f makefile.msc tclextension-install` +
  • `nmake /f makefile.msc devtest` +
  • `nmake /f makefile.msc releasetest` +
  • `nmake /f makefile.msc sqlite3_analyzer.exe` +
+ + It is not required that you run the "tclextension-install" target prior to + running tests. However, the tests will run more smoothly if you do. + The version of SQLite used for the TCL extension does *not* need to + correspond to the version of SQLite under test. So you can install the + SQLite TCL extension once, and then use it to test many different versions + of SQLite. + + + 7. For a debugging build of the CLI, where the ".treetrace" and ".wheretrace" + commands work, add the DEBUG=3 argument to nmake. Like this: +
    +
  • `nmake /f makefile.msc DEBUG=3 clean sqlite3.exe` +
+ + +## 32-bit Builds + +Doing a 32-bit build is just like doing a 64-bit build with the +following minor changes: + + 1. Use the "x86 Native Tools Command Prompt" instead of + "x64 Native Tools Command Prompt". "**x86**" instead of "**x64**". + + 2. Use a different installation directory for TCL. + The recommended directory is `c:\tcl32`. Thus you end up + with two TCL builds: +
    +
  • `c:\tcl` ← 64-bit (the default) +
  • `c:\tcl32` ← 32-bit +
+ + 3. Ensure that `c:\tcl32\bin` comes before `c:\tcl\bin` on + your PATH environment variable. You can achieve this using + a command like: +
    +
  • `set PATH=c:\tcl32\bin;%PATH%` +
+ +## Building a DLL + +The command the developers use for building the deliverable DLL on the +[download page](https://sqlite.org/download.html) is as follows: + +> nmake /f Makefile.msc sqlite3.dll USE_NATIVE_LIBPATHS=1 "OPTS=-DSQLITE_ENABLE_FTS3=1 -DSQLITE_ENABLE_FTS4=1 -DSQLITE_ENABLE_FTS5=1 -DSQLITE_ENABLE_RTREE=1 -DSQLITE_ENABLE_JSON1=1 -DSQLITE_ENABLE_GEOPOLY=1 -DSQLITE_ENABLE_SESSION=1 -DSQLITE_ENABLE_PREUPDATE_HOOK=1 -DSQLITE_ENABLE_SERIALIZE=1 -DSQLITE_ENABLE_MATH_FUNCTIONS=1" + +That command generates both the sqlite3.dll and sqlite3.def files. The same +command works for both 32-bit and 64-bit builds. + +## Statically Linking The TCL Library + +Some utility programs associated with SQLite need to be linked +with TCL in order to function. The [sqlite3_analyzer.exe program](https://sqlite.org/sqlanalyze.html) +is an example. You can build as described above, and then +enter: + +> nmake /f Makefile.msc sqlite3_analyzer.exe + +And you will end up with a working executable. However, that executable +will depend on having the "tcl98.dll" library somewhere on your %PATH%. +Use the following steps to build an executable that has the TCL library +statically linked so that it does not depend on separate DLL: + + 1. Use the appropriate "Command Prompt" window - either x86 or + x64, depending on whether you want a 32-bit or 64-bit executable. + + 2. Untar the TCL source tarball into a fresh directory. CD into + the "win/" subfolder. + + 3. Run: `nmake /f makefile.vc OPTS=static shell` + + 4. CD into the "Release*" subfolder that is created (note the + wildcard - the full name of the directory might vary). There + you will find the "tcl90s.lib" file. Copy this file into the + same directory that you put the "tcl90.lib" on your initial + installation. (In this document, that directory is + "`C:\Tcl32\lib`" for 32-bit builds and + "`C:\Tcl\lib`" for 64-bit builds.) + + 5. CD into your SQLite source code directory and build the desired + utility program, but add the following extra argument to the + nmake command line: +
STATICALLY_LINK_TCL=1
+

So, for example, to build a statically linked version of + sqlite3_analyzer.exe, you might type: +

nmake /f Makefile.msc STATICALLY_LINK_TCL=1 sqlite3_analyzer.exe
+ + 6. After your executable is built, you can verify that it does not + depend on the TCL DLL by running: +
dumpbin /dependents sqlite3_analyzer.exe
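Relatedly, for the `sqlite3.dll` produced in the "Building a DLL" section above, a small C program can confirm that the DLL loads and exports the expected entry points without reaching for `dumpbin`. This is a hedged sketch only: it assumes `sqlite3.dll` is in the current directory or on `%PATH%`, and the file name `dllcheck.c` is invented for the example; build it from the same Native Tools prompt with `cl dllcheck.c`.

```c
/* dllcheck.c -- illustrative check that sqlite3.dll loads and exports
** sqlite3_libversion().  Hypothetical build:  cl dllcheck.c
*/
#include <windows.h>
#include <stdio.h>

typedef const char *(*libversion_fn)(void);

int main(void){
  HMODULE h = LoadLibraryA("sqlite3.dll");
  libversion_fn ver;
  if( h==NULL ){
    fprintf(stderr, "could not load sqlite3.dll (error %lu)\n", GetLastError());
    return 1;
  }
  ver = (libversion_fn)GetProcAddress(h, "sqlite3_libversion");
  if( ver==NULL ){
    fprintf(stderr, "sqlite3_libversion is not exported\n");
    FreeLibrary(h);
    return 1;
  }
  printf("sqlite3.dll reports version %s\n", ver());
  FreeLibrary(h);
  return 0;
}
```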
diff --git a/doc/json-enhancements.md b/doc/json-enhancements.md new file mode 100644 index 0000000000..bc03e8978c --- /dev/null +++ b/doc/json-enhancements.md @@ -0,0 +1,144 @@ +# JSON Functions Enhancements (2022) + +This document summaries enhancements to the SQLite JSON support added in +early 2022. + +## 1.0 Change summary: + + 1. New **->** and **->>** operators that work like MySQL and PostgreSQL (PG). + 2. JSON functions are built-in rather than being an extension. They + are included by default, but can be omitted using the + -DSQLITE_OMIT_JSON compile-time option. + + +## 2.0 New operators **->** and **->>** + +The SQLite language adds two new binary operators **->** and **->>**. +Both operators are similar to json_extract(). The left operand is +JSON and the right operand is a JSON path expression (possibly abbreviated +for compatibility with PG - see below). So they are similar to a +two-argument call to json_extract(). + +The difference between -> and ->> (and json_extract()) is as follows: + + * The -> operator always returns JSON. + + * The ->> operator converts the answer into a primitive SQL datatype + such as TEXT, INTEGER, REAL, or NULL. If a JSON object or array + is selected, that object or array is rendered as text. If a JSON + value is selected, that value is converted into its corresponding + SQL type + + * The json_extract() interface returns JSON when a JSON object or + array is selected, or a primitive SQL datatype when a JSON value + is selected. This is different from MySQL, in which json_extract() + always returns JSON, but the difference is retained because it has + worked that way for 6 years and changing it now would likely break + a lot of legacy code. + +In MySQL and PG, the ->> operator always returns TEXT (or NULL) and never +INTEGER or REAL. This is due to limitations in the type handling capabilities +of those systems. In MySQL and PG, the result type a function or operator +may only depend on the type of its arguments, never the value of its arguments. +But the underlying JSON type depends on the value of the JSON path +expression, not the type of the JSON path expression (which is always TEXT). +Hence, the result type of ->> in MySQL and PG is unable to vary according +to the type of the JSON value being extracted. + +The type system in SQLite is more general. Functions in SQLite are able +to return different datatypes depending on the value of their arguments. +So the ->> operator in SQLite is able to return TEXT, INTEGER, REAL, or NULL +depending on the JSON type of the value being extracted. This means that +the behavior of the ->> is slightly different in SQLite versus MySQL and PG +in that it will sometimes return INTEGER and REAL values, depending on its +inputs. It is possible to implement the ->> operator in SQLite so that it +always operates exactly like MySQL and PG and always returns TEXT or NULL, +but I have been unable to think of any situations where returning the +actual JSON value this would cause problems, so I'm including the enhanced +functionality in SQLite. + +The table below attempts to summarize the differences between the +-> and ->> operators and the json_extract() function, for SQLite, MySQL, +and PG. JSON values are shown using their SQL text representation but +in a bold font. + + + +
| JSON | PATH | -> operator (all) | ->> operator (MySQL/PG) | ->> operator (SQLite) | json_extract() (SQLite) |
|------|------|-------------------|--------------------------|------------------------|--------------------------|
| **'{"a":123}'** | '$.a' | **'123'** | '123' | 123 | 123 |
| **'{"a":4.5}'** | '$.a' | **'4.5'** | '4.5' | 4.5 | 4.5 |
| **'{"a":"xyz"}'** | '$.a' | **'"xyz"'** | 'xyz' | 'xyz' | 'xyz' |
| **'{"a":null}'** | '$.a' | **'null'** | NULL | NULL | NULL |
| **'{"a":[6,7,8]}'** | '$.a' | **'[6,7,8]'** | '[6,7,8]' | '[6,7,8]' | **'[6,7,8]'** |
| **'{"a":{"x":9}}'** | '$.a' | **'{"x":9}'** | '{"x":9}' | '{"x":9}' | **'{"x":9}'** |
| **'{"b":999}'** | '$.a' | NULL | NULL | NULL | NULL |
+ +Important points about the table above: + + * The -> operator always returns either JSON or NULL. + + * The ->> operator never returns JSON. It always returns TEXT or NULL, or in the + case of SQLite, INTEGER or REAL. + + * The MySQL json_extract() function works exactly the same + as the MySQL -> operator. + + * The SQLite json_extract() operator works like -> for JSON objects and + arrays, and like ->> for JSON values. + + * The -> operator works the same for all systems. + + * The only difference in ->> between SQLite and other systems is that + when the JSON value is numeric, SQLite returns a numeric SQL value, + whereas the other systems return a text representation of the numeric + value. + +### 2.1 Abbreviated JSON path expressions for PG compatibility + +The table above always shows the full JSON path expression: '$.a'. But +PG does not accept this syntax. PG only allows a single JSON object label +name or a single integer array index. In order to provide compatibility +with PG, The -> and ->> operators in SQLite are extended to also support +a JSON object label or an integer array index for the right-hand side +operand, in addition to a full JSON path expression. + +Thus, a -> or ->> operator that works on MySQL will work in +SQLite. And a -> or ->> operator that works in PG will work in SQLite. +But because SQLite supports the union of the disjoint capabilities of +MySQL and PG, there will always be -> and ->> operators that work in +SQLite that do not work in one of MySQL and PG. This is an unavoidable +consequence of the different syntax for -> and ->> in MySQL and PG. + +In the following table, assume that "value1" is a JSON object and +"value2" is a JSON array. + + +
| SQL expression | Works in MySQL? | Works in PG? | Works in SQLite? |
|----------------|-----------------|--------------|------------------|
| value1->'$.a' | yes | no | yes |
| value1->'a' | no | yes | yes |
| value2->'$[2]' | yes | no | yes |
| value2->2 | no | yes | yes |
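To see the same behavior from application code, the following C sketch (not part of this document's procedures) evaluates a few expressions from the tables above and prints each result together with its SQL datatype, showing `->` returning JSON text while `->>` returns INTEGER for numeric values in SQLite. It assumes a system `libsqlite3` new enough to support the operators; the file name and build command are placeholders.

```c
/* arrow_ops.c -- sketch: exercise the -> and ->> operators from C.
** Hypothetical build:  gcc arrow_ops.c -lsqlite3 -o arrow_ops
*/
#include <stdio.h>
#include <sqlite3.h>

static void show(sqlite3 *db, const char *zSql){
  static const char *azType[] = {"?", "INTEGER", "FLOAT", "TEXT", "BLOB", "NULL"};
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)==SQLITE_OK
   && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("%-35s  %-10s %s\n", zSql,
           (const char*)sqlite3_column_text(pStmt, 0),
           azType[sqlite3_column_type(pStmt, 0)]);
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  show(db, "SELECT '{\"a\":123}' -> '$.a'");   /* JSON text: '123'          */
  show(db, "SELECT '{\"a\":123}' ->> '$.a'");  /* SQL integer: 123          */
  show(db, "SELECT '{\"a\":123}' ->> 'a'");    /* PG-style label, same: 123 */
  show(db, "SELECT '[6,7,8]' ->> 2");          /* PG-style array index: 8   */
  sqlite3_close(db);
  return 0;
}
```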
+ +The abbreviated JSON path expressions only work for the -> and ->> operators +in SQLite. The json_extract() function, and all other built-in SQLite +JSON functions, continue to require complete JSON path expressions for their +PATH arguments. + +## 3.0 JSON moved into the core + +The JSON interface is now moved into the SQLite core. + +When originally written in 2015, the JSON functions were an extension +that could be optionally included at compile-time, or loaded at run-time. +The implementation was in a source file named ext/misc/json1.c in the +source tree. JSON functions were only compiled in if the +-DSQLITE_ENABLE_JSON1 compile-time option was used. + +After these enhancements, the JSON functions are now built-ins. +The source file that implements the JSON functions is moved to src/json.c. +No special compile-time options are needed to load JSON into the build. +Instead, there is a new -DSQLITE_OMIT_JSON compile-time option to leave +them out. diff --git a/doc/jsonb.md b/doc/jsonb.md new file mode 100644 index 0000000000..63ce77b170 --- /dev/null +++ b/doc/jsonb.md @@ -0,0 +1,289 @@ +# The JSONB Format + +This document describes SQLite's JSONB binary encoding of +JSON. + +## 1.0 What Is JSONB? + +Beginning with version 3.45.0 (circa 2024-01-01), SQLite supports an +alternative binary encoding of JSON which we call "JSONB". JSONB is +a binary format that stored as a BLOB. + +The advantage of JSONB over ordinary text RFC 8259 JSON is that JSONB +is both slightly smaller (by between 5% and 10% in most cases) and +can be processed in less than half the number of CPU cycles. The built-in +[JSON SQL functions] of SQLite can accept either ordinary text JSON +or the binary JSONB encoding for any of their JSON inputs. + +The "JSONB" name is inspired by [PostgreSQL](https://postgresql.org), but the +on-disk format for SQLite's JSONB is not the same as PostgreSQL's. +The two formats have the same name, but they have wildly different internal +representations and are not in any way binary compatible. + +The central idea behind this JSONB specification is that each element +begins with a header that includes the size and type of that element. +The header takes the place of punctuation such as double-quotes, +curly-brackes, square-brackets, commas, and colons. Since the size +and type of each element is contained in its header, the element can +be read faster since it is no longer necessary to carefully scan forward +looking for the closing delimiter. The payload of JSONB is the same +as for corresponding text JSON. The same payload bytes occur in the +same order. The only real difference between JSONB and ordinary text +JSON is that JSONB includes a binary header on +each element and omits delimiter and separator punctuation. + +### 1.1 Internal Use Only + +The details of the JSONB are not intended to be visible to application +developers. Application developers should look at JSONB as an opaque BLOB +used internally by SQLite. Nevertheless, we want the format to be backwards +compatible across all future versions of SQLite. To that end, the format +is documented by this file in the source tree. But this file should be +used only by SQLite core developers, not by developers of applications +that only use SQLite. + +## 2.0 The Purpose Of This Document + +JSONB is not intended as an external format to be used by +applications. JSONB is designed for internal use by SQLite only. 
+Programmers do not need to understand the JSONB format in order to +use it effectively. +Applications should access JSONB only through the [JSON SQL functions], +not by looking at individual bytes of the BLOB. + +However, JSONB is intended to be portable and backwards compatible +for all future versions of SQLite. In other words, you should not have +to export and reimport your SQLite database files when you upgrade to +a newer SQLite version. For that reason, the JSONB format needs to +be well-defined. + +This document is therefore similar in purpose to the +[SQLite database file format] document that describes the on-disk +format of an SQLite database file. Applications are not expected +to directly read and write the bits and bytes of SQLite database files. +The SQLite database file format is carefully documented so that it +can be stable and enduring. In the same way, the JSONB representation +of JSON is documented here so that it too can be stable and enduring, +not so that applications can read or writes individual bytes. + +## 3.0 Encoding + +JSONB is a direct translation of the underlying text JSON. The difference +is that JSONB uses a binary encoding that is faster to parse compared to +the detailed syntax of text JSON. + +Each JSON element is encoded as a header and a payload. The header +determines type of element (string, numeric, boolean, null, object, or +array) and the size of the payload. The header can be between 1 and +9 bytes in size. The payload can be any size from zero bytes up to the +maximum allowed BLOB size. + +### 3.1 Payload Size + +The upper four bits of the first byte of the header determine size of the +header and possibly also the size of the payload. +If the upper four bits have a value between 0 and 11, then the header is +exactly one byte in size and the payload size is determined by those +upper four bits. If the upper four bits have a value between 12 and 15, +that means that the total header size is 2, 3, 5, or 9 bytes and the +payload size is unsigned big-endian integer that is contained in the +subsequent bytes. The size integer is the one byte that following the +initial header byte if the upper four bits +are 12, two bytes if the upper bits are 13, four bytes if the upper bits +are 14, and eight bytes if the upper bits are 15. The current design +of SQLite does not support BLOB values larger than 2GiB, so the eight-byte +variant of the payload size integer will never be used by the current code. +The eight-byte payload size integer is included in the specification +to allow for future expansion. + +The header for an element does *not* need to be in its simplest +form. For example, consider the JSON numeric value "`1`". +That element can be encode in five different ways: + + * `0x13 0x31` + * `0xc3 0x01 0x31` + * `0xd3 0x00 0x01 0x31` + * `0xe3 0x00 0x00 0x00 0x01 0x31` + * `0xf3 0x00 0x00 0x00 0x00 0x00 0x00 0x00 0x01 0x31` + +The shortest encoding is preferred, of course, and usually happens with +primitive elements such as numbers. However the total size of an array +or object might not be known exactly when the header of the element is +first generated. It is convenient to reserve space for the largest +possible header and then go back and fill in the correct payload size +at the end. This technique can result in array or object headers that +are larger than absolutely necessary. + +### 3.2 Element Type + +The least-significant four bits of the first byte of the header (the first +byte masked against 0x0f) determine element type. 
The following codes are +used: + +
    +
  1. NULL →
+The element is a JSON "null". The payload size for a true JSON NULL
+must be zero. Future versions of SQLite might extend the JSONB format
+with elements that have a zero element type but a non-zero size. In that
+way, legacy versions of SQLite will interpret the element as a NULL
+for backwards compatibility while newer versions will interpret the
+element in some other way.
+

  2. TRUE →
+The element is a JSON "true". The payload size must be zero for an actual
+"true" value. Elements with type 1 and a non-zero payload size are
+reserved for future expansion. Legacy implementations that see an element
+type of 1 with a non-zero payload size should continue to interpret that
+element as "true" for compatibility.
+

  3. FALSE →
+The element is a JSON "false". The payload size must be zero for an actual
+"false" value. Elements with type 2 and a non-zero payload size are
+reserved for future expansion. Legacy implementations that see an element
+type of 2 with a non-zero payload size should continue to interpret that
+element as "false" for compatibility.
+

  4. INT → +The element is a JSON integer value in the canonical +RFC 8259 format, without extensions. The payload is the ASCII +text representation of that numeric value. + +

  5. INT5 → +The element is a JSON integer literal in hexadecimal notation. +The payload is the ASCII text representation of the literal. +Because the payload is in a non-standard format, it will need +to be translated when the JSONB is converted into RFC 8259 text JSON. + +

  6. FLOAT → +The element is a JSON floating-point value in the canonical +RFC 8259 format, without extensions. The payload is the ASCII +text representation of that numeric value. + +

  7. FLOAT5 → +The element is a JSON floating-point value that is not in the +canonical JSON format but rather the extended JSON5 format. +The payload is the ASCII text representation of that numeric value. +Because the payload is in a non-standard format, it will need to +be translated when the JSONB is converted into RFC 8259 text JSON. + +

  8. TEXT → +The element is a JSON string value that does not contain +any escapes nor any characters that need to be escaped for either SQL or +JSON. The payload is the UTF8 text representation of the string value. +The payload does not include string delimiters. + +

  9. TEXTJ → +The element is a JSON string value that contains +RFC 8259 character escapes (such as "\n" or "\u0020"). +Those escapes will need to be translated into actual UTF8 if this element +is [json_extract|extracted] into SQL. +The payload is the UTF8 text representation of the escaped string value. +The payload does not include string delimiters. + +

  10. TEXT5 →
+The element is a JSON string value that contains
+character escapes, including some character escapes that are part of JSON5
+and which are not found in the canonical RFC 8259 spec.
+Those escapes will need to be translated into standard JSON prior to
+rendering the JSON as text, or into their actual UTF8 characters if this
+element is [json_extract|extracted] into SQL.
+The payload is the UTF8 text representation of the escaped string value.
+The payload does not include string delimiters.
+

  11. TEXTRAW → +The element is a JSON string value that contains +UTF8 characters that need to be escaped if this string is rendered into +standard JSON text. +The payload does not include string delimiters. + +

  12. ARRAY → +The element is a JSON array. The payload contains +JSONB elements that comprise values contained within the array. + +

  13. OBJECT →
+The element is a JSON object. The payload contains
+pairs of JSONB elements that comprise entries for the JSON object.
+The first element in each pair must be a string (types 7 through 10).
+The second element of each pair may be of any type, including nested
+arrays or objects.
+

  14. RESERVED-13 →
+Reserved for future expansion. Legacy implementations that encounter this
+element type should raise an error.
+

  15. RESERVED-14 →
+Reserved for future expansion. Legacy implementations that encounter this
+element type should raise an error.
+

  16. RESERVED-15 →
+Reserved for future expansion. Legacy implementations that encounter this
+element type should raise an error.
+
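Although the list above is rendered with 1-based item numbers, the type codes themselves start at zero: the NULL entry explicitly mentions a "zero element type", TRUE and FALSE are described as types 1 and 2, and the string forms are the "types 7 through 10" referenced under OBJECT. A small C enum summarizing the codes implied by that text (the identifier names here are illustrative, not SQLite's internal API):

```c
/* Element type codes carried in the low nibble of the first header byte,
** as described above.  Values 13-15 are reserved for future expansion.
*/
enum JsonbElementType {
  JSONB_NULL    = 0,
  JSONB_TRUE    = 1,
  JSONB_FALSE   = 2,
  JSONB_INT     = 3,
  JSONB_INT5    = 4,
  JSONB_FLOAT   = 5,
  JSONB_FLOAT5  = 6,
  JSONB_TEXT    = 7,
  JSONB_TEXTJ   = 8,
  JSONB_TEXT5   = 9,
  JSONB_TEXTRAW = 10,
  JSONB_ARRAY   = 11,
  JSONB_OBJECT  = 12
};
```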

+ +Element types outside the range of 0 to 12 are reserved for future +expansion. The current implement raises an error if see an element type +other than those listed above. However, future versions of SQLite might +use of the three remaining element types to implement indexing or similar +optimizations, to speed up lookup against large JSON arrays and/or objects. + +### 3.3 Design Rationale For Element Types + +A key goal of JSONB is that it should be quick to translate +to and from text JSON and/or be constructed from SQL values. +When converting from text into JSONB, we do not want the +converter subroutine to burn CPU cycles converting elements +values into some standard format which might never be used. +Format conversion is "lazy" - it is deferred until actually +needed. This has implications for the JSONB format design: + + 1. Numeric values are stored as text, not a numbers. The values are + a direct copy of the text JSON values from which they are derived. + + 2. There are multiple element types depending on the details of value + formats. For example, INT is used for pure RFC-8259 integer + literals and INT5 exists for JSON5 extensions such as hexadecimal + notation. FLOAT is used for pure RFC-8259 floating point literals + and FLOAT5 is used for JSON5 extensions. There are four different + representations of strings, depending on where the string came from + and how special characters within the string are escaped. + +A second goal of JSONB is that it should be capable of serving as the +"parse tree" for JSON when a JSON value is being processed by the +various [JSON SQL functions] built into SQLite. Before JSONB was +developed, operations such [json_replace()] and [json_patch()] +and similar worked in three stages: + + + 1. Translate the text JSON into a internal format that is + easier to scan and edit. + 2. Perform the requested operation on the JSON. + 3. Translate the internal format back into text. + +JSONB seeks to serve as the internal format directly - bypassing +the first and third stages of that process. Since most of the CPU +cycles are spent on the first and third stages, that suggests that +JSONB processing will be much faster than text JSON processing. + +So when processing JSONB, only the second stage of the three-stage +process is required. But when processing text JSON, it is still necessary +to do stages one and three. If JSONB is to be used as the internal +binary representation, this is yet another reason to store numeric +values as text. Storing numbers as text minimizes the amount of +conversion work needed for stages one and three. This is also why +there are four different representations of text in JSONB. Different +text representations are used for text coming from different sources +(RFC-8259 JSON, JSON5, or SQL string values) and conversions only +happen if and when they are actually needed. + +### 3.4 Valid JSONB BLOBs + +A valid JSONB BLOB consists of a single JSON element. The element must +exactly fill the BLOB. This one element is often a JSON object or array +and those usually contain additional elements as its payload, but the +element can be a primitive value such a string, number, boolean, or null. + +When the built-in JSON functions are attempting to determine if a BLOB +argument is a JSONB or just a random BLOB, they look at the header of +the outer element to see that it is well-formed and that the element +completely fills the BLOB. If these conditions are met, then the BLOB +is accepted as a JSONB value. 
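Because the header layout is completely specified above (low nibble = element type, high nibble = payload-size encoding), a decoder for a single element header can be sketched in a few lines of C. This is a hedged illustration of the format description, not code taken from SQLite; the function name and return convention are invented for the example.

```c
#include <stddef.h>
#include <stdint.h>

/* Sketch: decode one JSONB element header as described above.  Returns the
** number of header bytes consumed, or 0 if the blob is too short.  *pType
** receives the low-nibble element type (0..15) and *pPayloadSz the payload
** size in bytes.
*/
static size_t jsonbDecodeHeader(const uint8_t *p, size_t n,
                                int *pType, uint64_t *pPayloadSz){
  uint8_t upper;
  size_t i, nExtra;
  uint64_t sz = 0;
  if( n<1 ) return 0;
  *pType = p[0] & 0x0f;
  upper = (uint8_t)(p[0] >> 4);
  if( upper<=11 ){            /* size fits directly in the upper nibble */
    *pPayloadSz = upper;
    return 1;
  }
  nExtra = (size_t)1 << (upper - 12);  /* 12,13,14,15 -> 1,2,4,8 size bytes */
  if( n < 1+nExtra ) return 0;
  for(i=0; i<nExtra; i++) sz = (sz<<8) | p[1+i];  /* big-endian size */
  *pPayloadSz = sz;
  return 1 + nExtra;
}
```

Feeding this routine any of the five encodings of the integer "1" shown in section 3.1 yields element type 3 (INT) and payload size 1; the encodings differ only in the number of header bytes consumed.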
diff --git a/doc/lemon.html b/doc/lemon.html index 324b3f3319..965f305c04 100644 --- a/doc/lemon.html +++ b/doc/lemon.html @@ -322,7 +322,7 @@

3.2.1 Allocating The Parse Object On Stack

  • Declare a local variable of type "yyParser"
  • Initialize the variable using ParseInit() -
  • Pass a pointer to the variable in calls ot Parse() +
  • Pass a pointer to the variable in calls to Parse()
  • Deallocate substructure in the parse variable using ParseFinalize().
@@ -683,6 +683,7 @@

4.4 Special Directives

  • %endif
  • %extra_argument
  • %fallback +
  • %free
  • %if
  • %ifdef
  • %ifndef @@ -693,6 +694,7 @@

    4.4 Special Directives

  • %parse_accept
  • %parse_failure
  • %right +
  • %realloc
  • %stack_overflow
  • %stack_size
  • %start_symbol @@ -844,7 +846,7 @@

    4.4.7 The %fallback directive

    would have generated a syntax error.

    The %fallback directive was added to support robust parsing of SQL -syntax in SQLite. +syntax in SQLite. The SQL language contains a large assortment of keywords, each of which appears as a different token to the language parser. SQL contains so many keywords that it can be difficult for programmers to keep up with @@ -879,7 +881,7 @@

    4.4.8 The %if directive and its friends

    Grammar text in between "%ifdef MACRO" and the next nested "%endif" is ignored unless the "-DMACRO" command-line option is used. Grammar text -betwen "%ifndef MACRO" and the next nested "%endif" is +between "%ifndef MACRO" and the next nested "%endif" is included except when the "-DMACRO" command-line option is used.

    The text in between "%if CONDITIONAL" and its @@ -1200,6 +1202,21 @@

    4.4.25 The %wildcard directive

    the wildcard token and some other token, the other token is always used. The wildcard token is only matched if there are no alternatives.

    + +

    4.4.26 The %realloc and %free directives

    + +

The %realloc and %free directives define functions
+that allocate and free heap memory. The signatures of these functions
+should be the same as the realloc() and free() functions from the standard
+C library.
+

    If both of these functions are defined +then these functions are used to allocate and free +memory for supplemental parser stack space, if the initial +parse stack space is exceeded. The initial parser stack size +is specified by either %stack_size or the +-DYYSTACKDEPTH compile-time flag. +
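As a concrete illustration of the paragraph above, functions suitable for handing to %realloc and %free simply mirror the standard library signatures. The wrapper names below are invented for the example, and the directive syntax itself is not reproduced here; see the directive descriptions in this document for how they are declared in the grammar file.

```c
#include <stdlib.h>

/* Candidate functions for the %realloc and %free directives.  Their
** signatures match realloc() and free() from the standard C library, as
** required above.  A real grammar might pool or instrument allocations
** instead of forwarding straight to libc.
*/
void *parserRealloc(void *p, size_t n){ return realloc(p, n); }
void parserFree(void *p){ free(p); }
```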

    5.0 Error Processing

    @@ -1224,13 +1241,14 @@

    5.0 Error Processing

    first syntax error, of course, if there are no instances of the "error" non-terminal in your grammar.

    +

    6.0 History of Lemon

    Lemon was originally written by Richard Hipp sometime in the late 1980s on a Sun4 Workstation using K&R C. -There was a companion LL(1) parser generator program named "Lime", the -source code to which as been lost.

    +There was a companion LL(1) parser generator program named "Lime". +The Lime source code has been lost.

    The lemon.c source file was originally many separate files that were compiled together to generate the "lemon" executable. Sometime in the diff --git a/doc/pager-invariants.txt b/doc/pager-invariants.txt index 44444dad54..0fea0a698d 100644 --- a/doc/pager-invariants.txt +++ b/doc/pager-invariants.txt @@ -45,7 +45,7 @@ *** Definition: Two databases (or the same database at two points it time) are said to be "logically equivalent" if they give the same answer to all queries. Note in particular the content of freelist leaf - pages can be changed arbitarily without effecting the logical equivalence + pages can be changed arbitrarily without effecting the logical equivalence of the database. (7) At any time, if any subset, including the empty set and the total set, diff --git a/doc/tcl-extension-testing.md b/doc/tcl-extension-testing.md new file mode 100644 index 0000000000..eb2a8c3a3b --- /dev/null +++ b/doc/tcl-extension-testing.md @@ -0,0 +1,264 @@ +# Test Procedures For The SQLite TCL Extension + +## 1.0 Background + +The SQLite TCL extension logic (in the +"[tclsqlite.c](/file/src/tclsqlite.c)" source +file) is statically linked into "textfixture" executable +which is the program used to do most of the testing +associated with "make test", "make devtest", and/or +"make releasetest". So the functionality of the SQLite +TCL extension is thoroughly vetted during normal testing. The +procedures below are designed to test the loadable extension +aspect of the SQLite TCL extension, and in particular to verify +that the "make tclextension-install" build target works and that +an ordinary tclsh can subsequently run "package require sqlite3". + +This procedure can also be used as a template for how to set up +a local TCL+SQLite development environment. In other words, it +can be be used as a guide on how to compile per-user copies of +Tcl that are used to develop, test, and debug SQLite. In that +case, perhaps make minor changes to the procedure such as: + + * Make TCLBUILD directory is permanent. + * Enable debugging symbols on the Tcl library build. + * Reduce the optimization level to -O0 for easier debugging. + * Also compile "wish" to go with each "tclsh". + + + +## 2.0 Testing On Unix-like Systems (Including Mac) + +See also the [](./compile-for-unix.md) document which provides another +perspective on how to compile SQLite on unix-like systems. + +### 2.1 Setup + +

      +
    1. + [Fossil][] installed. +
    2. Check out source code and set environment variables: +
        +
      1. **TCLSOURCE** → + The top-level directory of a [Fossil][] check-out of the + [TCL source tree][tcl-fossil]. +
      2. **SQLITESOURCE** → + A Fossil check-out of the SQLite source tree. +
      3. **TCLHOME** → + A directory that does not exist at the start of the test and which + will be deleted at the end of the test, and that will contain the + test builds of the TCL libraries and the SQLite TCL Extensions. + It is the top-most installation directory, i.e. the one provided + to Tcl's `./configure --prefix=/path/to/tcl`. +
      4. **TCLVERSION** → + The `X.Y`-form version of Tcl being used: 8.6, 9.0, 9.1... +
      +
    + +### 2.2 Testing TCL 8.x and 9.x on unix + +From a checked-out copy of [the core Tcl tree][tcl-fossil] + +
      +
    1. `TCLVERSION=8.6`
  + ↑ A version of your choice. This process has been tested with + values of 8.6, 9.0, and 9.1 (as of 2025-04-16). The end-of-life + version 8.5 fails some of `make devtest` for undetermined reasons. +
    2. `TCLHOME=$HOME/tcl/$TCLVERSION` +
    3. `TCLSOURCE=/path/to/tcl/checkout` +
    4. `SQLITESOURCE=/path/to/sqlite/checkout` +
    5. `rm -fr $TCLHOME`
  + ↑ Ensure that no stale Tcl installation is lying around. +
    6. `cd $TCLSOURCE` +
    7. `fossil up core-8-6-branch`
      + ↑ The branch corresponding to `$TCLVERSION`, e.g. + `core-9-0-branch` or `trunk`. +
    8. `fossil clean -x` +
    9. `cd unix` +
    10. `./configure --prefix=$TCLHOME --disable-shared`
      + ↑ The `--disable-shared` is to avoid the need to set `LD_LIBRARY_PATH` + when using this Tcl build. +
    11. `make install` +
    12. `cd $SQLITESOURCE` +
    13. `fossil clean -x` +
    14. `./configure --with-tcl=$TCLHOME --all` +
    15. `make tclextension-install`
      + ↑ Verify extension installed at + `$TCLHOME/lib/tcl${TCLVERSION}/sqlite`. +
    16. `make tclextension-list`
      + ↑ Verify TCL extension correctly installed. +
    17. `make tclextension-verify`
  + ↑ Verify that the correct version is installed (a manual spot-check is sketched just after this list). +
    18. `$TCLHOME/bin/tclsh[89].[0-9] test/testrunner.tcl release --explain`
      + ↑ Verify thousands of lines of output with no errors. Or + consider running "devtest" without --explain instead of "release". +
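The result of steps 15-17 can also be spot-checked by hand from the tclsh built above. The snippet below is only a sketch of such a manual check (it assumes the `TCLHOME`/`TCLVERSION` values set earlier; adjust the tclsh executable name, e.g. `tclsh9.0`, to match your `TCLVERSION`):

```
# Run interactively with $TCLHOME/bin/tclsh9.0 (name varies with TCLVERSION).
package require sqlite3          ;# should load from $TCLHOME/lib/tcl$TCLVERSION/sqlite
puts "package version: [package provide sqlite3]"
sqlite3 db :memory:              ;# open a scratch in-memory database
puts "library version: [db eval {SELECT sqlite_version()}]"
db close
```

If `package require sqlite3` fails, or `package provide sqlite3` reports an unexpected version, the package was most likely resolved from somewhere other than the directory populated in step 15.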
    + +### 2.3 Cleanup + +
      +
    1. `rm -rf $TCLHOME` +
    + + +## 3.0 Testing On Windows + +See also the [](./compile-for-windows.md) document which provides another +perspective on how to compile SQLite on Windows. + +### 3.1 Setup for Windows + +(These docs are not as up-to-date as the Unix docs, above.) + +
      +
    1. + [Fossil][] installed. +
    2. + Unix-like command-line tools installed. Example: + [unxutils](https://unxutils.sourceforge.net/) +
    3. [Visual Studio](https://visualstudio.microsoft.com/vs/community/) + installed. VS2015 or later required. +
    4. Check out source code and set environment variables. +
        +
      1. **TCLSOURCE** → + The top-level directory of a Fossil check-out of the TCL source tree. +
      2. **SQLITESOURCE** → + A Fossil check-out of the SQLite source tree. +
      3. **TCLBUILD** → + A directory that does not exist at the start of the test and which + will be deleted at the end of the test, and that will contain the + test builds of the TCL libraries and the SQLite TCL Extensions. +
      4. **ORIGINALPATH** → + The original value of %PATH%. In other words, set as follows: + `set ORIGINALPATH %PATH%` +
      +
    + +### 3.2 Testing TCL 8.6 on Windows + +
      +
    1. `mkdir %TCLBUILD%\tcl86` +
    2. `cd %TCLSOURCE%\win` +
    3. `fossil up core-8-6-16`
      + ↑ Or some other version of Tcl8.6. +
    4. `fossil clean -x` +
    5. `set INSTALLDIR=%TCLBUILD%\tcl86` +
    6. `nmake /f makefile.vc release`
      + ⇅ You *must* invoke the "release" and "install" targets + using separate "nmake" commands or tclsh86t.exe won't be + installed. +
    7. `nmake /f makefile.vc install` +
    8. `cd %SQLITESOURCE%` +
    9. `fossil clean -x` +
    10. `set TCLDIR=%TCLBUILD%\tcl86` +
    11. `set PATH=%TCLBUILD%\tcl86\bin;%ORIGINALPATH%` +
    12. `set TCLSH_CMD=%TCLBUILD%\tcl86\bin\tclsh86t.exe` +
    13. `nmake /f Makefile.msc tclextension-install`
      + ↑ Verify extension installed at %TCLBUILD%\\tcl86\\lib\\tcl8.6\\sqlite3.* +
    14. `nmake /f Makefile.msc tclextension-verify` +
    15. `tclsh86t test/testrunner.tcl release --explain`
      + ↑ Verify thousands of lines of output with no errors. Or + consider running "devtest" without --explain instead of "release". +
    + +### 3.3 Testing TCL 9.0 on Windows + +
      +
    1. `mkdir %TCLBUILD%\tcl90` +
    2. `cd %TCLSOURCE%\win` +
    3. `fossil up core-9-0-0`
      + ↑ Or some other version of Tcl9 +
    4. `fossil clean -x` +
    5. `set INSTALLDIR=%TCLBUILD%\tcl90` +
    6. `nmake /f makefile.vc release`
      + ⇅ You *must* invoke the "release" and "install" targets + using separate "nmake" commands or tclsh90.exe won't be + installed. +
    7. `nmake /f makefile.vc install` +
    8. `cd %SQLITESOURCE%` +
    9. `fossil clean -x` +
    10. `set TCLDIR=%TCLBUILD%\tcl90` +
    11. `set PATH=%TCLBUILD%\tcl90\bin;%ORIGINALPATH%` +
    12. `set TCLSH_CMD=%TCLBUILD%\tcl90\bin\tclsh90.exe` +
    13. `nmake /f Makefile.msc tclextension-install`
      + ↑ Verify extension installed at %TCLBUILD%\\tcl90\\lib\\sqlite3.* +
    14. `nmake /f Makefile.msc tclextension-verify` +
    15. `tclsh90 test/testrunner.tcl release --explain`
      + ↑ Verify thousands of lines of output with no errors. Or + consider running "devtest" without --explain instead of "release". +
    + +### 3.4 Cleanup + +
      +
    1. `rm -rf %TCLBUILD%` +
+ +## 4.0 Testing the TEA(ish) Build (unix only) + +This part requires following the setup instructions for Unix systems, +at the top of this document. + +The former TEA, now TEA(ish), build of this extension uses the same +code as the builds described above but is provided in a form more +convenient for downstream Tcl users. + +It lives in `autoconf/tea` and, as part of the `autoconf` bundle, +_cannot be tested directly from the canonical tree_. Instead it has to +be packaged. + +### 4.1 Teaish Setup + +Follow the same Tcl- and environment-related setup described +in the first section of this document, up to and including the +installation of Tcl (unless, of course, it was already installed using +those same instructions). + +### 4.2 Teaish Testing + +
      +
    1. `cd $SQLITESOURCE` +
    2. Run either `make snapshot-tarball` or `make amalgamation-tarball` + ↑ + Those steps will leave behind a temp dir called `mkpkg_tmp_dir`, + under which the extension is most readily reached. It can optionally + be extracted from the generated tarball, but that tarball was + generated from this dir, and reusing this dir is a time saver + during development. +
3. `cd mkpkg_tmp_dir/tea` +
    4. `./configure --with-tcl=$TCLHOME` +
    5. `make test install`
  + ↑ Should run to completion without any errors (a manual spot-check, usable before the next step, is sketched just after this list). +
    6. `make uninstall`
      + ↑ Will uninstall the extension. This _can_ be run + in the same invocation as the `install` target, but only + if the `-j#` make flag is _not_ used. If it is, the + install/uninstall steps will race and make a mess of things. + Parallel builds do not help in this build, anyway, as there's + only a single C file to compile. +
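Between the `install` and `uninstall` steps above, the result of `make test install` can be spot-checked from the same per-user tclsh. This is a hypothetical check, not one of the teaish makefile targets; it simply shows which copy of the extension the interpreter resolves:

```
# Run with $TCLHOME/bin/tclsh<version>, i.e. the per-user Tcl built earlier in this document.
set v [package require sqlite3]
puts "sqlite3 $v provided via: [package ifneeded sqlite3 $v]"
```

The load script printed by `package ifneeded` should point into `$TCLHOME`, confirming that the copy just installed by the teaish build is the one being loaded.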
+ +When actively developing and testing the teaish build, which requires +going through the tarball generation, there's a caveat about the +`mkpkg_tmp_dir` dir: it will be deleted every time a tarball is +built, so the shell console which is parked in that +directory for testing needs to add `cd $PWD &&` to the start of the +build commands, like: + +> +``` +[user@host:.../mkpkg_tmp_dir/tea]$ \ + cd $PWD && ./configure CFLAGS=-O0 --with-tcl=$TCLHOME \ + && make test install uninstall +``` + +### 4.3 Teaish Cleanup + + +
      +
    1. `rm -rf $TCLHOME` +
    2. `cd $SQLITESOURCE; rm -fr mkpkg_tmp_dir; fossil clean -x` +
+ +[Fossil]: https://fossil-scm.org/home +[tcl-fossil]: https://core.tcl-lang.org/tcl diff --git a/doc/testrunner.md b/doc/testrunner.md new file mode 100644 index 0000000000..d1696e9d1d --- /dev/null +++ b/doc/testrunner.md @@ -0,0 +1,404 @@ + + +# The testrunner.tcl Script + + + + +# 1. Overview + +The testrunner.tcl program is a Tcl script used to run multiple SQLite +tests in parallel, thus reducing testing time on multi-core machines. +It supports the following types of tests: + + * Tcl test scripts. + + * Fuzzcheck tests, including using an external fuzzcheck database. + + * Tests run with `make` commands. Examples: + - `make devtest` + - `make releasetest` + - `make sdevtest` + - `make testrunner` + +The testrunner.tcl program stores output of all tests and builds run in +log file **testrunner.log**, created in the current working directory. +Search this file to find details of errors. Suggested search commands: + + * `grep "^!" testrunner.log` + * `grep failed testrunner.log` + +The testrunner.tcl program also populates SQLite database **testrunner.db**. +This database contains details of all tests run, running and to be run. +A useful query might be: + +``` + SELECT * FROM script WHERE state='failed' +``` + +You can get a summary of errors in a prior run by invoking commands like +these: + +``` + tclsh $(TESTDIR)/testrunner.tcl errors + tclsh $(TESTDIR)/testrunner.tcl errors -v +``` + +Running the command: + +``` + tclsh $(TESTDIR)/testrunner.tcl status +``` + +in the directory containing the testrunner.db database runs various queries +to produce a succinct report on the state of a running testrunner.tcl script. +A good way to keep an eye on test progress is to run either of the two +following commands: + +``` + watch tclsh $(TESTDIR)/testrunner.tcl status + tclsh $(TESTDIR)/testrunner.tcl status -d 2 +``` + +Both of the commands above accomplish about the same thing, but the second +one has the advantage of not requiring "watch" to be installed on your +system. + +Sometimes testrunner.tcl uses the `testfixture` binary that it is run with +to run tests (see "Binary Tests" below). Sometimes it builds testfixture and +other binaries in specific configurations to test (see "Source Tests"). + + +# 2. Binary Tests + +The commands described in this section all run various combinations of the Tcl +test scripts using the `testfixture` binary used to run the testrunner.tcl +script (i.e. they do not invoke the compiler to build new binaries, or the +`make` command to run tests that are not Tcl scripts). The procedure to run +these tests is therefore: + + 1. Build the "testfixture" (or "testfixture.exe" for windows) binary using + whatever method seems convenient. + + 2. Test the binary built in step 1 by running testrunner.tcl with it, + perhaps with various options. + +The following sub-sections describe the various options that can be +passed to testrunner.tcl to test binary testfixture builds. + + +## 2.1. Organization of Tcl Tests + +Tcl tests are stored in files that match the pattern *\*.test*. They are +found in both the $TOP/test/ directory, and in the various sub-directories +of the $TOP/ext/ directory of the source tree. Not all *\*.test* files +contain Tcl tests - a handful are Tcl scripts designed to invoke other +*\*.test* files. + +The **veryquick** set of tests is a subset of all Tcl test scripts in the +source tree. It includes most tests, but excludes some that are very slow. 
+Almost all fault-injection tests (those that test the response of the library +to OOM or IO errors) are excluded. It is defined in source file +*test/permutations.test*. + +The **full** set of tests includes all Tcl test scripts in the source tree. +To run a "full" test is to run all Tcl test scripts that can be found in the +source tree. + +File *permutations.test* defines various test "permutations". A permutation +consists of: + + * A subset of Tcl test scripts, and + + * Runtime configuration to apply before running each test script + (e.g. enabling auto-vacuum, or disabling lookaside). + +Running **all** tests is to run all tests in the full test set, plus a dozen +or so permutations. The specific permutations that are run as part of "all" +are defined in file *testrunner_data.tcl*. + + +## 2.2. Commands to Run Tests + +To run the "veryquick" test set, use either of the following: + +``` + ./testfixture $TESTDIR/testrunner.tcl + ./testfixture $TESTDIR/testrunner.tcl veryquick +``` + +To run the "full" test suite: + +``` + ./testfixture $TESTDIR/testrunner.tcl full +``` + +To run the subset of the "full" test suite for which the test file name matches +a specified pattern (e.g. all tests that start with "fts5"), either of: + +``` + ./testfixture $TESTDIR/testrunner.tcl fts5% + ./testfixture $TESTDIR/testrunner.tcl 'fts5*' +``` + +Strictly speaking, for a test to be run the pattern must match the script +filename, not including the directory, using the rules of Tcl's +\[string match\] command. Except that before the matching is done, any "%" +characters specified as part of the pattern are transformed to "\*". + + +To run "all" tests (full + permutations): + +``` + ./testfixture $TESTDIR/testrunner.tcl all +``` + + +## 2.3. Investigating Binary Test Failures + +If a test fails, testrunner.tcl reports the name of the Tcl test script and, if +applicable, the name of the permutation, to stdout. This information can also +be retrieved from either *testrunner.log* or *testrunner.db*. + +If there is no permutation, the individual test script may be run with: + +``` + ./testfixture $PATH_TO_SCRIPT +``` + +Or, if the failure occurred as part of a permutation: + +``` + ./testfixture $TESTDIR/testrunner.tcl $PERMUTATION $PATH_TO_SCRIPT +``` + +TODO: An example instead of "$PERMUTATION" and $PATH\_TO\_SCRIPT? + + +# 3. Source Code Tests + +The commands described in this section invoke the C compiler to build +binaries from the source tree, then use those binaries to run Tcl and +other tests. The advantages of this are that: + + * it is possible to test multiple build configurations with a single + command, and + + * it ensures that tests are always run using binaries created with the + same set of compiler options. + +The testrunner.tcl commands described in this section may be run using +either a *testfixture* (or testfixture.exe) build, or with any other Tcl +shell that supports SQLite 3.31.1 or newer via "package require sqlite3". + +TODO: ./configure + Makefile.msc build systems. + + +## 3.1. 
Commands to Run SQLite Tests + +The **mdevtest** command is equivalent to running the veryquick tests and +the `make fuzztest` target once for each of two --enable-all builds - one +with debugging enabled and one without: + +``` + tclsh $TESTDIR/testrunner.tcl mdevtest +``` + +In other words, it is equivalent to running: + +``` + $TOP/configure --enable-all --enable-debug + make fuzztest + make testfixture + ./testfixture $TOP/test/testrunner.tcl veryquick + + # Then, after removing files created by the tests above: + $TOP/configure --enable-all OPTS="-O0" + make fuzztest + make testfixture + ./testfixture $TOP/test/testrunner.tcl veryquick +``` + +The **sdevtest** command is identical to the mdevtest command, except that the +second of the two builds is a sanitizer build. Specifically, this means that +OPTS="-fsanitize=address,undefined" is specified instead of OPTS="-O0": + +``` + tclsh $TESTDIR/testrunner.tcl sdevtest +``` + +The **release** command runs lots of tests under lots of builds. It runs +different combinations of builds and tests depending on whether it is run +on Linux, Windows or OSX. Refer to *testrunner\_data.tcl* for the details +of the specific tests run. + +``` + tclsh $TESTDIR/testrunner.tcl release +``` + +As with binary tests, one or more patterns +may be appended to any of the above commands (mdevtest, sdevtest or release). +Pattern matching is used for both Tcl tests and fuzz tests. + +``` + tclsh $TESTDIR/testrunner.tcl release rtree% +``` + + +## 3.2. Running ZipVFS Tests + +testrunner.tcl can build a zipvfs-enabled testfixture and use it to run +tests from the Zipvfs project with the following command: + +``` + tclsh $TESTDIR/testrunner.tcl --zipvfs $PATH_TO_ZIPVFS +``` + +This can be combined with any of "mdevtest", "sdevtest" or "release" to +test both SQLite and Zipvfs with a single command: + +``` + tclsh $TESTDIR/testrunner.tcl --zipvfs $PATH_TO_ZIPVFS mdevtest +``` + + +## 3.3. Investigating Source Code Test Failures + +Investigating a test failure that occurs during source code testing is a +two-step process: + + 1. Recreating the build configuration in which the test failed, and + + 2. Re-running the actual test. + +To recreate a build configuration, use the testrunner.tcl **script** command +to create a build script. A build script is a bash script on Linux or OSX, or +a dos \*.bat file on windows. For example: + +``` + # Create a script that recreates build configuration "Device-One" on + # Linux or OSX: + tclsh $TESTDIR/testrunner.tcl script Device-One > make.sh + + # Create a script that recreates build configuration "Have-Not" on Windows: + tclsh $TESTDIR/testrunner.tcl script Have-Not > make.bat +``` + +The generated bash or \*.bat file script accepts a single argument - a makefile +target to build. This may be used either to run a `make` command test directly, +or else to build a testfixture (or testfixture.exe) binary with which to +run a Tcl test script, as described above. + + +## 3.4 External Fuzzcheck Databases + +Testrunner.tcl will also run fuzzcheck against an external (out of tree) +database, for example fuzzcheck databases generated by dbsqlfuzz. To do +this, simply add the "`--fuzzdb` *FILENAME*" command-line option or set +the FUZZDB environment variable to the name of the external +database. For large external databases, testrunner.tcl will automatically use +the "`--slice`" command-line option of fuzzcheck to divide the work up into +multiple jobs, to increase parallelism. 
+ +Thus, for example, to run a full releasetest including an external +dbsqlfuzz database, run a command like one of these: + +``` + tclsh test/testrunner.tcl releasetest --fuzzdb ../fuzz/20250415.db + FUZZDB=../fuzz/20250415.db make releasetest + nmake /f Makefile.msc FUZZDB=../fuzz/20250415.db releasetest +``` + +The patternlist option to testrunner.tcl will match against fuzzcheck +databases. So if you want to run *only* tests involving the external +database, you can use a command something like this: + +``` + tclsh test/testrunner.tcl releasetest 20250415 --fuzzdb ../fuzz/20250415.db +``` + + +# 4. Extra testrunner.tcl Options + +The testrunner.tcl script options in this section may be used with both source +code and binary tests. + +The **--buildonly** option instructs testrunner.tcl just to build the binaries +required by a test, not to run any actual tests. For example: + +``` + # Build binaries required by release test. + tclsh $TESTDIR/testrunner.tcl --buildonly release +``` + +The **--dryrun** option prevents testrunner.tcl from building any binaries +or running any tests. Instead, it just writes the shell commands that it +would normally execute into the testrunner.log file. Example: + +``` + # Log the shell commands that make up the mdevtest test. + tclsh $TESTDIR/testrunner.tcl --dryrun mdevtest +``` + +The **--explain** option is similar to --dryrun in that it prevents +testrunner.tcl from building any binaries or running any tests. The +difference is that --explain prints on standard output a human-readable +summary of all the builds and tests that would have been run. + +``` + # Show what builds and tests would have been run + tclsh $TESTDIR/testrunner.tcl --explain mdevtest +``` + +The **--status** option uses VT100 escape sequences to display the test +status full-screen. This is similar to running +"`watch test/testrunner status`" in a separate window, just more convenient. +Unfortunately, this option does not work correctly on Windows, due to the +sketchy implementation of VT100 escapes on the Windows console. + + +# 5. Controlling CPU Core Utilization + +When running either binary or source code tests, testrunner.tcl reports the +number of jobs it intends to use to stdout. e.g. + +``` + $ ./testfixture $TESTDIR/testrunner.tcl + splitting work across 16 jobs + ... more output ... +``` + +By default, testrunner.tcl attempts to set the number of jobs to the number +of real cores on the machine. This can be overridden using the "--jobs" (or -j) +switch: + +``` + $ ./testfixture $TESTDIR/testrunner.tcl --jobs 8 + splitting work across 8 jobs + ... more output ... +``` + +The number of jobs may also be changed while an instance of testrunner.tcl is +running by executing the following command from the directory containing the +testrunner.log and testrunner.db files: + +``` + $ ./testfixture $TESTDIR/testrunner.tcl njob $NEW_NUMBER_OF_JOBS +``` diff --git a/doc/vfs-shm.txt b/doc/vfs-shm.txt index c1f125a120..a483e9b159 100644 --- a/doc/vfs-shm.txt +++ b/doc/vfs-shm.txt @@ -1,6 +1,6 @@ The 5 states of an historical rollback lock as implemented by the xLock, xUnlock, and xCheckReservedLock methods of the sqlite3_io_methods -objec are: +object are: UNLOCKED SHARED @@ -58,7 +58,7 @@ The meanings of the various wal-index locking states is as follows: A particular lock manager implementation may coalesce one or more of the wal-index locking states, though with a reduction in concurrency. 
-For example, an implemention might implement only exclusive locking, +For example, an implementation might implement only exclusive locking, in which case all states would be equivalent to CHECKPOINT, meaning that only one reader or one writer or one checkpointer could be active at a time. Or, an implementation might combine READ and READ_FULL into diff --git a/doc/wal-lock.md b/doc/wal-lock.md index d74bb88b63..8df7cc836c 100644 --- a/doc/wal-lock.md +++ b/doc/wal-lock.md @@ -12,7 +12,7 @@ facilitates transfer of OS priority between processes when a high priority process is blocked by a lower priority one. Only read/write clients use blocking locks. Clients that have read-only access -to the \*-shm file nevery use blocking locks. +to the \*-shm file never use blocking locks. Threads or processes that access a single database at a time never deadlock as a result of blocking database locks. But it is of course possible for threads diff --git a/ext/README.md b/ext/README.md index 933a33d053..78312819ab 100644 --- a/ext/README.md +++ b/ext/README.md @@ -1,6 +1,6 @@ ## Loadable Extensions -Various [loadable extensions](https://www.sqlite.org/loadext.html) for +Various [loadable extensions](https://sqlite.org/loadext.html) for SQLite are found in subfolders. Most subfolders are dedicated to a single loadable extension (for diff --git a/ext/async/README.txt b/ext/async/README.txt deleted file mode 100644 index f62fa2fc17..0000000000 --- a/ext/async/README.txt +++ /dev/null @@ -1,170 +0,0 @@ -NOTE (2012-11-29): - -The functionality implemented by this extension has been superseded -by WAL-mode. This module is no longer supported or maintained. The -code is retained for historical reference only. - ------------------------------------------------------------------------------- - -Normally, when SQLite writes to a database file, it waits until the write -operation is finished before returning control to the calling application. -Since writing to the file-system is usually very slow compared with CPU -bound operations, this can be a performance bottleneck. This directory -contains an extension that causes SQLite to perform all write requests -using a separate thread running in the background. Although this does not -reduce the overall system resources (CPU, disk bandwidth etc.) at all, it -allows SQLite to return control to the caller quickly even when writing to -the database, eliminating the bottleneck. - - 1. Functionality - - 1.1 How it Works - 1.2 Limitations - 1.3 Locking and Concurrency - - 2. Compilation and Usage - - 3. Porting - - - -1. FUNCTIONALITY - - With asynchronous I/O, write requests are handled by a separate thread - running in the background. This means that the thread that initiates - a database write does not have to wait for (sometimes slow) disk I/O - to occur. The write seems to happen very quickly, though in reality - it is happening at its usual slow pace in the background. - - Asynchronous I/O appears to give better responsiveness, but at a price. - You lose the Durable property. With the default I/O backend of SQLite, - once a write completes, you know that the information you wrote is - safely on disk. With the asynchronous I/O, this is not the case. 
If - your program crashes or if a power loss occurs after the database - write but before the asynchronous write thread has completed, then the - database change might never make it to disk and the next user of the - database might not see your change. - - You lose Durability with asynchronous I/O, but you still retain the - other parts of ACID: Atomic, Consistent, and Isolated. Many - appliations get along fine without the Durablity. - - 1.1 How it Works - - Asynchronous I/O works by creating a special SQLite "vfs" structure - and registering it with sqlite3_vfs_register(). When files opened via - this vfs are written to (using the vfs xWrite() method), the data is not - written directly to disk, but is placed in the "write-queue" to be - handled by the background thread. - - When files opened with the asynchronous vfs are read from - (using the vfs xRead() method), the data is read from the file on - disk and the write-queue, so that from the point of view of - the vfs reader the xWrite() appears to have already completed. - - The special vfs is registered (and unregistered) by calls to the - API functions sqlite3async_initialize() and sqlite3async_shutdown(). - See section "Compilation and Usage" below for details. - - 1.2 Limitations - - In order to gain experience with the main ideas surrounding asynchronous - IO, this implementation is deliberately kept simple. Additional - capabilities may be added in the future. - - For example, as currently implemented, if writes are happening at a - steady stream that exceeds the I/O capability of the background writer - thread, the queue of pending write operations will grow without bound. - If this goes on for long enough, the host system could run out of memory. - A more sophisticated module could to keep track of the quantity of - pending writes and stop accepting new write requests when the queue of - pending writes grows too large. - - 1.3 Locking and Concurrency - - Multiple connections from within a single process that use this - implementation of asynchronous IO may access a single database - file concurrently. From the point of view of the user, if all - connections are from within a single process, there is no difference - between the concurrency offered by "normal" SQLite and SQLite - using the asynchronous backend. - - If file-locking is enabled (it is enabled by default), then connections - from multiple processes may also read and write the database file. - However concurrency is reduced as follows: - - * When a connection using asynchronous IO begins a database - transaction, the database is locked immediately. However the - lock is not released until after all relevant operations - in the write-queue have been flushed to disk. This means - (for example) that the database may remain locked for some - time after a "COMMIT" or "ROLLBACK" is issued. - - * If an application using asynchronous IO executes transactions - in quick succession, other database users may be effectively - locked out of the database. This is because when a BEGIN - is executed, a database lock is established immediately. But - when the corresponding COMMIT or ROLLBACK occurs, the lock - is not released until the relevant part of the write-queue - has been flushed through. As a result, if a COMMIT is followed - by a BEGIN before the write-queue is flushed through, the database - is never unlocked,preventing other processes from accessing - the database. - - File-locking may be disabled at runtime using the sqlite3async_control() - API (see below). 
This may improve performance when an NFS or other - network file-system, as the synchronous round-trips to the server be - required to establish file locks are avoided. However, if multiple - connections attempt to access the same database file when file-locking - is disabled, application crashes and database corruption is a likely - outcome. - - -2. COMPILATION AND USAGE - - The asynchronous IO extension consists of a single file of C code - (sqlite3async.c), and a header file (sqlite3async.h) that defines the - C API used by applications to activate and control the modules - functionality. - - To use the asynchronous IO extension, compile sqlite3async.c as - part of the application that uses SQLite. Then use the API defined - in sqlite3async.h to initialize and configure the module. - - The asynchronous IO VFS API is described in detail in comments in - sqlite3async.h. Using the API usually consists of the following steps: - - 1. Register the asynchronous IO VFS with SQLite by calling the - sqlite3async_initialize() function. - - 2. Create a background thread to perform write operations and call - sqlite3async_run(). - - 3. Use the normal SQLite API to read and write to databases via - the asynchronous IO VFS. - - Refer to sqlite3async.h for details. - - -3. PORTING - - Currently the asynchronous IO extension is compatible with win32 systems - and systems that support the pthreads interface, including Mac OSX, Linux, - and other varieties of Unix. - - To port the asynchronous IO extension to another platform, the user must - implement mutex and condition variable primitives for the new platform. - Currently there is no externally available interface to allow this, but - modifying the code within sqlite3async.c to include the new platforms - concurrency primitives is relatively easy. Search within sqlite3async.c - for the comment string "PORTING FUNCTIONS" for details. Then implement - new versions of each of the following: - - static void async_mutex_enter(int eMutex); - static void async_mutex_leave(int eMutex); - static void async_cond_wait(int eCond, int eMutex); - static void async_cond_signal(int eCond); - static void async_sched_yield(void); - - The functionality required of each of the above functions is described - in comments in sqlite3async.c. diff --git a/ext/async/sqlite3async.c b/ext/async/sqlite3async.c deleted file mode 100644 index eed7c8d738..0000000000 --- a/ext/async/sqlite3async.c +++ /dev/null @@ -1,1706 +0,0 @@ -/* -** 2005 December 14 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** $Id: sqlite3async.c,v 1.7 2009/07/18 11:52:04 danielk1977 Exp $ -** -** This file contains the implementation of an asynchronous IO backend -** for SQLite. -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ASYNCIO) - -#include "sqlite3async.h" -#include "sqlite3.h" -#include -#include -#include - -/* Useful macros used in several places */ -#define MIN(x,y) ((x)<(y)?(x):(y)) -#define MAX(x,y) ((x)>(y)?(x):(y)) - -#ifndef SQLITE_AMALGAMATION -/* Macro to mark parameters as unused and silence compiler warnings. 
*/ -#define UNUSED_PARAMETER(x) (void)(x) -#endif - -/* Forward references */ -typedef struct AsyncWrite AsyncWrite; -typedef struct AsyncFile AsyncFile; -typedef struct AsyncFileData AsyncFileData; -typedef struct AsyncFileLock AsyncFileLock; -typedef struct AsyncLock AsyncLock; - -/* Enable for debugging */ -#ifndef NDEBUG -#include -static int sqlite3async_trace = 0; -# define ASYNC_TRACE(X) if( sqlite3async_trace ) asyncTrace X -static void asyncTrace(const char *zFormat, ...){ - char *z; - va_list ap; - va_start(ap, zFormat); - z = sqlite3_vmprintf(zFormat, ap); - va_end(ap); - fprintf(stderr, "[%d] %s", 0 /* (int)pthread_self() */, z); - sqlite3_free(z); -} -#else -# define ASYNC_TRACE(X) -#endif - -/* -** THREAD SAFETY NOTES -** -** Basic rules: -** -** * Both read and write access to the global write-op queue must be -** protected by the async.queueMutex. As are the async.ioError and -** async.nFile variables. -** -** * The async.pLock list and all AsyncLock and AsyncFileLock -** structures must be protected by the async.lockMutex mutex. -** -** * The file handles from the underlying system are not assumed to -** be thread safe. -** -** * See the last two paragraphs under "The Writer Thread" for -** an assumption to do with file-handle synchronization by the Os. -** -** Deadlock prevention: -** -** There are three mutex used by the system: the "writer" mutex, -** the "queue" mutex and the "lock" mutex. Rules are: -** -** * It is illegal to block on the writer mutex when any other mutex -** are held, and -** -** * It is illegal to block on the queue mutex when the lock mutex -** is held. -** -** i.e. mutex's must be grabbed in the order "writer", "queue", "lock". -** -** File system operations (invoked by SQLite thread): -** -** xOpen -** xDelete -** xFileExists -** -** File handle operations (invoked by SQLite thread): -** -** asyncWrite, asyncClose, asyncTruncate, asyncSync -** -** The operations above add an entry to the global write-op list. They -** prepare the entry, acquire the async.queueMutex momentarily while -** list pointers are manipulated to insert the new entry, then release -** the mutex and signal the writer thread to wake up in case it happens -** to be asleep. -** -** -** asyncRead, asyncFileSize. -** -** Read operations. Both of these read from both the underlying file -** first then adjust their result based on pending writes in the -** write-op queue. So async.queueMutex is held for the duration -** of these operations to prevent other threads from changing the -** queue in mid operation. -** -** -** asyncLock, asyncUnlock, asyncCheckReservedLock -** -** These primitives implement in-process locking using a hash table -** on the file name. Files are locked correctly for connections coming -** from the same process. But other processes cannot see these locks -** and will therefore not honor them. -** -** -** The writer thread: -** -** The async.writerMutex is used to make sure only there is only -** a single writer thread running at a time. -** -** Inside the writer thread is a loop that works like this: -** -** WHILE (write-op list is not empty) -** Do IO operation at head of write-op list -** Remove entry from head of write-op list -** END WHILE -** -** The async.queueMutex is always held during the test, and when the entry is removed from the head -** of the write-op list. Sometimes it is held for the interim -** period (while the IO is performed), and sometimes it is -** relinquished. 
It is relinquished if (a) the IO op is an -** ASYNC_CLOSE or (b) when the file handle was opened, two of -** the underlying systems handles were opened on the same -** file-system entry. -** -** If condition (b) above is true, then one file-handle -** (AsyncFile.pBaseRead) is used exclusively by sqlite threads to read the -** file, the other (AsyncFile.pBaseWrite) by sqlite3_async_flush() -** threads to perform write() operations. This means that read -** operations are not blocked by asynchronous writes (although -** asynchronous writes may still be blocked by reads). -** -** This assumes that the OS keeps two handles open on the same file -** properly in sync. That is, any read operation that starts after a -** write operation on the same file system entry has completed returns -** data consistent with the write. We also assume that if one thread -** reads a file while another is writing it all bytes other than the -** ones actually being written contain valid data. -** -** If the above assumptions are not true, set the preprocessor symbol -** SQLITE_ASYNC_TWO_FILEHANDLES to 0. -*/ - - -#ifndef NDEBUG -# define TESTONLY( X ) X -#else -# define TESTONLY( X ) -#endif - -/* -** PORTING FUNCTIONS -** -** There are two definitions of the following functions. One for pthreads -** compatible systems and one for Win32. These functions isolate the OS -** specific code required by each platform. -** -** The system uses three mutexes and a single condition variable. To -** block on a mutex, async_mutex_enter() is called. The parameter passed -** to async_mutex_enter(), which must be one of ASYNC_MUTEX_LOCK, -** ASYNC_MUTEX_QUEUE or ASYNC_MUTEX_WRITER, identifies which of the three -** mutexes to lock. Similarly, to unlock a mutex, async_mutex_leave() is -** called with a parameter identifying the mutex being unlocked. Mutexes -** are not recursive - it is an error to call async_mutex_enter() to -** lock a mutex that is already locked, or to call async_mutex_leave() -** to unlock a mutex that is not currently locked. -** -** The async_cond_wait() and async_cond_signal() functions are modelled -** on the pthreads functions with similar names. The first parameter to -** both functions is always ASYNC_COND_QUEUE. When async_cond_wait() -** is called the mutex identified by the second parameter must be held. -** The mutex is unlocked, and the calling thread simultaneously begins -** waiting for the condition variable to be signalled by another thread. -** After another thread signals the condition variable, the calling -** thread stops waiting, locks mutex eMutex and returns. The -** async_cond_signal() function is used to signal the condition variable. -** It is assumed that the mutex used by the thread calling async_cond_wait() -** is held by the caller of async_cond_signal() (otherwise there would be -** a race condition). -** -** It is guaranteed that no other thread will call async_cond_wait() when -** there is already a thread waiting on the condition variable. -** -** The async_sched_yield() function is called to suggest to the operating -** system that it would be a good time to shift the current thread off the -** CPU. The system will still work if this function is not implemented -** (it is not currently implemented for win32), but it might be marginally -** more efficient if it is. 
-*/ -static void async_mutex_enter(int eMutex); -static void async_mutex_leave(int eMutex); -static void async_cond_wait(int eCond, int eMutex); -static void async_cond_signal(int eCond); -static void async_sched_yield(void); - -/* -** There are also two definitions of the following. async_os_initialize() -** is called when the asynchronous VFS is first installed, and os_shutdown() -** is called when it is uninstalled (from within sqlite3async_shutdown()). -** -** For pthreads builds, both of these functions are no-ops. For win32, -** they provide an opportunity to initialize and finalize the required -** mutex and condition variables. -** -** If async_os_initialize() returns other than zero, then the initialization -** fails and SQLITE_ERROR is returned to the user. -*/ -static int async_os_initialize(void); -static void async_os_shutdown(void); - -/* Values for use as the 'eMutex' argument of the above functions. The -** integer values assigned to these constants are important for assert() -** statements that verify that mutexes are locked in the correct order. -** Specifically, it is unsafe to try to lock mutex N while holding a lock -** on mutex M if (M<=N). -*/ -#define ASYNC_MUTEX_LOCK 0 -#define ASYNC_MUTEX_QUEUE 1 -#define ASYNC_MUTEX_WRITER 2 - -/* Values for use as the 'eCond' argument of the above functions. */ -#define ASYNC_COND_QUEUE 0 - -/************************************************************************* -** Start of OS specific code. -*/ -#if SQLITE_OS_WIN || defined(_WIN32) || defined(WIN32) || defined(__CYGWIN__) || defined(__MINGW32__) || defined(__BORLANDC__) - -#include - -/* The following block contains the win32 specific code. */ - -#define mutex_held(X) (GetCurrentThreadId()==primitives.aHolder[X]) - -static struct AsyncPrimitives { - int isInit; - DWORD aHolder[3]; - CRITICAL_SECTION aMutex[3]; - HANDLE aCond[1]; -} primitives = { 0 }; - -static int async_os_initialize(void){ - if( !primitives.isInit ){ - primitives.aCond[0] = CreateEvent(NULL, TRUE, FALSE, 0); - if( primitives.aCond[0]==NULL ){ - return 1; - } - InitializeCriticalSection(&primitives.aMutex[0]); - InitializeCriticalSection(&primitives.aMutex[1]); - InitializeCriticalSection(&primitives.aMutex[2]); - primitives.isInit = 1; - } - return 0; -} -static void async_os_shutdown(void){ - if( primitives.isInit ){ - DeleteCriticalSection(&primitives.aMutex[0]); - DeleteCriticalSection(&primitives.aMutex[1]); - DeleteCriticalSection(&primitives.aMutex[2]); - CloseHandle(primitives.aCond[0]); - primitives.isInit = 0; - } -} - -/* The following block contains the Win32 specific code. 
*/ -static void async_mutex_enter(int eMutex){ - assert( eMutex==0 || eMutex==1 || eMutex==2 ); - assert( eMutex!=2 || (!mutex_held(0) && !mutex_held(1) && !mutex_held(2)) ); - assert( eMutex!=1 || (!mutex_held(0) && !mutex_held(1)) ); - assert( eMutex!=0 || (!mutex_held(0)) ); - EnterCriticalSection(&primitives.aMutex[eMutex]); - TESTONLY( primitives.aHolder[eMutex] = GetCurrentThreadId(); ) -} -static void async_mutex_leave(int eMutex){ - assert( eMutex==0 || eMutex==1 || eMutex==2 ); - assert( mutex_held(eMutex) ); - TESTONLY( primitives.aHolder[eMutex] = 0; ) - LeaveCriticalSection(&primitives.aMutex[eMutex]); -} -static void async_cond_wait(int eCond, int eMutex){ - ResetEvent(primitives.aCond[eCond]); - async_mutex_leave(eMutex); - WaitForSingleObject(primitives.aCond[eCond], INFINITE); - async_mutex_enter(eMutex); -} -static void async_cond_signal(int eCond){ - assert( mutex_held(ASYNC_MUTEX_QUEUE) ); - SetEvent(primitives.aCond[eCond]); -} -static void async_sched_yield(void){ - Sleep(0); -} -#else - -/* The following block contains the pthreads specific code. */ -#include -#include - -#define mutex_held(X) pthread_equal(primitives.aHolder[X], pthread_self()) - -static int async_os_initialize(void) {return 0;} -static void async_os_shutdown(void) {} - -static struct AsyncPrimitives { - pthread_mutex_t aMutex[3]; - pthread_cond_t aCond[1]; - pthread_t aHolder[3]; -} primitives = { - { PTHREAD_MUTEX_INITIALIZER, - PTHREAD_MUTEX_INITIALIZER, - PTHREAD_MUTEX_INITIALIZER - } , { - PTHREAD_COND_INITIALIZER - } , { 0, 0, 0 } -}; - -static void async_mutex_enter(int eMutex){ - assert( eMutex==0 || eMutex==1 || eMutex==2 ); - assert( eMutex!=2 || (!mutex_held(0) && !mutex_held(1) && !mutex_held(2)) ); - assert( eMutex!=1 || (!mutex_held(0) && !mutex_held(1)) ); - assert( eMutex!=0 || (!mutex_held(0)) ); - pthread_mutex_lock(&primitives.aMutex[eMutex]); - TESTONLY( primitives.aHolder[eMutex] = pthread_self(); ) -} -static void async_mutex_leave(int eMutex){ - assert( eMutex==0 || eMutex==1 || eMutex==2 ); - assert( mutex_held(eMutex) ); - TESTONLY( primitives.aHolder[eMutex] = 0; ) - pthread_mutex_unlock(&primitives.aMutex[eMutex]); -} -static void async_cond_wait(int eCond, int eMutex){ - assert( eMutex==0 || eMutex==1 || eMutex==2 ); - assert( mutex_held(eMutex) ); - TESTONLY( primitives.aHolder[eMutex] = 0; ) - pthread_cond_wait(&primitives.aCond[eCond], &primitives.aMutex[eMutex]); - TESTONLY( primitives.aHolder[eMutex] = pthread_self(); ) -} -static void async_cond_signal(int eCond){ - assert( mutex_held(ASYNC_MUTEX_QUEUE) ); - pthread_cond_signal(&primitives.aCond[eCond]); -} -static void async_sched_yield(void){ - sched_yield(); -} -#endif -/* -** End of OS specific code. -*************************************************************************/ - -#define assert_mutex_is_held(X) assert( mutex_held(X) ) - - -#ifndef SQLITE_ASYNC_TWO_FILEHANDLES -/* #define SQLITE_ASYNC_TWO_FILEHANDLES 0 */ -#define SQLITE_ASYNC_TWO_FILEHANDLES 1 -#endif - -/* -** State information is held in the static variable "async" defined -** as the following structure. -** -** Both async.ioError and async.nFile are protected by async.queueMutex. 
-*/ -static struct TestAsyncStaticData { - AsyncWrite *pQueueFirst; /* Next write operation to be processed */ - AsyncWrite *pQueueLast; /* Last write operation on the list */ - AsyncLock *pLock; /* Linked list of all AsyncLock structures */ - volatile int ioDelay; /* Extra delay between write operations */ - volatile int eHalt; /* One of the SQLITEASYNC_HALT_XXX values */ - volatile int bLockFiles; /* Current value of "lockfiles" parameter */ - int ioError; /* True if an IO error has occurred */ - int nFile; /* Number of open files (from sqlite pov) */ -} async = { 0,0,0,0,0,1,0,0 }; - -/* Possible values of AsyncWrite.op */ -#define ASYNC_NOOP 0 -#define ASYNC_WRITE 1 -#define ASYNC_SYNC 2 -#define ASYNC_TRUNCATE 3 -#define ASYNC_CLOSE 4 -#define ASYNC_DELETE 5 -#define ASYNC_OPENEXCLUSIVE 6 -#define ASYNC_UNLOCK 7 - -/* Names of opcodes. Used for debugging only. -** Make sure these stay in sync with the macros above! -*/ -static const char *azOpcodeName[] = { - "NOOP", "WRITE", "SYNC", "TRUNCATE", "CLOSE", "DELETE", "OPENEX", "UNLOCK" -}; - -/* -** Entries on the write-op queue are instances of the AsyncWrite -** structure, defined here. -** -** The interpretation of the iOffset and nByte variables varies depending -** on the value of AsyncWrite.op: -** -** ASYNC_NOOP: -** No values used. -** -** ASYNC_WRITE: -** iOffset -> Offset in file to write to. -** nByte -> Number of bytes of data to write (pointed to by zBuf). -** -** ASYNC_SYNC: -** nByte -> flags to pass to sqlite3OsSync(). -** -** ASYNC_TRUNCATE: -** iOffset -> Size to truncate file to. -** nByte -> Unused. -** -** ASYNC_CLOSE: -** iOffset -> Unused. -** nByte -> Unused. -** -** ASYNC_DELETE: -** iOffset -> Contains the "syncDir" flag. -** nByte -> Number of bytes of zBuf points to (file name). -** -** ASYNC_OPENEXCLUSIVE: -** iOffset -> Value of "delflag". -** nByte -> Number of bytes of zBuf points to (file name). -** -** ASYNC_UNLOCK: -** nByte -> Argument to sqlite3OsUnlock(). -** -** -** For an ASYNC_WRITE operation, zBuf points to the data to write to the file. -** This space is sqlite3_malloc()d along with the AsyncWrite structure in a -** single blob, so is deleted when sqlite3_free() is called on the parent -** structure. -*/ -struct AsyncWrite { - AsyncFileData *pFileData; /* File to write data to or sync */ - int op; /* One of ASYNC_xxx etc. */ - sqlite_int64 iOffset; /* See above */ - int nByte; /* See above */ - char *zBuf; /* Data to write to file (or NULL if op!=ASYNC_WRITE) */ - AsyncWrite *pNext; /* Next write operation (to any file) */ -}; - -/* -** An instance of this structure is created for each distinct open file -** (i.e. if two handles are opened on the one file, only one of these -** structures is allocated) and stored in the async.aLock hash table. The -** keys for async.aLock are the full pathnames of the opened files. -** -** AsyncLock.pList points to the head of a linked list of AsyncFileLock -** structures, one for each handle currently open on the file. -** -** If the opened file is not a main-database (the SQLITE_OPEN_MAIN_DB is -** not passed to the sqlite3OsOpen() call), or if async.bLockFiles is -** false, variables AsyncLock.pFile and AsyncLock.eLock are never used. -** Otherwise, pFile is a file handle opened on the file in question and -** used to obtain the file-system locks required by database connections -** within this process. -** -** See comments above the asyncLock() function for more details on -** the implementation of database locking used by this backend. 
-*/ -struct AsyncLock { - char *zFile; - int nFile; - sqlite3_file *pFile; - int eLock; - AsyncFileLock *pList; - AsyncLock *pNext; /* Next in linked list headed by async.pLock */ -}; - -/* -** An instance of the following structure is allocated along with each -** AsyncFileData structure (see AsyncFileData.lock), but is only used if the -** file was opened with the SQLITE_OPEN_MAIN_DB. -*/ -struct AsyncFileLock { - int eLock; /* Internally visible lock state (sqlite pov) */ - int eAsyncLock; /* Lock-state with write-queue unlock */ - AsyncFileLock *pNext; -}; - -/* -** The AsyncFile structure is a subclass of sqlite3_file used for -** asynchronous IO. -** -** All of the actual data for the structure is stored in the structure -** pointed to by AsyncFile.pData, which is allocated as part of the -** sqlite3OsOpen() using sqlite3_malloc(). The reason for this is that the -** lifetime of the AsyncFile structure is ended by the caller after OsClose() -** is called, but the data in AsyncFileData may be required by the -** writer thread after that point. -*/ -struct AsyncFile { - sqlite3_io_methods *pMethod; - AsyncFileData *pData; -}; -struct AsyncFileData { - char *zName; /* Underlying OS filename - used for debugging */ - int nName; /* Number of characters in zName */ - sqlite3_file *pBaseRead; /* Read handle to the underlying Os file */ - sqlite3_file *pBaseWrite; /* Write handle to the underlying Os file */ - AsyncFileLock lock; /* Lock state for this handle */ - AsyncLock *pLock; /* AsyncLock object for this file system entry */ - AsyncWrite closeOp; /* Preallocated close operation */ -}; - -/* -** Add an entry to the end of the global write-op list. pWrite should point -** to an AsyncWrite structure allocated using sqlite3_malloc(). The writer -** thread will call sqlite3_free() to free the structure after the specified -** operation has been completed. -** -** Once an AsyncWrite structure has been added to the list, it becomes the -** property of the writer thread and must not be read or modified by the -** caller. -*/ -static void addAsyncWrite(AsyncWrite *pWrite){ - /* We must hold the queue mutex in order to modify the queue pointers */ - if( pWrite->op!=ASYNC_UNLOCK ){ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - } - - /* Add the record to the end of the write-op queue */ - assert( !pWrite->pNext ); - if( async.pQueueLast ){ - assert( async.pQueueFirst ); - async.pQueueLast->pNext = pWrite; - }else{ - async.pQueueFirst = pWrite; - } - async.pQueueLast = pWrite; - ASYNC_TRACE(("PUSH %p (%s %s %d)\n", pWrite, azOpcodeName[pWrite->op], - pWrite->pFileData ? pWrite->pFileData->zName : "-", pWrite->iOffset)); - - if( pWrite->op==ASYNC_CLOSE ){ - async.nFile--; - } - - /* The writer thread might have been idle because there was nothing - ** on the write-op queue for it to do. So wake it up. */ - async_cond_signal(ASYNC_COND_QUEUE); - - /* Drop the queue mutex */ - if( pWrite->op!=ASYNC_UNLOCK ){ - async_mutex_leave(ASYNC_MUTEX_QUEUE); - } -} - -/* -** Increment async.nFile in a thread-safe manner. -*/ -static void incrOpenFileCount(void){ - /* We must hold the queue mutex in order to modify async.nFile */ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - if( async.nFile==0 ){ - async.ioError = SQLITE_OK; - } - async.nFile++; - async_mutex_leave(ASYNC_MUTEX_QUEUE); -} - -/* -** This is a utility function to allocate and populate a new AsyncWrite -** structure and insert it (via addAsyncWrite() ) into the global list. 
-*/ -static int addNewAsyncWrite( - AsyncFileData *pFileData, - int op, - sqlite3_int64 iOffset, - int nByte, - const char *zByte -){ - AsyncWrite *p; - if( op!=ASYNC_CLOSE && async.ioError ){ - return async.ioError; - } - p = sqlite3_malloc(sizeof(AsyncWrite) + (zByte?nByte:0)); - if( !p ){ - /* The upper layer does not expect operations like OsWrite() to - ** return SQLITE_NOMEM. This is partly because under normal conditions - ** SQLite is required to do rollback without calling malloc(). So - ** if malloc() fails here, treat it as an I/O error. The above - ** layer knows how to handle that. - */ - return SQLITE_IOERR; - } - p->op = op; - p->iOffset = iOffset; - p->nByte = nByte; - p->pFileData = pFileData; - p->pNext = 0; - if( zByte ){ - p->zBuf = (char *)&p[1]; - memcpy(p->zBuf, zByte, nByte); - }else{ - p->zBuf = 0; - } - addAsyncWrite(p); - return SQLITE_OK; -} - -/* -** Close the file. This just adds an entry to the write-op list, the file is -** not actually closed. -*/ -static int asyncClose(sqlite3_file *pFile){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - - /* Unlock the file, if it is locked */ - async_mutex_enter(ASYNC_MUTEX_LOCK); - p->lock.eLock = 0; - async_mutex_leave(ASYNC_MUTEX_LOCK); - - addAsyncWrite(&p->closeOp); - return SQLITE_OK; -} - -/* -** Implementation of sqlite3OsWrite() for asynchronous files. Instead of -** writing to the underlying file, this function adds an entry to the end of -** the global AsyncWrite list. Either SQLITE_OK or SQLITE_NOMEM may be -** returned. -*/ -static int asyncWrite( - sqlite3_file *pFile, - const void *pBuf, - int amt, - sqlite3_int64 iOff -){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - return addNewAsyncWrite(p, ASYNC_WRITE, iOff, amt, pBuf); -} - -/* -** Read data from the file. First we read from the filesystem, then adjust -** the contents of the buffer based on ASYNC_WRITE operations in the -** write-op queue. -** -** This method holds the mutex from start to finish. -*/ -static int asyncRead( - sqlite3_file *pFile, - void *zOut, - int iAmt, - sqlite3_int64 iOffset -){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - int rc = SQLITE_OK; - sqlite3_int64 filesize = 0; - sqlite3_file *pBase = p->pBaseRead; - sqlite3_int64 iAmt64 = (sqlite3_int64)iAmt; - - /* Grab the write queue mutex for the duration of the call */ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - - /* If an I/O error has previously occurred in this virtual file - ** system, then all subsequent operations fail. - */ - if( async.ioError!=SQLITE_OK ){ - rc = async.ioError; - goto asyncread_out; - } - - if( pBase->pMethods ){ - sqlite3_int64 nRead; - rc = pBase->pMethods->xFileSize(pBase, &filesize); - if( rc!=SQLITE_OK ){ - goto asyncread_out; - } - nRead = MIN(filesize - iOffset, iAmt64); - if( nRead>0 ){ - rc = pBase->pMethods->xRead(pBase, zOut, (int)nRead, iOffset); - ASYNC_TRACE(("READ %s %d bytes at %d\n", p->zName, nRead, iOffset)); - } - } - - if( rc==SQLITE_OK ){ - AsyncWrite *pWrite; - char *zName = p->zName; - - for(pWrite=async.pQueueFirst; pWrite; pWrite = pWrite->pNext){ - if( pWrite->op==ASYNC_WRITE && ( - (pWrite->pFileData==p) || - (zName && pWrite->pFileData->zName==zName) - )){ - sqlite3_int64 nCopy; - sqlite3_int64 nByte64 = (sqlite3_int64)pWrite->nByte; - - /* Set variable iBeginIn to the offset in buffer pWrite->zBuf[] from - ** which data should be copied. Set iBeginOut to the offset within - ** the output buffer to which data should be copied. If either of - ** these offsets is a negative number, set them to 0. 
- */ - sqlite3_int64 iBeginOut = (pWrite->iOffset-iOffset); - sqlite3_int64 iBeginIn = -iBeginOut; - if( iBeginIn<0 ) iBeginIn = 0; - if( iBeginOut<0 ) iBeginOut = 0; - - filesize = MAX(filesize, pWrite->iOffset+nByte64); - - nCopy = MIN(nByte64-iBeginIn, iAmt64-iBeginOut); - if( nCopy>0 ){ - memcpy(&((char *)zOut)[iBeginOut], &pWrite->zBuf[iBeginIn], (size_t)nCopy); - ASYNC_TRACE(("OVERREAD %d bytes at %d\n", nCopy, iBeginOut+iOffset)); - } - } - } - } - -asyncread_out: - async_mutex_leave(ASYNC_MUTEX_QUEUE); - if( rc==SQLITE_OK && filesize<(iOffset+iAmt) ){ - rc = SQLITE_IOERR_SHORT_READ; - } - return rc; -} - -/* -** Truncate the file to nByte bytes in length. This just adds an entry to -** the write-op list, no IO actually takes place. -*/ -static int asyncTruncate(sqlite3_file *pFile, sqlite3_int64 nByte){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - return addNewAsyncWrite(p, ASYNC_TRUNCATE, nByte, 0, 0); -} - -/* -** Sync the file. This just adds an entry to the write-op list, the -** sync() is done later by sqlite3_async_flush(). -*/ -static int asyncSync(sqlite3_file *pFile, int flags){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - return addNewAsyncWrite(p, ASYNC_SYNC, 0, flags, 0); -} - -/* -** Read the size of the file. First we read the size of the file system -** entry, then adjust for any ASYNC_WRITE or ASYNC_TRUNCATE operations -** currently in the write-op list. -** -** This method holds the mutex from start to finish. -*/ -int asyncFileSize(sqlite3_file *pFile, sqlite3_int64 *piSize){ - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - int rc = SQLITE_OK; - sqlite3_int64 s = 0; - sqlite3_file *pBase; - - async_mutex_enter(ASYNC_MUTEX_QUEUE); - - /* Read the filesystem size from the base file. If pMethods is NULL, this - ** means the file hasn't been opened yet. In this case all relevant data - ** must be in the write-op queue anyway, so we can omit reading from the - ** file-system. - */ - pBase = p->pBaseRead; - if( pBase->pMethods ){ - rc = pBase->pMethods->xFileSize(pBase, &s); - } - - if( rc==SQLITE_OK ){ - AsyncWrite *pWrite; - for(pWrite=async.pQueueFirst; pWrite; pWrite = pWrite->pNext){ - if( pWrite->op==ASYNC_DELETE - && p->zName - && strcmp(p->zName, pWrite->zBuf)==0 - ){ - s = 0; - }else if( pWrite->pFileData && ( - (pWrite->pFileData==p) - || (p->zName && pWrite->pFileData->zName==p->zName) - )){ - switch( pWrite->op ){ - case ASYNC_WRITE: - s = MAX(pWrite->iOffset + (sqlite3_int64)(pWrite->nByte), s); - break; - case ASYNC_TRUNCATE: - s = MIN(s, pWrite->iOffset); - break; - } - } - } - *piSize = s; - } - async_mutex_leave(ASYNC_MUTEX_QUEUE); - return rc; -} - -/* -** Lock or unlock the actual file-system entry. 
-*/ -static int getFileLock(AsyncLock *pLock){ - int rc = SQLITE_OK; - AsyncFileLock *pIter; - int eRequired = 0; - - if( pLock->pFile ){ - for(pIter=pLock->pList; pIter; pIter=pIter->pNext){ - assert(pIter->eAsyncLock>=pIter->eLock); - if( pIter->eAsyncLock>eRequired ){ - eRequired = pIter->eAsyncLock; - assert(eRequired>=0 && eRequired<=SQLITE_LOCK_EXCLUSIVE); - } - } - - if( eRequired>pLock->eLock ){ - rc = pLock->pFile->pMethods->xLock(pLock->pFile, eRequired); - if( rc==SQLITE_OK ){ - pLock->eLock = eRequired; - } - } - else if( eRequired<pLock->eLock && eRequired<=SQLITE_LOCK_SHARED ){ - rc = pLock->pFile->pMethods->xUnlock(pLock->pFile, eRequired); - if( rc==SQLITE_OK ){ - pLock->eLock = eRequired; - } - } - } - - return rc; -} - -/* -** Return the AsyncLock structure from the global async.pLock list -** associated with the file-system entry identified by path zName -** (a string of nName bytes). If no such structure exists, return 0. -*/ -static AsyncLock *findLock(const char *zName, int nName){ - AsyncLock *p = async.pLock; - while( p && (p->nFile!=nName || memcmp(p->zFile, zName, nName)) ){ - p = p->pNext; - } - return p; -} - -/* -** The following two methods - asyncLock() and asyncUnlock() - are used -** to obtain and release locks on database files opened with the -** asynchronous backend. -*/ -static int asyncLock(sqlite3_file *pFile, int eLock){ - int rc = SQLITE_OK; - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - - if( p->zName ){ - async_mutex_enter(ASYNC_MUTEX_LOCK); - if( p->lock.eLock<eLock ){ - AsyncLock *pLock = p->pLock; - AsyncFileLock *pIter; - assert(pLock && pLock->pList); - for(pIter=pLock->pList; pIter; pIter=pIter->pNext){ - if( pIter!=&p->lock && ( - (eLock==SQLITE_LOCK_EXCLUSIVE && pIter->eLock>=SQLITE_LOCK_SHARED) || - (eLock==SQLITE_LOCK_PENDING && pIter->eLock>=SQLITE_LOCK_RESERVED) || - (eLock==SQLITE_LOCK_RESERVED && pIter->eLock>=SQLITE_LOCK_RESERVED) || - (eLock==SQLITE_LOCK_SHARED && pIter->eLock>=SQLITE_LOCK_PENDING) - )){ - rc = SQLITE_BUSY; - } - } - if( rc==SQLITE_OK ){ - p->lock.eLock = eLock; - p->lock.eAsyncLock = MAX(p->lock.eAsyncLock, eLock); - } - assert(p->lock.eAsyncLock>=p->lock.eLock); - if( rc==SQLITE_OK ){ - rc = getFileLock(pLock); - } - } - async_mutex_leave(ASYNC_MUTEX_LOCK); - } - - ASYNC_TRACE(("LOCK %d (%s) rc=%d\n", eLock, p->zName, rc)); - return rc; -} -static int asyncUnlock(sqlite3_file *pFile, int eLock){ - int rc = SQLITE_OK; - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - if( p->zName ){ - AsyncFileLock *pLock = &p->lock; - async_mutex_enter(ASYNC_MUTEX_QUEUE); - async_mutex_enter(ASYNC_MUTEX_LOCK); - pLock->eLock = MIN(pLock->eLock, eLock); - rc = addNewAsyncWrite(p, ASYNC_UNLOCK, 0, eLock, 0); - async_mutex_leave(ASYNC_MUTEX_LOCK); - async_mutex_leave(ASYNC_MUTEX_QUEUE); - } - return rc; -} - -/* -** This function is called when the pager layer first opens a database file -** and is checking for a hot-journal. -*/ -static int asyncCheckReservedLock(sqlite3_file *pFile, int *pResOut){ - int ret = 0; - AsyncFileLock *pIter; - AsyncFileData *p = ((AsyncFile *)pFile)->pData; - - async_mutex_enter(ASYNC_MUTEX_LOCK); - for(pIter=p->pLock->pList; pIter; pIter=pIter->pNext){ - if( pIter->eLock>=SQLITE_LOCK_RESERVED ){ - ret = 1; - break; - } - } - async_mutex_leave(ASYNC_MUTEX_LOCK); - - ASYNC_TRACE(("CHECK-LOCK %d (%s)\n", ret, p->zName)); - *pResOut = ret; - return SQLITE_OK; -} - -/* -** sqlite3_file_control() implementation. 
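The deleted `asyncLock()` emulates SQLite's usual locking semantics between handles in the same process before any real file lock is taken. Its conflict test can be read as a small predicate over SQLite's lock levels; this standalone sketch (illustrative only, `lockConflicts()` is a made-up name) restates it using the `SQLITE_LOCK_*` constants from `sqlite3.h`:

```c
#include <assert.h>
#include <sqlite3.h>

/* Returns non-zero if a request for eNew by one handle conflicts with the
** lock eHeld already held by a different handle on the same file.  This is
** the same test the deleted asyncLock() applies to each AsyncFileLock.    */
static int lockConflicts(int eNew, int eHeld){
  return
    (eNew==SQLITE_LOCK_EXCLUSIVE && eHeld>=SQLITE_LOCK_SHARED)   ||
    (eNew==SQLITE_LOCK_PENDING   && eHeld>=SQLITE_LOCK_RESERVED) ||
    (eNew==SQLITE_LOCK_RESERVED  && eHeld>=SQLITE_LOCK_RESERVED) ||
    (eNew==SQLITE_LOCK_SHARED    && eHeld>=SQLITE_LOCK_PENDING);
}

int main(void){
  /* Two readers can coexist... */
  assert( !lockConflicts(SQLITE_LOCK_SHARED, SQLITE_LOCK_SHARED) );
  /* ...but a writer excludes readers, and a pending writer blocks new readers. */
  assert( lockConflicts(SQLITE_LOCK_EXCLUSIVE, SQLITE_LOCK_SHARED) );
  assert( lockConflicts(SQLITE_LOCK_SHARED, SQLITE_LOCK_PENDING) );
  return 0;
}
```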
-*/ -static int asyncFileControl(sqlite3_file *id, int op, void *pArg){ - switch( op ){ - case SQLITE_FCNTL_LOCKSTATE: { - async_mutex_enter(ASYNC_MUTEX_LOCK); - *(int*)pArg = ((AsyncFile*)id)->pData->lock.eLock; - async_mutex_leave(ASYNC_MUTEX_LOCK); - return SQLITE_OK; - } - } - return SQLITE_NOTFOUND; -} - -/* -** Return the device characteristics and sector-size of the device. It -** is tricky to implement these correctly, as this backend might -** not have an open file handle at this point. -*/ -static int asyncSectorSize(sqlite3_file *pFile){ - UNUSED_PARAMETER(pFile); - return 512; -} -static int asyncDeviceCharacteristics(sqlite3_file *pFile){ - UNUSED_PARAMETER(pFile); - return 0; -} - -static int unlinkAsyncFile(AsyncFileData *pData){ - AsyncFileLock **ppIter; - int rc = SQLITE_OK; - - if( pData->zName ){ - AsyncLock *pLock = pData->pLock; - for(ppIter=&pLock->pList; *ppIter; ppIter=&((*ppIter)->pNext)){ - if( (*ppIter)==&pData->lock ){ - *ppIter = pData->lock.pNext; - break; - } - } - if( !pLock->pList ){ - AsyncLock **pp; - if( pLock->pFile ){ - pLock->pFile->pMethods->xClose(pLock->pFile); - } - for(pp=&async.pLock; *pp!=pLock; pp=&((*pp)->pNext)); - *pp = pLock->pNext; - sqlite3_free(pLock); - }else{ - rc = getFileLock(pLock); - } - } - - return rc; -} - -/* -** The parameter passed to this function is a copy of a 'flags' parameter -** passed to this modules xOpen() method. This function returns true -** if the file should be opened asynchronously, or false if it should -** be opened immediately. -** -** If the file is to be opened asynchronously, then asyncOpen() will add -** an entry to the event queue and the file will not actually be opened -** until the event is processed. Otherwise, the file is opened directly -** by the caller. -*/ -static int doAsynchronousOpen(int flags){ - return (flags&SQLITE_OPEN_CREATE) && ( - (flags&SQLITE_OPEN_MAIN_JOURNAL) || - (flags&SQLITE_OPEN_TEMP_JOURNAL) || - (flags&SQLITE_OPEN_DELETEONCLOSE) - ); -} - -/* -** Open a file. -*/ -static int asyncOpen( - sqlite3_vfs *pAsyncVfs, - const char *zName, - sqlite3_file *pFile, - int flags, - int *pOutFlags -){ - static sqlite3_io_methods async_methods = { - 1, /* iVersion */ - asyncClose, /* xClose */ - asyncRead, /* xRead */ - asyncWrite, /* xWrite */ - asyncTruncate, /* xTruncate */ - asyncSync, /* xSync */ - asyncFileSize, /* xFileSize */ - asyncLock, /* xLock */ - asyncUnlock, /* xUnlock */ - asyncCheckReservedLock, /* xCheckReservedLock */ - asyncFileControl, /* xFileControl */ - asyncSectorSize, /* xSectorSize */ - asyncDeviceCharacteristics /* xDeviceCharacteristics */ - }; - - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - AsyncFile *p = (AsyncFile *)pFile; - int nName = 0; - int rc = SQLITE_OK; - int nByte; - AsyncFileData *pData; - AsyncLock *pLock = 0; - char *z; - int isAsyncOpen = doAsynchronousOpen(flags); - - /* If zName is NULL, then the upper layer is requesting an anonymous file. - ** Otherwise, allocate enough space to make a copy of the file name (along - ** with the second nul-terminator byte required by xOpen). 
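`doAsynchronousOpen()` above decides which opens may be deferred onto the write queue: only files that SQLite creates itself and never needs to read back before the queue drains. A quick standalone restatement with a couple of flag combinations (the `demoDeferOpen()` wrapper is hypothetical, the flags are the public `SQLITE_OPEN_*` values):

```c
#include <assert.h>
#include <sqlite3.h>

/* Same rule as the deleted doAsynchronousOpen(): defer only created
** journal-type files or files that are deleted when closed.          */
static int demoDeferOpen(int flags){
  return (flags&SQLITE_OPEN_CREATE) && (
    (flags&SQLITE_OPEN_MAIN_JOURNAL) ||
    (flags&SQLITE_OPEN_TEMP_JOURNAL) ||
    (flags&SQLITE_OPEN_DELETEONCLOSE)
  );
}

int main(void){
  /* The main database file is always opened synchronously... */
  assert( !demoDeferOpen(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_DB) );
  /* ...but a newly created rollback journal can be opened lazily. */
  assert( demoDeferOpen(SQLITE_OPEN_READWRITE|SQLITE_OPEN_CREATE|SQLITE_OPEN_MAIN_JOURNAL) );
  return 0;
}
```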
- */ - if( zName ){ - nName = (int)strlen(zName); - } - - nByte = ( - sizeof(AsyncFileData) + /* AsyncFileData structure */ - 2 * pVfs->szOsFile + /* AsyncFileData.pBaseRead and pBaseWrite */ - nName + 2 /* AsyncFileData.zName */ - ); - z = sqlite3_malloc(nByte); - if( !z ){ - return SQLITE_NOMEM; - } - memset(z, 0, nByte); - pData = (AsyncFileData*)z; - z += sizeof(pData[0]); - pData->pBaseRead = (sqlite3_file*)z; - z += pVfs->szOsFile; - pData->pBaseWrite = (sqlite3_file*)z; - pData->closeOp.pFileData = pData; - pData->closeOp.op = ASYNC_CLOSE; - - if( zName ){ - z += pVfs->szOsFile; - pData->zName = z; - pData->nName = nName; - memcpy(pData->zName, zName, nName); - } - - if( !isAsyncOpen ){ - int flagsout; - rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseRead, flags, &flagsout); - if( rc==SQLITE_OK - && (flagsout&SQLITE_OPEN_READWRITE) - && (flags&SQLITE_OPEN_EXCLUSIVE)==0 - ){ - rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseWrite, flags, 0); - } - if( pOutFlags ){ - *pOutFlags = flagsout; - } - } - - async_mutex_enter(ASYNC_MUTEX_LOCK); - - if( zName && rc==SQLITE_OK ){ - pLock = findLock(pData->zName, pData->nName); - if( !pLock ){ - int nByte = pVfs->szOsFile + sizeof(AsyncLock) + pData->nName + 1; - pLock = (AsyncLock *)sqlite3_malloc(nByte); - if( pLock ){ - memset(pLock, 0, nByte); - if( async.bLockFiles && (flags&SQLITE_OPEN_MAIN_DB) ){ - pLock->pFile = (sqlite3_file *)&pLock[1]; - rc = pVfs->xOpen(pVfs, pData->zName, pLock->pFile, flags, 0); - if( rc!=SQLITE_OK ){ - sqlite3_free(pLock); - pLock = 0; - } - } - if( pLock ){ - pLock->nFile = pData->nName; - pLock->zFile = &((char *)(&pLock[1]))[pVfs->szOsFile]; - memcpy(pLock->zFile, pData->zName, pLock->nFile); - pLock->pNext = async.pLock; - async.pLock = pLock; - } - }else{ - rc = SQLITE_NOMEM; - } - } - } - - if( rc==SQLITE_OK ){ - p->pMethod = &async_methods; - p->pData = pData; - - /* Link AsyncFileData.lock into the linked list of - ** AsyncFileLock structures for this file. - */ - if( zName ){ - pData->lock.pNext = pLock->pList; - pLock->pList = &pData->lock; - pData->zName = pLock->zFile; - } - }else{ - if( pData->pBaseRead->pMethods ){ - pData->pBaseRead->pMethods->xClose(pData->pBaseRead); - } - if( pData->pBaseWrite->pMethods ){ - pData->pBaseWrite->pMethods->xClose(pData->pBaseWrite); - } - sqlite3_free(pData); - } - - async_mutex_leave(ASYNC_MUTEX_LOCK); - - if( rc==SQLITE_OK ){ - pData->pLock = pLock; - } - - if( rc==SQLITE_OK && isAsyncOpen ){ - rc = addNewAsyncWrite(pData, ASYNC_OPENEXCLUSIVE, (sqlite3_int64)flags,0,0); - if( rc==SQLITE_OK ){ - if( pOutFlags ) *pOutFlags = flags; - }else{ - async_mutex_enter(ASYNC_MUTEX_LOCK); - unlinkAsyncFile(pData); - async_mutex_leave(ASYNC_MUTEX_LOCK); - sqlite3_free(pData); - } - } - if( rc!=SQLITE_OK ){ - p->pMethod = 0; - }else{ - incrOpenFileCount(); - } - - return rc; -} - -/* -** Implementation of sqlite3OsDelete. Add an entry to the end of the -** write-op queue to perform the delete. -*/ -static int asyncDelete(sqlite3_vfs *pAsyncVfs, const char *z, int syncDir){ - UNUSED_PARAMETER(pAsyncVfs); - return addNewAsyncWrite(0, ASYNC_DELETE, syncDir, (int)strlen(z)+1, z); -} - -/* -** Implementation of sqlite3OsAccess. This method holds the mutex from -** start to finish. 
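`asyncOpen()` above carves a single allocation into the `AsyncFileData` structure, two VFS file objects of `szOsFile` bytes each, and the nul-terminated file name, so the whole thing is released with one free. A simplified sketch of that layout trick (the `DemoFileData` type and `demoAlloc()` are stand-ins, not the real structures):

```c
#include <stdlib.h>
#include <string.h>

typedef struct DemoFileData DemoFileData;
struct DemoFileData {
  void *pBaseRead;    /* points into the same allocation */
  void *pBaseWrite;   /* ditto */
  char *zName;        /* ditto, nul-terminated           */
};

static DemoFileData *demoAlloc(const char *zName, int szOsFile){
  int nName = (int)strlen(zName);
  int nByte = (int)sizeof(DemoFileData) + 2*szOsFile + nName + 2;
  char *z = malloc(nByte);
  DemoFileData *p = (DemoFileData*)z;
  if( !z ) return 0;
  memset(z, 0, nByte);
  z += sizeof(*p);
  p->pBaseRead  = z;  z += szOsFile;     /* first sqlite3_file slot  */
  p->pBaseWrite = z;  z += szOsFile;     /* second sqlite3_file slot */
  p->zName = z;
  memcpy(p->zName, zName, nName);
  return p;                              /* free(p) releases it all  */
}

int main(void){
  DemoFileData *p = demoAlloc("test.db", 128);
  free(p);
  return 0;
}
```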
-*/ -static int asyncAccess( - sqlite3_vfs *pAsyncVfs, - const char *zName, - int flags, - int *pResOut -){ - int rc; - int ret; - AsyncWrite *p; - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - - assert(flags==SQLITE_ACCESS_READWRITE - || flags==SQLITE_ACCESS_READ - || flags==SQLITE_ACCESS_EXISTS - ); - - async_mutex_enter(ASYNC_MUTEX_QUEUE); - rc = pVfs->xAccess(pVfs, zName, flags, &ret); - if( rc==SQLITE_OK && flags==SQLITE_ACCESS_EXISTS ){ - for(p=async.pQueueFirst; p; p = p->pNext){ - if( p->op==ASYNC_DELETE && 0==strcmp(p->zBuf, zName) ){ - ret = 0; - }else if( p->op==ASYNC_OPENEXCLUSIVE - && p->pFileData->zName - && 0==strcmp(p->pFileData->zName, zName) - ){ - ret = 1; - } - } - } - ASYNC_TRACE(("ACCESS(%s): %s = %d\n", - flags==SQLITE_ACCESS_READWRITE?"read-write": - flags==SQLITE_ACCESS_READ?"read":"exists" - , zName, ret) - ); - async_mutex_leave(ASYNC_MUTEX_QUEUE); - *pResOut = ret; - return rc; -} - -/* -** Fill in zPathOut with the full path to the file identified by zPath. -*/ -static int asyncFullPathname( - sqlite3_vfs *pAsyncVfs, - const char *zPath, - int nPathOut, - char *zPathOut -){ - int rc; - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - rc = pVfs->xFullPathname(pVfs, zPath, nPathOut, zPathOut); - - /* Because of the way intra-process file locking works, this backend - ** needs to return a canonical path. The following block assumes the - ** file-system uses unix style paths. - */ - if( rc==SQLITE_OK ){ - int i, j; - char *z = zPathOut; - int n = (int)strlen(z); - while( n>1 && z[n-1]=='/' ){ n--; } - for(i=j=0; i<n; i++){ - if( z[i]=='/' ){ - if( z[i+1]=='/' ) continue; - if( z[i+1]=='.' && i+2<n && z[i+2]=='/' ){ - i += 1; - continue; - } - if( z[i+1]=='.' && i+3<n && z[i+2]=='.' && z[i+3]=='/' ){ - while( j>0 && z[j-1]!='/' ){ j--; } - if( j>0 ){ j--; } - i += 2; - continue; - } - } - z[j++] = z[i]; - } - z[j] = 0; - } - - return rc; -} -static void *asyncDlOpen(sqlite3_vfs *pAsyncVfs, const char *zPath){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - return pVfs->xDlOpen(pVfs, zPath); -} -static void asyncDlError(sqlite3_vfs *pAsyncVfs, int nByte, char *zErrMsg){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - pVfs->xDlError(pVfs, nByte, zErrMsg); -} -static void (*asyncDlSym( - sqlite3_vfs *pAsyncVfs, - void *pHandle, - const char *zSymbol -))(void){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - return pVfs->xDlSym(pVfs, pHandle, zSymbol); -} -static void asyncDlClose(sqlite3_vfs *pAsyncVfs, void *pHandle){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - pVfs->xDlClose(pVfs, pHandle); -} -static int asyncRandomness(sqlite3_vfs *pAsyncVfs, int nByte, char *zBufOut){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - return pVfs->xRandomness(pVfs, nByte, zBufOut); -} -static int asyncSleep(sqlite3_vfs *pAsyncVfs, int nMicro){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - return pVfs->xSleep(pVfs, nMicro); -} -static int asyncCurrentTime(sqlite3_vfs *pAsyncVfs, double *pTimeOut){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)pAsyncVfs->pAppData; - return pVfs->xCurrentTime(pVfs, pTimeOut); -} - -static sqlite3_vfs async_vfs = { - 1, /* iVersion */ - sizeof(AsyncFile), /* szOsFile */ - 0, /* mxPathname */ - 0, /* pNext */ - SQLITEASYNC_VFSNAME, /* zName */ - 0, /* pAppData */ - asyncOpen, /* xOpen */ - asyncDelete, /* xDelete */ - asyncAccess, /* xAccess */ - asyncFullPathname, /* xFullPathname */ - asyncDlOpen, /* xDlOpen */ - asyncDlError, /* xDlError */ - asyncDlSym, /* xDlSym */ - asyncDlClose, /* xDlClose */ - asyncRandomness, /* xDlError */ - asyncSleep, /* xDlSym */ - asyncCurrentTime /* xDlClose */ -}; - -/* -** This procedure runs in a separate thread, reading 
messages off of the -** write queue and processing them one by one. -** -** If async.writerHaltNow is true, then this procedure exits -** after processing a single message. -** -** If async.writerHaltWhenIdle is true, then this procedure exits when -** the write queue is empty. -** -** If both of the above variables are false, this procedure runs -** indefinately, waiting for operations to be added to the write queue -** and processing them in the order in which they arrive. -** -** An artifical delay of async.ioDelay milliseconds is inserted before -** each write operation in order to simulate the effect of a slow disk. -** -** Only one instance of this procedure may be running at a time. -*/ -static void asyncWriterThread(void){ - sqlite3_vfs *pVfs = (sqlite3_vfs *)(async_vfs.pAppData); - AsyncWrite *p = 0; - int rc = SQLITE_OK; - int holdingMutex = 0; - - async_mutex_enter(ASYNC_MUTEX_WRITER); - - while( async.eHalt!=SQLITEASYNC_HALT_NOW ){ - int doNotFree = 0; - sqlite3_file *pBase = 0; - - if( !holdingMutex ){ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - } - while( (p = async.pQueueFirst)==0 ){ - if( async.eHalt!=SQLITEASYNC_HALT_NEVER ){ - async_mutex_leave(ASYNC_MUTEX_QUEUE); - break; - }else{ - ASYNC_TRACE(("IDLE\n")); - async_cond_wait(ASYNC_COND_QUEUE, ASYNC_MUTEX_QUEUE); - ASYNC_TRACE(("WAKEUP\n")); - } - } - if( p==0 ) break; - holdingMutex = 1; - - /* Right now this thread is holding the mutex on the write-op queue. - ** Variable 'p' points to the first entry in the write-op queue. In - ** the general case, we hold on to the mutex for the entire body of - ** the loop. - ** - ** However in the cases enumerated below, we relinquish the mutex, - ** perform the IO, and then re-request the mutex before removing 'p' from - ** the head of the write-op queue. The idea is to increase concurrency with - ** sqlite threads. - ** - ** * An ASYNC_CLOSE operation. - ** * An ASYNC_OPENEXCLUSIVE operation. For this one, we relinquish - ** the mutex, call the underlying xOpenExclusive() function, then - ** re-aquire the mutex before seting the AsyncFile.pBaseRead - ** variable. - ** * ASYNC_SYNC and ASYNC_WRITE operations, if - ** SQLITE_ASYNC_TWO_FILEHANDLES was set at compile time and two - ** file-handles are open for the particular file being "synced". 
- */ - if( async.ioError!=SQLITE_OK && p->op!=ASYNC_CLOSE ){ - p->op = ASYNC_NOOP; - } - if( p->pFileData ){ - pBase = p->pFileData->pBaseWrite; - if( - p->op==ASYNC_CLOSE || - p->op==ASYNC_OPENEXCLUSIVE || - (pBase->pMethods && (p->op==ASYNC_SYNC || p->op==ASYNC_WRITE) ) - ){ - async_mutex_leave(ASYNC_MUTEX_QUEUE); - holdingMutex = 0; - } - if( !pBase->pMethods ){ - pBase = p->pFileData->pBaseRead; - } - } - - switch( p->op ){ - case ASYNC_NOOP: - break; - - case ASYNC_WRITE: - assert( pBase ); - ASYNC_TRACE(("WRITE %s %d bytes at %d\n", - p->pFileData->zName, p->nByte, p->iOffset)); - rc = pBase->pMethods->xWrite(pBase, (void *)(p->zBuf), p->nByte, p->iOffset); - break; - - case ASYNC_SYNC: - assert( pBase ); - ASYNC_TRACE(("SYNC %s\n", p->pFileData->zName)); - rc = pBase->pMethods->xSync(pBase, p->nByte); - break; - - case ASYNC_TRUNCATE: - assert( pBase ); - ASYNC_TRACE(("TRUNCATE %s to %d bytes\n", - p->pFileData->zName, p->iOffset)); - rc = pBase->pMethods->xTruncate(pBase, p->iOffset); - break; - - case ASYNC_CLOSE: { - AsyncFileData *pData = p->pFileData; - ASYNC_TRACE(("CLOSE %s\n", p->pFileData->zName)); - if( pData->pBaseWrite->pMethods ){ - pData->pBaseWrite->pMethods->xClose(pData->pBaseWrite); - } - if( pData->pBaseRead->pMethods ){ - pData->pBaseRead->pMethods->xClose(pData->pBaseRead); - } - - /* Unlink AsyncFileData.lock from the linked list of AsyncFileLock - ** structures for this file. Obtain the async.lockMutex mutex - ** before doing so. - */ - async_mutex_enter(ASYNC_MUTEX_LOCK); - rc = unlinkAsyncFile(pData); - async_mutex_leave(ASYNC_MUTEX_LOCK); - - if( !holdingMutex ){ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - holdingMutex = 1; - } - assert_mutex_is_held(ASYNC_MUTEX_QUEUE); - async.pQueueFirst = p->pNext; - sqlite3_free(pData); - doNotFree = 1; - break; - } - - case ASYNC_UNLOCK: { - AsyncWrite *pIter; - AsyncFileData *pData = p->pFileData; - int eLock = p->nByte; - - /* When a file is locked by SQLite using the async backend, it is - ** locked within the 'real' file-system synchronously. When it is - ** unlocked, an ASYNC_UNLOCK event is added to the write-queue to - ** unlock the file asynchronously. The design of the async backend - ** requires that the 'real' file-system file be locked from the - ** time that SQLite first locks it (and probably reads from it) - ** until all asynchronous write events that were scheduled before - ** SQLite unlocked the file have been processed. - ** - ** This is more complex if SQLite locks and unlocks the file multiple - ** times in quick succession. For example, if SQLite does: - ** - ** lock, write, unlock, lock, write, unlock - ** - ** Each "lock" operation locks the file immediately. Each "write" - ** and "unlock" operation adds an event to the event queue. If the - ** second "lock" operation is performed before the first "unlock" - ** operation has been processed asynchronously, then the first - ** "unlock" cannot be safely processed as is, since this would mean - ** the file was unlocked when the second "write" operation is - ** processed. To work around this, when processing an ASYNC_UNLOCK - ** operation, SQLite: - ** - ** 1) Unlocks the file to the minimum of the argument passed to - ** the xUnlock() call and the current lock from SQLite's point - ** of view, and - ** - ** 2) Only unlocks the file at all if this event is the last - ** ASYNC_UNLOCK event on this file in the write-queue. 
- */ - assert( holdingMutex==1 ); - assert( async.pQueueFirst==p ); - for(pIter=async.pQueueFirst->pNext; pIter; pIter=pIter->pNext){ - if( pIter->pFileData==pData && pIter->op==ASYNC_UNLOCK ) break; - } - if( !pIter ){ - async_mutex_enter(ASYNC_MUTEX_LOCK); - pData->lock.eAsyncLock = MIN( - pData->lock.eAsyncLock, MAX(pData->lock.eLock, eLock) - ); - assert(pData->lock.eAsyncLock>=pData->lock.eLock); - rc = getFileLock(pData->pLock); - async_mutex_leave(ASYNC_MUTEX_LOCK); - } - break; - } - - case ASYNC_DELETE: - ASYNC_TRACE(("DELETE %s\n", p->zBuf)); - rc = pVfs->xDelete(pVfs, p->zBuf, (int)p->iOffset); - if( rc==SQLITE_IOERR_DELETE_NOENT ) rc = SQLITE_OK; - break; - - case ASYNC_OPENEXCLUSIVE: { - int flags = (int)p->iOffset; - AsyncFileData *pData = p->pFileData; - ASYNC_TRACE(("OPEN %s flags=%d\n", p->zBuf, (int)p->iOffset)); - assert(pData->pBaseRead->pMethods==0 && pData->pBaseWrite->pMethods==0); - rc = pVfs->xOpen(pVfs, pData->zName, pData->pBaseRead, flags, 0); - assert( holdingMutex==0 ); - async_mutex_enter(ASYNC_MUTEX_QUEUE); - holdingMutex = 1; - break; - } - - default: assert(!"Illegal value for AsyncWrite.op"); - } - - /* If we didn't hang on to the mutex during the IO op, obtain it now - ** so that the AsyncWrite structure can be safely removed from the - ** global write-op queue. - */ - if( !holdingMutex ){ - async_mutex_enter(ASYNC_MUTEX_QUEUE); - holdingMutex = 1; - } - /* ASYNC_TRACE(("UNLINK %p\n", p)); */ - if( p==async.pQueueLast ){ - async.pQueueLast = 0; - } - if( !doNotFree ){ - assert_mutex_is_held(ASYNC_MUTEX_QUEUE); - async.pQueueFirst = p->pNext; - sqlite3_free(p); - } - assert( holdingMutex ); - - /* An IO error has occurred. We cannot report the error back to the - ** connection that requested the I/O since the error happened - ** asynchronously. The connection has already moved on. There - ** really is nobody to report the error to. - ** - ** The file for which the error occurred may have been a database or - ** journal file. Regardless, none of the currently queued operations - ** associated with the same database should now be performed. Nor should - ** any subsequently requested IO on either a database or journal file - ** handle for the same database be accepted until the main database - ** file handle has been closed and reopened. - ** - ** Furthermore, no further IO should be queued or performed on any file - ** handle associated with a database that may have been part of a - ** multi-file transaction that included the database associated with - ** the IO error (i.e. a database ATTACHed to the same handle at some - ** point in time). - */ - if( rc!=SQLITE_OK ){ - async.ioError = rc; - } - - if( async.ioError && !async.pQueueFirst ){ - async_mutex_enter(ASYNC_MUTEX_LOCK); - if( 0==async.pLock ){ - async.ioError = SQLITE_OK; - } - async_mutex_leave(ASYNC_MUTEX_LOCK); - } - - /* Drop the queue mutex before continuing to the next write operation - ** in order to give other threads a chance to work with the write queue. - */ - if( !async.pQueueFirst || !async.ioError ){ - async_mutex_leave(ASYNC_MUTEX_QUEUE); - holdingMutex = 0; - if( async.ioDelay>0 ){ - pVfs->xSleep(pVfs, async.ioDelay*1000); - }else{ - async_sched_yield(); - } - } - } - - async_mutex_leave(ASYNC_MUTEX_WRITER); - return; -} - -/* -** Install the asynchronous VFS. 
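The writer procedure above is exposed to applications as `sqlite3async_run()` (defined below). The intended deployment, per the `sqlite3async.h` documentation further down in this patch, is to dedicate one background thread to draining the queue; a minimal pthread sketch of that historical usage (error handling trimmed, `startAsyncWriter()` is a hypothetical helper, and the extension itself is being removed by this patch):

```c
#include <pthread.h>
#include <sqlite3.h>
#include "sqlite3async.h"

/* The background thread simply drains the write queue until halted. */
static void *writerMain(void *pUnused){
  (void)pUnused;
  sqlite3async_run();          /* returns only once a HALT is requested */
  return 0;
}

/* Register the async VFS as the default and start the writer thread. */
int startAsyncWriter(pthread_t *pThread){
  int rc = sqlite3async_initialize(0, 1);   /* wrap the default VFS */
  if( rc!=SQLITE_OK ) return rc;
  return pthread_create(pThread, 0, writerMain, 0) ? SQLITE_ERROR : SQLITE_OK;
}
```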
-*/ -int sqlite3async_initialize(const char *zParent, int isDefault){ - int rc = SQLITE_OK; - if( async_vfs.pAppData==0 ){ - sqlite3_vfs *pParent = sqlite3_vfs_find(zParent); - if( !pParent || async_os_initialize() ){ - rc = SQLITE_ERROR; - }else if( SQLITE_OK!=(rc = sqlite3_vfs_register(&async_vfs, isDefault)) ){ - async_os_shutdown(); - }else{ - async_vfs.pAppData = (void *)pParent; - async_vfs.mxPathname = ((sqlite3_vfs *)async_vfs.pAppData)->mxPathname; - } - } - return rc; -} - -/* -** Uninstall the asynchronous VFS. -*/ -void sqlite3async_shutdown(void){ - if( async_vfs.pAppData ){ - async_os_shutdown(); - sqlite3_vfs_unregister((sqlite3_vfs *)&async_vfs); - async_vfs.pAppData = 0; - } -} - -/* -** Process events on the write-queue. -*/ -void sqlite3async_run(void){ - asyncWriterThread(); -} - -/* -** Control/configure the asynchronous IO system. -*/ -int sqlite3async_control(int op, ...){ - int rc = SQLITE_OK; - va_list ap; - va_start(ap, op); - switch( op ){ - case SQLITEASYNC_HALT: { - int eWhen = va_arg(ap, int); - if( eWhen!=SQLITEASYNC_HALT_NEVER - && eWhen!=SQLITEASYNC_HALT_NOW - && eWhen!=SQLITEASYNC_HALT_IDLE - ){ - rc = SQLITE_MISUSE; - break; - } - async.eHalt = eWhen; - async_mutex_enter(ASYNC_MUTEX_QUEUE); - async_cond_signal(ASYNC_COND_QUEUE); - async_mutex_leave(ASYNC_MUTEX_QUEUE); - break; - } - - case SQLITEASYNC_DELAY: { - int iDelay = va_arg(ap, int); - if( iDelay<0 ){ - rc = SQLITE_MISUSE; - break; - } - async.ioDelay = iDelay; - break; - } - - case SQLITEASYNC_LOCKFILES: { - int bLock = va_arg(ap, int); - async_mutex_enter(ASYNC_MUTEX_QUEUE); - if( async.nFile || async.pQueueFirst ){ - async_mutex_leave(ASYNC_MUTEX_QUEUE); - rc = SQLITE_MISUSE; - break; - } - async.bLockFiles = bLock; - async_mutex_leave(ASYNC_MUTEX_QUEUE); - break; - } - - case SQLITEASYNC_GET_HALT: { - int *peWhen = va_arg(ap, int *); - *peWhen = async.eHalt; - break; - } - case SQLITEASYNC_GET_DELAY: { - int *piDelay = va_arg(ap, int *); - *piDelay = async.ioDelay; - break; - } - case SQLITEASYNC_GET_LOCKFILES: { - int *piDelay = va_arg(ap, int *); - *piDelay = async.bLockFiles; - break; - } - - default: - rc = SQLITE_ERROR; - break; - } - va_end(ap); - return rc; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_ASYNCIO) */ diff --git a/ext/async/sqlite3async.h b/ext/async/sqlite3async.h deleted file mode 100644 index 13b23bc6a2..0000000000 --- a/ext/async/sqlite3async.h +++ /dev/null @@ -1,222 +0,0 @@ - -#ifndef __SQLITEASYNC_H_ -#define __SQLITEASYNC_H_ 1 - -/* -** Make sure we can call this stuff from C++. -*/ -#ifdef __cplusplus -extern "C" { -#endif - -#define SQLITEASYNC_VFSNAME "sqlite3async" - -/* -** THREAD SAFETY NOTES: -** -** Of the four API functions in this file, the following are not threadsafe: -** -** sqlite3async_initialize() -** sqlite3async_shutdown() -** -** Care must be taken that neither of these functions is called while -** another thread may be calling either any sqlite3async_XXX() function -** or an sqlite3_XXX() API function related to a database handle that -** is using the asynchronous IO VFS. -** -** These functions: -** -** sqlite3async_run() -** sqlite3async_control() -** -** are threadsafe. It is quite safe to call either of these functions even -** if another thread may also be calling one of them or an sqlite3_XXX() -** function related to a database handle that uses the asynchronous IO VFS. -*/ - -/* -** Initialize the asynchronous IO VFS and register it with SQLite using -** sqlite3_vfs_register(). 
If the asynchronous VFS is already initialized -** and registered, this function is a no-op. The asynchronous IO VFS -** is registered as "sqlite3async". -** -** The asynchronous IO VFS does not make operating system IO requests -** directly. Instead, it uses an existing VFS implementation for all -** required file-system operations. If the first parameter to this function -** is NULL, then the current default VFS is used for IO. If it is not -** NULL, then it must be the name of an existing VFS. In other words, the -** first argument to this function is passed to sqlite3_vfs_find() to -** locate the VFS to use for all real IO operations. This VFS is known -** as the "parent VFS". -** -** If the second parameter to this function is non-zero, then the -** asynchronous IO VFS is registered as the default VFS for all SQLite -** database connections within the process. Otherwise, the asynchronous IO -** VFS is only used by connections opened using sqlite3_open_v2() that -** specifically request VFS "sqlite3async". -** -** If a parent VFS cannot be located, then SQLITE_ERROR is returned. -** In the unlikely event that operating system specific initialization -** fails (win32 systems create the required critical section and event -** objects within this function), then SQLITE_ERROR is also returned. -** Finally, if the call to sqlite3_vfs_register() returns an error, then -** the error code is returned to the user by this function. In all three -** of these cases, intialization has failed and the asynchronous IO VFS -** is not registered with SQLite. -** -** Otherwise, if no error occurs, SQLITE_OK is returned. -*/ -int sqlite3async_initialize(const char *zParent, int isDefault); - -/* -** This function unregisters the asynchronous IO VFS using -** sqlite3_vfs_unregister(). -** -** On win32 platforms, this function also releases the small number of -** critical section and event objects created by sqlite3async_initialize(). -*/ -void sqlite3async_shutdown(void); - -/* -** This function may only be called when the asynchronous IO VFS is -** installed (after a call to sqlite3async_initialize()). It processes -** zero or more queued write operations before returning. It is expected -** (but not required) that this function will be called by a different -** thread than those threads that use SQLite. The "background thread" -** that performs IO. -** -** How many queued write operations are performed before returning -** depends on the global setting configured by passing the SQLITEASYNC_HALT -** verb to sqlite3async_control() (see below for details). By default -** this function never returns - it processes all pending operations and -** then blocks waiting for new ones. -** -** If multiple simultaneous calls are made to sqlite3async_run() from two -** or more threads, then the calls are serialized internally. -*/ -void sqlite3async_run(void); - -/* -** This function may only be called when the asynchronous IO VFS is -** installed (after a call to sqlite3async_initialize()). It is used -** to query or configure various parameters that affect the operation -** of the asynchronous IO VFS. At present there are three parameters -** supported: -** -** * The "halt" parameter, which configures the circumstances under -** which the sqlite3async_run() parameter is configured. -** -** * The "delay" parameter. Setting the delay parameter to a non-zero -** value causes the sqlite3async_run() function to sleep for the -** configured number of milliseconds between each queued write -** operation. 
-** -** * The "lockfiles" parameter. This parameter determines whether or -** not the asynchronous IO VFS locks the database files it operates -** on. Disabling file locking can improve throughput. -** -** This function is always passed two arguments. When setting the value -** of a parameter, the first argument must be one of SQLITEASYNC_HALT, -** SQLITEASYNC_DELAY or SQLITEASYNC_LOCKFILES. The second argument must -** be passed the new value for the parameter as type "int". -** -** When querying the current value of a paramter, the first argument must -** be one of SQLITEASYNC_GET_HALT, GET_DELAY or GET_LOCKFILES. The second -** argument to this function must be of type (int *). The current value -** of the queried parameter is copied to the memory pointed to by the -** second argument. For example: -** -** int eCurrentHalt; -** int eNewHalt = SQLITEASYNC_HALT_IDLE; -** -** sqlite3async_control(SQLITEASYNC_HALT, eNewHalt); -** sqlite3async_control(SQLITEASYNC_GET_HALT, &eCurrentHalt); -** assert( eNewHalt==eCurrentHalt ); -** -** See below for more detail on each configuration parameter. -** -** SQLITEASYNC_HALT: -** -** This is used to set the value of the "halt" parameter. The second -** argument must be one of the SQLITEASYNC_HALT_XXX symbols defined -** below (either NEVER, IDLE and NOW). -** -** If the parameter is set to NEVER, then calls to sqlite3async_run() -** never return. This is the default setting. If the parameter is set -** to IDLE, then calls to sqlite3async_run() return as soon as the -** queue of pending write operations is empty. If the parameter is set -** to NOW, then calls to sqlite3async_run() return as quickly as -** possible, without processing any pending write requests. -** -** If an attempt is made to set this parameter to an integer value other -** than SQLITEASYNC_HALT_NEVER, IDLE or NOW, then sqlite3async_control() -** returns SQLITE_MISUSE and the current value of the parameter is not -** modified. -** -** Modifying the "halt" parameter affects calls to sqlite3async_run() -** made by other threads that are currently in progress. -** -** SQLITEASYNC_DELAY: -** -** This is used to set the value of the "delay" parameter. If set to -** a non-zero value, then after completing a pending write request, the -** sqlite3async_run() function sleeps for the configured number of -** milliseconds. -** -** If an attempt is made to set this parameter to a negative value, -** sqlite3async_control() returns SQLITE_MISUSE and the current value -** of the parameter is not modified. -** -** Modifying the "delay" parameter affects calls to sqlite3async_run() -** made by other threads that are currently in progress. -** -** SQLITEASYNC_LOCKFILES: -** -** This is used to set the value of the "lockfiles" parameter. This -** parameter must be set to either 0 or 1. If set to 1, then the -** asynchronous IO VFS uses the xLock() and xUnlock() methods of the -** parent VFS to lock database files being read and/or written. If -** the parameter is set to 0, then these locks are omitted. -** -** This parameter may only be set when there are no open database -** connections using the VFS and the queue of pending write requests -** is empty. Attempting to set it when this is not true, or to set it -** to a value other than 0 or 1 causes sqlite3async_control() to return -** SQLITE_MISUSE and the value of the parameter to remain unchanged. -** -** If this parameter is set to zero, then it is only safe to access the -** database via the asynchronous IO VFS from within a single process. 
If -** while writing to the database via the asynchronous IO VFS the database -** is also read or written from within another process, or via another -** connection that does not use the asynchronous IO VFS within the same -** process, the results are undefined (and may include crashes or database -** corruption). -** -** Alternatively, if this parameter is set to 1, then it is safe to access -** the database from multiple connections within multiple processes using -** either the asynchronous IO VFS or the parent VFS directly. -*/ -int sqlite3async_control(int op, ...); - -/* -** Values that can be used as the first argument to sqlite3async_control(). -*/ -#define SQLITEASYNC_HALT 1 -#define SQLITEASYNC_GET_HALT 2 -#define SQLITEASYNC_DELAY 3 -#define SQLITEASYNC_GET_DELAY 4 -#define SQLITEASYNC_LOCKFILES 5 -#define SQLITEASYNC_GET_LOCKFILES 6 - -/* -** If the first argument to sqlite3async_control() is SQLITEASYNC_HALT, -** the second argument should be one of the following. -*/ -#define SQLITEASYNC_HALT_NEVER 0 /* Never halt (default value) */ -#define SQLITEASYNC_HALT_NOW 1 /* Halt as soon as possible */ -#define SQLITEASYNC_HALT_IDLE 2 /* Halt when write-queue is empty */ - -#ifdef __cplusplus -} /* End of the 'extern "C"' block */ -#endif -#endif /* ifndef __SQLITEASYNC_H_ */ diff --git a/ext/expert/expert1.test b/ext/expert/expert1.test index 73541122d8..0c3b512af0 100644 --- a/ext/expert/expert1.test +++ b/ext/expert/expert1.test @@ -8,6 +8,7 @@ # May you share freely, never taking more than you give. # #*********************************************************************** +# TESTRUNNER: shell # # The focus of this file is testing the CLI shell tool. Specifically, # the ".recommend" command. @@ -391,6 +392,29 @@ do_setup_rec_test $tn.18.1 { SEARCH SomeObject USING COVERING INDEX SomeObject_idx_00000078 (x=?) } + +do_setup_rec_test $tn.19.0 { + CREATE TABLE t1("index"); +} { + SELECT * FROM t1 ORDER BY "index"; +} { + CREATE INDEX t1_idx_01a7214e ON t1('index'); + SCAN t1 USING COVERING INDEX t1_idx_01a7214e +} + +ifcapable fts5 { + do_setup_rec_test $tn.20.0 { + CREATE VIRTUAL TABLE ft USING fts5(a); + CREATE TABLE t1(x, y); + } { + SELECT * FROM ft, t1 WHERE a=x + } { + CREATE INDEX t1_idx_00000078 ON t1(x); + SCAN ft VIRTUAL TABLE INDEX 0: + SEARCH t1 USING INDEX t1_idx_00000078 (x=?) + } +} + } proc do_candidates_test {tn sql res} { @@ -417,6 +441,8 @@ do_execsql_test 5.0 { WITH s(i) AS ( VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<100) INSERT INTO t2 SELECT (i-1)/20, (i-1)/5 FROM s; + + CREATE INDEX i1 ON t1( lower(a) ); } do_candidates_test 5.1 { SELECT * FROM t1,t2 WHERE (b=? OR a=?) AND (c=? OR d=?) 
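Tying the control interface documented above together, a typical shutdown sequence drains the queue, joins the writer thread, and unregisters the VFS. A sketch under the same assumptions as the start-up example earlier (`stopAsyncWriter()` is a hypothetical helper):

```c
#include <pthread.h>
#include <sqlite3.h>
#include "sqlite3async.h"

int stopAsyncWriter(pthread_t writer){
  int eHalt = 0;
  /* Ask sqlite3async_run() to return once the queue is empty. */
  int rc = sqlite3async_control(SQLITEASYNC_HALT, SQLITEASYNC_HALT_IDLE);
  if( rc!=SQLITE_OK ) return rc;
  sqlite3async_control(SQLITEASYNC_GET_HALT, &eHalt);  /* optional read-back */
  pthread_join(writer, 0);      /* wait for all pending writes to hit disk */
  sqlite3async_shutdown();      /* unregister the "sqlite3async" VFS       */
  return SQLITE_OK;
}
```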
@@ -446,6 +472,7 @@ do_execsql_test 5.3 { ANALYZE; SELECT * FROM sqlite_stat1 ORDER BY 1, 2; } { + t1 i1 {100 50} t1 t1_idx_00000061 {100 50} t1 t1_idx_00000062 {100 20} t1 t1_idx_000123a7 {100 50 17} @@ -454,4 +481,129 @@ do_execsql_test 5.3 { t2 t2_idx_0001295b {100 20 5} } +do_catchsql_test 5.4 { + SELECT sqlite_expert_rem(123, 123); +} {1 {no such function: sqlite_expert_rem}} +do_catchsql_test 5.5 { + SELECT sqlite_expert_sample(); +} {1 {no such function: sqlite_expert_sample}} + +if 0 { +do_test expert1-6.0 { + catchcmd :memory: { +.expert +select base64(''); +.expert +select name from pragma_collation_list order by name collate uint; +} +} {0 {(no new indexes) + +SCAN CONSTANT ROW + +(no new indexes) + +SCAN pragma_collation_list VIRTUAL TABLE INDEX 0: +USE TEMP B-TREE FOR ORDER BY +}} +} + +do_execsql_test 6.0 { + CREATE TABLE x1(a, b, c, d); + CREATE INDEX x1ab ON x1(a, lower(b)); + CREATE INDEX x1dcba ON x1(d, b+c, a); +} + +do_candidates_test 6.1 { + SELECT * FROM x1 WHERE b=? ORDER BY a; +} { + CREATE INDEX x1_idx_0001267f ON x1(b, a); + CREATE INDEX x1_idx_00000062 ON x1(b); +} + +#------------------------------------------------------------------------- +ifcapable fts5 { + reset_db + do_execsql_test 7.0 { + CREATE VIRTUAL TABLE ft USING fts5(a); + CREATE TABLE t1(x, y); + } + + do_candidates_test 7.1 { + SELECT * FROM ft, t1 WHERE a=x + } { + CREATE INDEX t1_idx_00000078 ON t1(x); + } + + register_tcl_module db + proc vtab_command {method args} { + global G + + switch -- $method { + xConnect { + return "CREATE TABLE t1(a, b, c);" + } + + xBestIndex { + return [list] + } + + xFilter { + return [list sql "SELECT rowid, * FROM t0"] + } + } + + return {} + } + + do_execsql_test 7.2 { + CREATE TABLE t0(a, b, c); + INSERT INTO t0 VALUES(1, 2, 3), (11, 22, 33); + CREATE VIRTUAL TABLE t2 USING tcl(vtab_command); + } + + do_execsql_test 7.3 { + SELECT * FROM t2 + } { + 1 2 3 + 11 22 33 + } + + do_candidates_test 7.4 { + SELECT * FROM ft, t1 WHERE a=x + } { + CREATE INDEX t1_idx_00000078 ON t1(x); + } + + do_test 7.5 { + set expert [sqlite3_expert_new db] + list [catch { $expert sql "SELECT * FROM ft, t2 WHERE b=1" } msg] $msg + } {1 {no such table: t2}} + $expert destroy + + reset_db + do_execsql_test 7.6 { + BEGIN TRANSACTION; + CREATE TABLE IF NOT EXISTS 'bfts_idx_data'(id INTEGER PRIMARY KEY, block BLOB); + CREATE TABLE IF NOT EXISTS 'fts_idx_data'(id INTEGER PRIMARY KEY, block BLOB); + INSERT INTO fts_idx_data VALUES(1,X''); + INSERT INTO fts_idx_data VALUES(10,X'00000000ff000001000000'); + CREATE TABLE IF NOT EXISTS 'fts_idx_idx'(segid, term, pgno, PRIMARY KEY(segid, term)) WITHOUT ROWID; + CREATE TABLE IF NOT EXISTS 'fts_idx_docsize'(id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER); + CREATE TABLE IF NOT EXISTS 'fts_idx_config'(k PRIMARY KEY, v) WITHOUT ROWID; + INSERT INTO fts_idx_config VALUES('version',4); + PRAGMA writable_schema=ON; + INSERT INTO sqlite_schema(type,name,tbl_name,rootpage,sql)VALUES('table','fts_idx','fts_idx',0,'CREATE VIRTUAL TABLE fts_idx USING fts5(Title, Description, Channel, Tags, content='''', contentless_delete=1)'); + + CREATE TABLE f(x BLOB, y); + COMMIT; + PRAGMA writable_schema = RESET; + } + + do_candidates_test 7.4 { + SELECT * FROM fts_idx, f WHERE x = fts_idx.Channel + } { + CREATE INDEX f_idx_00000078 ON f(x); + } +} + finish_test diff --git a/ext/expert/sqlite3expert.c b/ext/expert/sqlite3expert.c index a5eb109b46..c430c3ae95 100644 --- a/ext/expert/sqlite3expert.c +++ b/ext/expert/sqlite3expert.c @@ -32,7 +32,7 @@ #endif /* 
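The `sqlite_stat1` rows expected by test 5.3 above (for example `{100 50}`) are the table's row count followed by the rounded average number of rows per distinct key prefix, which is the arithmetic the expert code widens to 64 bits later in this patch. A standalone restatement of that formula, assuming `nRow` is the sampled row count and `nDistinct` the number of distinct prefix values:

```c
#include <assert.h>
typedef long long i64;

/* Rounded rows-per-distinct-value, matching (s0 + aStat[i]/2) / aStat[i]. */
static i64 statEntry(i64 nRow, i64 nDistinct){
  return (nRow + nDistinct/2) / nDistinct;
}

int main(void){
  assert( statEntry(100, 2)==50 );    /* "100 50": two distinct values   */
  assert( statEntry(100, 100)==1 );   /* unique column: one row per value */
  return 0;
}
```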
!defined(SQLITE_AMALGAMATION) */ -#ifndef SQLITE_OMIT_VIRTUALTABLE +#ifndef SQLITE_OMIT_VIRTUALTABLE typedef sqlite3_int64 i64; typedef sqlite3_uint64 u64; @@ -172,11 +172,11 @@ struct sqlite3expert { ** Allocate and return nByte bytes of zeroed memory using sqlite3_malloc(). ** If the allocation fails, set *pRc to SQLITE_NOMEM and return NULL. */ -static void *idxMalloc(int *pRc, int nByte){ +static void *idxMalloc(int *pRc, i64 nByte){ void *pRet; assert( *pRc==SQLITE_OK ); assert( nByte>0 ); - pRet = sqlite3_malloc(nByte); + pRet = sqlite3_malloc64(nByte); if( pRet ){ memset(pRet, 0, nByte); }else{ @@ -243,7 +243,7 @@ static int idxHashAdd( return 1; } } - pEntry = idxMalloc(pRc, sizeof(IdxHashEntry) + nKey+1 + nVal+1); + pEntry = idxMalloc(pRc, sizeof(IdxHashEntry) + (i64)nKey+1 + (i64)nVal+1); if( pEntry ){ pEntry->zKey = (char*)&pEntry[1]; memcpy(pEntry->zKey, zKey, nKey); @@ -378,15 +378,15 @@ struct ExpertCsr { }; static char *expertDequote(const char *zIn){ - int n = STRLEN(zIn); - char *zRet = sqlite3_malloc(n); + i64 n = STRLEN(zIn); + char *zRet = sqlite3_malloc64(n); assert( zIn[0]=='\'' ); assert( zIn[n-1]=='\'' ); if( zRet ){ - int iOut = 0; - int iIn = 0; + i64 iOut = 0; + i64 iIn = 0; for(iIn=1; iIn<(n-1); iIn++){ if( zIn[iIn]=='\'' ){ assert( zIn[iIn+1]=='\'' ); @@ -626,7 +626,7 @@ static int expertFilter( pCsr->pData = 0; if( rc==SQLITE_OK ){ rc = idxPrintfPrepareStmt(pExpert->db, &pCsr->pData, &pVtab->base.zErrMsg, - "SELECT * FROM main.%Q WHERE sample()", pVtab->pTab->zName + "SELECT * FROM main.%Q WHERE sqlite_expert_sample()", pVtab->pTab->zName ); } @@ -662,6 +662,7 @@ static int idxRegisterVtab(sqlite3expert *p){ 0, /* xRelease */ 0, /* xRollbackTo */ 0, /* xShadowName */ + 0, /* xIntegrity */ }; return sqlite3_create_module(p->dbv, "expert", &expertModule, (void*)p); @@ -697,17 +698,25 @@ static int idxGetTableInfo( ){ sqlite3_stmt *p1 = 0; int nCol = 0; - int nTab = STRLEN(zTab); - int nByte = sizeof(IdxTable) + nTab + 1; + int nTab; + i64 nByte; IdxTable *pNew = 0; int rc, rc2; char *pCsr = 0; int nPk = 0; + *ppOut = 0; + if( zTab==0 ) return SQLITE_ERROR; + nTab = STRLEN(zTab); + nByte = sizeof(IdxTable) + nTab + 1; rc = idxPrintfPrepareStmt(db, &p1, pzErrmsg, "PRAGMA table_xinfo=%Q", zTab); while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(p1) ){ const char *zCol = (const char*)sqlite3_column_text(p1, 1); const char *zColSeq = 0; + if( zCol==0 ){ + rc = SQLITE_ERROR; + break; + } nByte += 1 + STRLEN(zCol); rc = sqlite3_table_column_metadata( db, "main", zTab, zCol, 0, &zColSeq, 0, 0, 0 @@ -734,7 +743,9 @@ static int idxGetTableInfo( while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(p1) ){ const char *zCol = (const char*)sqlite3_column_text(p1, 1); const char *zColSeq = 0; - int nCopy = STRLEN(zCol) + 1; + int nCopy; + if( zCol==0 ) continue; + nCopy = STRLEN(zCol) + 1; pNew->aCol[nCol].zName = pCsr; pNew->aCol[nCol].iPk = (sqlite3_column_int(p1, 5)==1 && nPk==1); memcpy(pCsr, zCol, nCopy); @@ -780,14 +791,14 @@ static char *idxAppendText(int *pRc, char *zIn, const char *zFmt, ...){ va_list ap; char *zAppend = 0; char *zRet = 0; - int nIn = zIn ? STRLEN(zIn) : 0; - int nAppend = 0; + i64 nIn = zIn ? 
STRLEN(zIn) : 0; + i64 nAppend = 0; va_start(ap, zFmt); if( *pRc==SQLITE_OK ){ zAppend = sqlite3_vmprintf(zFmt, ap); if( zAppend ){ nAppend = STRLEN(zAppend); - zRet = (char*)sqlite3_malloc(nIn + nAppend + 1); + zRet = (char*)sqlite3_malloc64(nIn + nAppend + 1); } if( zAppend && zRet ){ if( nIn ) memcpy(zRet, zIn, nIn); @@ -810,6 +821,10 @@ static char *idxAppendText(int *pRc, char *zIn, const char *zFmt, ...){ */ static int idxIdentifierRequiresQuotes(const char *zId){ int i; + int nId = STRLEN(zId); + + if( sqlite3_keyword_check(zId, nId) ) return 1; + for(i=0; zId[i]; i++){ if( !(zId[i]=='_') && !(zId[i]>='0' && zId[i]<='9') @@ -886,6 +901,7 @@ static int idxFindCompatible( IdxConstraint *pT = pTail; sqlite3_stmt *pInfo = 0; const char *zIdx = (const char*)sqlite3_column_text(pIdxList, 1); + if( zIdx==0 ) continue; /* Zero the IdxConstraint.bFlag values in the pEq list */ for(pIter=pEq; pIter; pIter=pIter->pLink) pIter->bFlag = 0; @@ -1168,7 +1184,7 @@ static void idxWriteFree(IdxWrite *pTab){ ** runs all the queries to see which indexes they prefer, and populates ** IdxStatement.zIdx and IdxStatement.zEQP with the results. */ -int idxFindIndexes( +static int idxFindIndexes( sqlite3expert *p, char **pzErr /* OUT: Error message (sqlite3_malloc) */ ){ @@ -1297,6 +1313,7 @@ static int idxProcessOneTrigger( rc = idxPrintfPrepareStmt(p->db, &pSelect, pzErr, zSql, zTab, zTab); while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pSelect) ){ const char *zCreate = (const char*)sqlite3_column_text(pSelect, 0); + if( zCreate==0 ) continue; rc = sqlite3_exec(p->dbv, zCreate, 0, 0, pzErr); } idxFinalize(&rc, pSelect); @@ -1375,6 +1392,66 @@ static int idxProcessTriggers(sqlite3expert *p, char **pzErr){ return rc; } +/* +** This function tests if the schema of the main database of database handle +** db contains an object named zTab. Assuming no error occurs, output parameter +** (*pbContains) is set to true if zTab exists, or false if it does not. +** +** Or, if an error occurs, an SQLite error code is returned. The final value +** of (*pbContains) is undefined in this case. +*/ +static int expertDbContainsObject( + sqlite3 *db, + const char *zTab, + int *pbContains /* OUT: True if object exists */ +){ + const char *zSql = "SELECT 1 FROM sqlite_schema WHERE name = ?"; + sqlite3_stmt *pSql = 0; + int rc = SQLITE_OK; + int ret = 0; + + rc = sqlite3_prepare_v2(db, zSql, -1, &pSql, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pSql, 1, zTab, -1, SQLITE_STATIC); + if( SQLITE_ROW==sqlite3_step(pSql) ){ + ret = 1; + } + rc = sqlite3_finalize(pSql); + } + + *pbContains = ret; + return rc; +} + +/* +** Execute SQL command zSql using database handle db. If no error occurs, +** set (*pzErr) to NULL and return SQLITE_OK. +** +** If an error does occur, return an SQLite error code and set (*pzErr) to +** point to a buffer containing an English language error message. Except, +** if the error message begins with "no such module:", then ignore the +** error and return as if the SQL statement had succeeded. +** +** This is used to copy as much of the database schema as possible while +** ignoring any errors related to missing virtual table modules. 
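The `sqlite3_keyword_check()` call added to `idxIdentifierRequiresQuotes()` above is what lets the new test 19.0 recommend an index on a column literally named `index`: keywords must always be quoted, even though they contain only alphanumeric characters. A quick standalone check of that rule using the public SQLite API:

```c
#include <assert.h>
#include <sqlite3.h>

int main(void){
  assert( sqlite3_keyword_check("index", 5)!=0 );    /* keyword: needs quoting */
  assert( sqlite3_keyword_check("channel", 7)==0 );  /* ordinary identifier    */
  return 0;
}
```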
+*/ +static int expertSchemaSql(sqlite3 *db, const char *zSql, char **pzErr){ + int rc = SQLITE_OK; + char *zErr = 0; + + rc = sqlite3_exec(db, zSql, 0, 0, &zErr); + if( rc!=SQLITE_OK && zErr ){ + int nErr = STRLEN(zErr); + if( nErr>=15 && memcmp(zErr, "no such module:", 15)==0 ){ + sqlite3_free(zErr); + rc = SQLITE_OK; + zErr = 0; + } + } + + *pzErr = zErr; + return rc; +} static int idxCreateVtabSchema(sqlite3expert *p, char **pzErrmsg){ int rc = idxRegisterVtab(p); @@ -1386,25 +1463,35 @@ static int idxCreateVtabSchema(sqlite3expert *p, char **pzErrmsg){ ** 2) Create the equivalent virtual table in dbv. */ rc = idxPrepareStmt(p->db, &pSchema, pzErrmsg, - "SELECT type, name, sql, 1 FROM sqlite_schema " - "WHERE type IN ('table','view') AND name NOT LIKE 'sqlite_%%' " + "SELECT type, name, sql, 1, " + " substr(sql,1,14)=='create virtual' COLLATE nocase " + "FROM sqlite_schema " + "WHERE type IN ('table','view') AND " + " substr(name,1,7)!='sqlite_' COLLATE nocase " " UNION ALL " - "SELECT type, name, sql, 2 FROM sqlite_schema " + "SELECT type, name, sql, 2, 0 FROM sqlite_schema " "WHERE type = 'trigger'" " AND tbl_name IN(SELECT name FROM sqlite_schema WHERE type = 'view') " - "ORDER BY 4, 1" + "ORDER BY 4, 5 DESC, 1" ); while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pSchema) ){ const char *zType = (const char*)sqlite3_column_text(pSchema, 0); const char *zName = (const char*)sqlite3_column_text(pSchema, 1); const char *zSql = (const char*)sqlite3_column_text(pSchema, 2); + int bVirtual = sqlite3_column_int(pSchema, 4); + int bExists = 0; - if( zType[0]=='v' || zType[1]=='r' ){ - rc = sqlite3_exec(p->dbv, zSql, 0, 0, pzErrmsg); + if( zType==0 || zName==0 ) continue; + rc = expertDbContainsObject(p->dbv, zName, &bExists); + if( rc || bExists ) continue; + + if( zType[0]=='v' || zType[1]=='r' || bVirtual ){ + /* A view. Or a trigger on a view. */ + if( zSql ) rc = expertSchemaSql(p->dbv, zSql, pzErrmsg); }else{ IdxTable *pTab; rc = idxGetTableInfo(p->db, zName, &pTab, pzErrmsg); - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(pTab!=0) ){ int i; char *zInner = 0; char *zOuter = 0; @@ -1475,14 +1562,14 @@ struct IdxRemCtx { int eType; /* SQLITE_NULL, INTEGER, REAL, TEXT, BLOB */ i64 iVal; /* SQLITE_INTEGER value */ double rVal; /* SQLITE_FLOAT value */ - int nByte; /* Bytes of space allocated at z */ - int n; /* Size of buffer z */ + i64 nByte; /* Bytes of space allocated at z */ + i64 n; /* Size of buffer z */ char *z; /* SQLITE_TEXT/BLOB value */ } aSlot[1]; }; /* -** Implementation of scalar function rem(). +** Implementation of scalar function sqlite_expert_rem(). 
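`expertSchemaSql()` above tolerates exactly one class of error while copying the schema: a missing virtual table module. A standalone restatement of that filter (the `isMissingModuleError()` helper is illustrative, not part of the patch):

```c
#include <assert.h>
#include <string.h>

/* An exec() failure is tolerated only when its message identifies a
** missing virtual table module, mirroring expertSchemaSql() above. */
static int isMissingModuleError(const char *zErr){
  return zErr!=0 && strncmp(zErr, "no such module:", 15)==0;
}

int main(void){
  assert( isMissingModuleError("no such module: fts5") );
  assert( !isMissingModuleError("no such table: t1") );
  return 0;
}
```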
*/ static void idxRemFunc( sqlite3_context *pCtx, @@ -1495,7 +1582,7 @@ assert( argc==2 ); iSlot = sqlite3_value_int(argv[0]); - assert( iSlot<=p->nSlot ); + assert( iSlot<p->nSlot ); pSlot = &p->aSlot[iSlot]; switch( pSlot->eType ){ @@ -1512,11 +1599,13 @@ break; case SQLITE_BLOB: - sqlite3_result_blob(pCtx, pSlot->z, pSlot->n, SQLITE_TRANSIENT); + assert( pSlot->n <= 0x7fffffff ); + sqlite3_result_blob(pCtx, pSlot->z, (int)pSlot->n, SQLITE_TRANSIENT); break; case SQLITE_TEXT: - sqlite3_result_text(pCtx, pSlot->z, pSlot->n, SQLITE_TRANSIENT); + assert( pSlot->n <= 0x7fffffff ); + sqlite3_result_text(pCtx, pSlot->z, (int)pSlot->n, SQLITE_TRANSIENT); break; } @@ -1536,9 +1625,10 @@ case SQLITE_BLOB: case SQLITE_TEXT: { - int nByte = sqlite3_value_bytes(argv[1]); + i64 nByte = sqlite3_value_bytes(argv[1]); + const void *pData = 0; if( nByte>pSlot->nByte ){ - char *zNew = (char*)sqlite3_realloc(pSlot->z, nByte*2); + char *zNew = (char*)sqlite3_realloc64(pSlot->z, nByte*2); if( zNew==0 ){ sqlite3_result_error_nomem(pCtx); return; @@ -1548,9 +1638,11 @@ } pSlot->n = nByte; if( pSlot->eType==SQLITE_BLOB ){ - memcpy(pSlot->z, sqlite3_value_blob(argv[1]), nByte); + pData = sqlite3_value_blob(argv[1]); + if( pData ) memcpy(pSlot->z, pData, nByte); }else{ - memcpy(pSlot->z, sqlite3_value_text(argv[1]), nByte); + pData = sqlite3_value_text(argv[1]); + memcpy(pSlot->z, pData, nByte); } break; } @@ -1591,7 +1683,7 @@ int nCol = 0; int i; sqlite3_stmt *pQuery = 0; - int *aStat = 0; + i64 *aStat = 0; int rc = SQLITE_OK; assert( p->iSample>0 ); @@ -1602,8 +1694,15 @@ const char *zComma = zCols==0 ? "" : ", "; const char *zName = (const char*)sqlite3_column_text(pIndexXInfo, 0); const char *zColl = (const char*)sqlite3_column_text(pIndexXInfo, 1); + if( zName==0 ){ + /* This index contains an expression. Ignore it. */ + sqlite3_free(zCols); + sqlite3_free(zOrder); + return sqlite3_reset(pIndexXInfo); + } zCols = idxAppendText(&rc, zCols, - "%sx.%Q IS rem(%d, x.%Q) COLLATE %s", zComma, zName, nCol, zName, zColl + "%sx.%Q IS sqlite_expert_rem(%d, x.%Q) COLLATE %s", + zComma, zName, nCol, zName, zColl ); zOrder = idxAppendText(&rc, zOrder, "%s%d", zComma, ++nCol); } @@ -1630,7 +1729,7 @@ sqlite3_free(zQuery); if( rc==SQLITE_OK ){ - aStat = (int*)idxMalloc(&rc, sizeof(int)*(nCol+1)); + aStat = (i64*)idxMalloc(&rc, sizeof(i64)*(nCol+1)); } if( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pQuery) ){ IdxHashEntry *pEntry; @@ -1647,11 +1746,11 @@ } if( rc==SQLITE_OK ){ - int s0 = aStat[0]; - zStat = sqlite3_mprintf("%d", s0); + i64 s0 = aStat[0]; + zStat = sqlite3_mprintf("%lld", s0); if( zStat==0 ) rc = SQLITE_NOMEM; for(i=1; rc==SQLITE_OK && i<=nCol; i++){ - zStat = idxAppendText(&rc, zStat, " %d", (s0+aStat[i]/2) / aStat[i]); + zStat = idxAppendText(&rc, zStat, " %lld", (s0+aStat[i]/2) / aStat[i]); } } @@ -1730,24 +1829,24 @@ static int idxPopulateStat1(sqlite3expert *p, char **pzErr){ rc = sqlite3_exec(p->dbm, "ANALYZE; PRAGMA writable_schema=1", 0, 0, 0); if( rc==SQLITE_OK ){ - int nByte = sizeof(struct IdxRemCtx) + (sizeof(struct IdxRemSlot) * nMax); + i64 nByte = sizeof(struct IdxRemCtx) + (sizeof(struct IdxRemSlot) * nMax); pCtx = (struct IdxRemCtx*)idxMalloc(&rc, nByte); } if( rc==SQLITE_OK ){ sqlite3 *dbrem = (p->iSample==100 ? 
p->db : p->dbv); - rc = sqlite3_create_function( - dbrem, "rem", 2, SQLITE_UTF8, (void*)pCtx, idxRemFunc, 0, 0 + rc = sqlite3_create_function(dbrem, "sqlite_expert_rem", + 2, SQLITE_UTF8, (void*)pCtx, idxRemFunc, 0, 0 ); } if( rc==SQLITE_OK ){ - rc = sqlite3_create_function( - p->db, "sample", 0, SQLITE_UTF8, (void*)&samplectx, idxSampleFunc, 0, 0 + rc = sqlite3_create_function(p->db, "sqlite_expert_sample", + 0, SQLITE_UTF8, (void*)&samplectx, idxSampleFunc, 0, 0 ); } if( rc==SQLITE_OK ){ - pCtx->nSlot = nMax+1; + pCtx->nSlot = (i64)nMax+1; rc = idxPrepareStmt(p->dbm, &pAllIndex, pzErr, zAllIndex); } if( rc==SQLITE_OK ){ @@ -1761,6 +1860,7 @@ static int idxPopulateStat1(sqlite3expert *p, char **pzErr){ i64 iRowid = sqlite3_column_int64(pAllIndex, 0); const char *zTab = (const char*)sqlite3_column_text(pAllIndex, 1); const char *zIdx = (const char*)sqlite3_column_text(pAllIndex, 2); + if( zTab==0 || zIdx==0 ) continue; if( p->iSample<100 && iPrev!=iRowid ){ samplectx.target = (double)p->iSample / 100.0; samplectx.iTarget = p->iSample; @@ -1793,10 +1893,95 @@ static int idxPopulateStat1(sqlite3expert *p, char **pzErr){ rc = sqlite3_exec(p->dbm, "ANALYZE sqlite_schema", 0, 0, 0); } + sqlite3_create_function(p->db, "sqlite_expert_rem", 2, SQLITE_UTF8, 0,0,0,0); + sqlite3_create_function(p->db, "sqlite_expert_sample", 0,SQLITE_UTF8,0,0,0,0); + sqlite3_exec(p->db, "DROP TABLE IF EXISTS temp."UNIQUE_TABLE_NAME,0,0,0); return rc; } +/* +** Define and possibly pretend to use a useless collation sequence. +** This pretense allows expert to accept SQL using custom collations. +*/ +int dummyCompare(void *up1, int up2, const void *up3, int up4, const void *up5){ + (void)up1; + (void)up2; + (void)up3; + (void)up4; + (void)up5; + assert(0); /* VDBE should never be run. */ + return 0; +} +/* And a callback to register above upon actual need */ +void useDummyCS(void *up1, sqlite3 *db, int etr, const char *zName){ + (void)up1; + sqlite3_create_collation_v2(db, zName, etr, 0, dummyCompare, 0); +} + +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) \ + && !defined(SQLITE_OMIT_INTROSPECTION_PRAGMAS) +/* +** dummy functions for no-op implementation of UDFs during expert's work +*/ +void dummyUDF(sqlite3_context *up1, int up2, sqlite3_value **up3){ + (void)up1; + (void)up2; + (void)up3; + assert(0); /* VDBE should never be run. */ +} +void dummyUDFvalue(sqlite3_context *up1){ + (void)up1; + assert(0); /* VDBE should never be run. */ +} + +/* +** Register UDFs from user database with another. +*/ +int registerUDFs(sqlite3 *dbSrc, sqlite3 *dbDst){ + sqlite3_stmt *pStmt; + int rc = sqlite3_prepare_v2(dbSrc, + "SELECT name,type,enc,narg,flags " + "FROM pragma_function_list() " + "WHERE builtin==0", -1, &pStmt, 0); + if( rc==SQLITE_OK ){ + while( SQLITE_ROW==(rc = sqlite3_step(pStmt)) ){ + int nargs = sqlite3_column_int(pStmt,3); + int flags = sqlite3_column_int(pStmt,4); + const char *name = (char*)sqlite3_column_text(pStmt,0); + const char *type = (char*)sqlite3_column_text(pStmt,1); + const char *enc = (char*)sqlite3_column_text(pStmt,2); + if( name==0 || type==0 || enc==0 ){ + /* no-op. 
Only happens on OOM */ + }else{ + int ienc = SQLITE_UTF8; + int rcf = SQLITE_ERROR; + if( strcmp(enc,"utf16le")==0 ) ienc = SQLITE_UTF16LE; + else if( strcmp(enc,"utf16be")==0 ) ienc = SQLITE_UTF16BE; + ienc |= (flags & (SQLITE_DETERMINISTIC|SQLITE_DIRECTONLY)); + if( strcmp(type,"w")==0 ){ + rcf = sqlite3_create_window_function(dbDst,name,nargs,ienc,0, + dummyUDF,dummyUDFvalue,0,0,0); + }else if( strcmp(type,"a")==0 ){ + rcf = sqlite3_create_function(dbDst,name,nargs,ienc,0, + 0,dummyUDF,dummyUDFvalue); + }else if( strcmp(type,"s")==0 ){ + rcf = sqlite3_create_function(dbDst,name,nargs,ienc,0, + dummyUDF,0,0); + } + if( rcf!=SQLITE_OK ){ + rc = rcf; + break; + } + } + } + sqlite3_finalize(pStmt); + if( rc==SQLITE_DONE ) rc = SQLITE_OK; + } + return rc; +} +#endif + /* ** Allocate a new sqlite3expert object. */ @@ -1823,18 +2008,38 @@ sqlite3expert *sqlite3_expert_new(sqlite3 *db, char **pzErrmsg){ sqlite3_db_config(pNew->dbm, SQLITE_DBCONFIG_TRIGGER_EQP, 1, (int*)0); } } - + + /* Allow custom collations to be dealt with through prepare. */ + if( rc==SQLITE_OK ) rc = sqlite3_collation_needed(pNew->dbm,0,useDummyCS); + if( rc==SQLITE_OK ) rc = sqlite3_collation_needed(pNew->dbv,0,useDummyCS); + +#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS) \ + && !defined(SQLITE_OMIT_INTROSPECTION_PRAGMAS) + /* Register UDFs from database [db] with [dbm] and [dbv]. */ + if( rc==SQLITE_OK ){ + rc = registerUDFs(pNew->db, pNew->dbm); + } + if( rc==SQLITE_OK ){ + rc = registerUDFs(pNew->db, pNew->dbv); + } +#endif /* Copy the entire schema of database [db] into [dbm]. */ if( rc==SQLITE_OK ){ - sqlite3_stmt *pSql; + sqlite3_stmt *pSql = 0; rc = idxPrintfPrepareStmt(pNew->db, &pSql, pzErrmsg, - "SELECT sql FROM sqlite_schema WHERE name NOT LIKE 'sqlite_%%'" - " AND sql NOT LIKE 'CREATE VIRTUAL %%'" + "SELECT sql, name, substr(sql,1,14)=='create virtual' COLLATE nocase" + " FROM sqlite_schema WHERE substr(name,1,7)!='sqlite_' COLLATE nocase" + " ORDER BY 3 DESC, rowid" ); while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pSql) ){ const char *zSql = (const char*)sqlite3_column_text(pSql, 0); - rc = sqlite3_exec(pNew->dbm, zSql, 0, 0, pzErrmsg); + const char *zName = (const char*)sqlite3_column_text(pSql, 1); + int bExists = 0; + rc = expertDbContainsObject(pNew->dbm, zName, &bExists); + if( rc==SQLITE_OK && zSql && bExists==0 ){ + rc = expertSchemaSql(pNew->dbm, zSql, pzErrmsg); + } } idxFinalize(&rc, pSql); } @@ -1849,7 +2054,7 @@ sqlite3expert *sqlite3_expert_new(sqlite3 *db, char **pzErrmsg){ sqlite3_set_authorizer(pNew->dbv, idxAuthCallback, (void*)pNew); } - /* If an error has occurred, free the new object and reutrn NULL. Otherwise, + /* If an error has occurred, free the new object and return NULL. Otherwise, ** return the new sqlite3expert handle. */ if( rc!=SQLITE_OK ){ sqlite3_expert_destroy(pNew); @@ -1899,12 +2104,16 @@ int sqlite3_expert_sql( while( rc==SQLITE_OK && zStmt && zStmt[0] ){ sqlite3_stmt *pStmt = 0; + /* Ensure that the provided statement compiles against user's DB. 
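The `registerUDFs()`/`useDummyCS()` machinery above exists because the expert extension prepares the user's statements against separate in-memory schema copies, and preparation fails outright if a referenced application-defined function or collation is unknown there. A small standalone demonstration of the underlying problem, using only the plain SQLite API (the `myfunc()` UDF is hypothetical):

```c
#include <assert.h>
#include <sqlite3.h>

static void myfunc(sqlite3_context *ctx, int n, sqlite3_value **a){
  (void)n; (void)a;
  sqlite3_result_int(ctx, 42);
}

int main(void){
  sqlite3 *db1, *db2;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db1);
  sqlite3_open(":memory:", &db2);
  sqlite3_create_function(db1, "myfunc", 0, SQLITE_UTF8, 0, myfunc, 0, 0);
  /* Prepares fine where the UDF is registered... */
  assert( sqlite3_prepare_v2(db1, "SELECT myfunc()", -1, &pStmt, 0)==SQLITE_OK );
  sqlite3_finalize(pStmt);
  /* ...and fails with "no such function" on the handle that lacks it. */
  assert( sqlite3_prepare_v2(db2, "SELECT myfunc()", -1, &pStmt, 0)!=SQLITE_OK );
  sqlite3_finalize(pStmt);
  sqlite3_close(db1); sqlite3_close(db2);
  return 0;
}
```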
*/ + rc = idxPrepareStmt(p->db, &pStmt, pzErr, zStmt); + if( rc!=SQLITE_OK ) break; + sqlite3_finalize(pStmt); rc = sqlite3_prepare_v2(p->dbv, zStmt, -1, &pStmt, &zStmt); if( rc==SQLITE_OK ){ if( pStmt ){ IdxStatement *pNew; const char *z = sqlite3_sql(pStmt); - int n = STRLEN(z); + i64 n = STRLEN(z); pNew = (IdxStatement*)idxMalloc(&rc, sizeof(IdxStatement) + n+1); if( rc==SQLITE_OK ){ pNew->zSql = (char*)&pNew[1]; diff --git a/ext/expert/test_expert.c b/ext/expert/test_expert.c index 064c1908a9..4383d7c7bb 100644 --- a/ext/expert/test_expert.c +++ b/ext/expert/test_expert.c @@ -16,15 +16,7 @@ #include "sqlite3expert.h" #include #include - -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -# ifndef SQLITE_TCLAPI -# define SQLITE_TCLAPI -# endif -#endif +#include "tclsqlite.h" #ifndef SQLITE_OMIT_VIRTUALTABLE @@ -36,7 +28,7 @@ static int dbHandleFromObj(Tcl_Interp *interp, Tcl_Obj *pObj, sqlite3 **pDb){ Tcl_CmdInfo info; if( 0==Tcl_GetCommandInfo(interp, Tcl_GetString(pObj), &info) ){ - Tcl_AppendResult(interp, "no such handle: ", Tcl_GetString(pObj), 0); + Tcl_AppendResult(interp, "no such handle: ", Tcl_GetString(pObj), NULL); return TCL_ERROR; } diff --git a/ext/fts1/README.txt b/ext/fts1/README.txt deleted file mode 100644 index 292b7daa0b..0000000000 --- a/ext/fts1/README.txt +++ /dev/null @@ -1,2 +0,0 @@ -This folder contains source code to the first full-text search -extension for SQLite. diff --git a/ext/fts1/ft_hash.c b/ext/fts1/ft_hash.c deleted file mode 100644 index 8b3a7064ee..0000000000 --- a/ext/fts1/ft_hash.c +++ /dev/null @@ -1,404 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of generic hash-tables used in SQLite. -** We've modified it slightly to serve as a standalone hash table -** implementation for the full-text indexing module. -*/ -#include -#include -#include - -#include "ft_hash.h" - -void *malloc_and_zero(int n){ - void *p = malloc(n); - if( p ){ - memset(p, 0, n); - } - return p; -} - -/* Turn bulk memory into a hash table object by initializing the -** fields of the Hash structure. -** -** "pNew" is a pointer to the hash table that is to be initialized. -** keyClass is one of the constants HASH_INT, HASH_POINTER, -** HASH_BINARY, or HASH_STRING. The value of keyClass -** determines what kind of key the hash table will use. "copyKey" is -** true if the hash table should make its own private copy of keys and -** false if it should just use the supplied pointer. CopyKey only makes -** sense for HASH_STRING and HASH_BINARY and is ignored -** for other key classes. -*/ -void HashInit(Hash *pNew, int keyClass, int copyKey){ - assert( pNew!=0 ); - assert( keyClass>=HASH_STRING && keyClass<=HASH_BINARY ); - pNew->keyClass = keyClass; -#if 0 - if( keyClass==HASH_POINTER || keyClass==HASH_INT ) copyKey = 0; -#endif - pNew->copyKey = copyKey; - pNew->first = 0; - pNew->count = 0; - pNew->htsize = 0; - pNew->ht = 0; - pNew->xMalloc = malloc_and_zero; - pNew->xFree = free; -} - -/* Remove all entries from a hash table. Reclaim all memory. -** Call this routine to delete a hash table or to reset a hash table -** to the empty state. 
-*/ -void HashClear(Hash *pH){ - HashElem *elem; /* For looping over all elements of the table */ - - assert( pH!=0 ); - elem = pH->first; - pH->first = 0; - if( pH->ht ) pH->xFree(pH->ht); - pH->ht = 0; - pH->htsize = 0; - while( elem ){ - HashElem *next_elem = elem->next; - if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); - } - pH->xFree(elem); - elem = next_elem; - } - pH->count = 0; -} - -#if 0 /* NOT USED */ -/* -** Hash and comparison functions when the mode is HASH_INT -*/ -static int intHash(const void *pKey, int nKey){ - return nKey ^ (nKey<<8) ^ (nKey>>8); -} -static int intCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - return n2 - n1; -} -#endif - -#if 0 /* NOT USED */ -/* -** Hash and comparison functions when the mode is HASH_POINTER -*/ -static int ptrHash(const void *pKey, int nKey){ - uptr x = Addr(pKey); - return x ^ (x<<8) ^ (x>>8); -} -static int ptrCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( pKey1==pKey2 ) return 0; - if( pKey1 0 ){ - h = (h<<3) ^ h ^ *z++; - nKey--; - } - return h & 0x7fffffff; -} -static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return strncmp((const char*)pKey1,(const char*)pKey2,n1); -} - -/* -** Hash and comparison functions when the mode is HASH_BINARY -*/ -static int binHash(const void *pKey, int nKey){ - int h = 0; - const char *z = (const char *)pKey; - while( nKey-- > 0 ){ - h = (h<<3) ^ h ^ *(z++); - } - return h & 0x7fffffff; -} -static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return memcmp(pKey1,pKey2,n1); -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** The C syntax in this function definition may be unfamilar to some -** programmers, so we provide the following additional explanation: -** -** The name of the function is "hashFunction". The function takes a -** single parameter "keyClass". The return value of hashFunction() -** is a pointer to another function. Specifically, the return value -** of hashFunction() is a pointer to a function that takes two parameters -** with types "const void*" and "int" and returns an "int". -*/ -static int (*hashFunction(int keyClass))(const void*,int){ -#if 0 /* HASH_INT and HASH_POINTER are never used */ - switch( keyClass ){ - case HASH_INT: return &intHash; - case HASH_POINTER: return &ptrHash; - case HASH_STRING: return &strHash; - case HASH_BINARY: return &binHash;; - default: break; - } - return 0; -#else - if( keyClass==HASH_STRING ){ - return &strHash; - }else{ - assert( keyClass==HASH_BINARY ); - return &binHash; - } -#endif -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** For help in interpreted the obscure C code in the function definition, -** see the header comment on the previous function. 
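For readers unfamiliar with that declarator form, an equivalent spelling using a typedef for the returned pointer type reads more naturally (a sketch only; hash_fn and pickHash are illustrative names, and it assumes the strHash()/binHash() helpers defined above):

  typedef int (*hash_fn)(const void *pKey, int nKey);

  static hash_fn pickHash(int keyClass){
    if( keyClass==HASH_STRING ){
      return &strHash;
    }
    assert( keyClass==HASH_BINARY );
    return &binHash;
  }

Both forms declare a function that takes an int and returns a pointer to a function of type int(const void*, int).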
-*/ -static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ -#if 0 /* HASH_INT and HASH_POINTER are never used */ - switch( keyClass ){ - case HASH_INT: return &intCompare; - case HASH_POINTER: return &ptrCompare; - case HASH_STRING: return &strCompare; - case HASH_BINARY: return &binCompare; - default: break; - } - return 0; -#else - if( keyClass==HASH_STRING ){ - return &strCompare; - }else{ - assert( keyClass==HASH_BINARY ); - return &binCompare; - } -#endif -} - -/* Link an element into the hash table -*/ -static void insertElement( - Hash *pH, /* The complete hash table */ - struct _ht *pEntry, /* The entry into which pNew is inserted */ - HashElem *pNew /* The element to be inserted */ -){ - HashElem *pHead; /* First element already in pEntry */ - pHead = pEntry->chain; - if( pHead ){ - pNew->next = pHead; - pNew->prev = pHead->prev; - if( pHead->prev ){ pHead->prev->next = pNew; } - else { pH->first = pNew; } - pHead->prev = pNew; - }else{ - pNew->next = pH->first; - if( pH->first ){ pH->first->prev = pNew; } - pNew->prev = 0; - pH->first = pNew; - } - pEntry->count++; - pEntry->chain = pNew; -} - - -/* Resize the hash table so that it cantains "new_size" buckets. -** "new_size" must be a power of 2. The hash table might fail -** to resize if sqliteMalloc() fails. -*/ -static void rehash(Hash *pH, int new_size){ - struct _ht *new_ht; /* The new hash table */ - HashElem *elem, *next_elem; /* For looping over existing elements */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _ht *)pH->xMalloc( new_size*sizeof(struct _ht) ); - if( new_ht==0 ) return; - if( pH->ht ) pH->xFree(pH->ht); - pH->ht = new_ht; - pH->htsize = new_size; - xHash = hashFunction(pH->keyClass); - for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); - next_elem = elem->next; - insertElement(pH, &new_ht[h], elem); - } -} - -/* This function (for internal use only) locates an element in an -** hash table that matches the given key. The hash for this key has -** already been computed and is passed as the 4th parameter. -*/ -static HashElem *findElementGivenHash( - const Hash *pH, /* The pH to be searched */ - const void *pKey, /* The key we are searching for */ - int nKey, - int h /* The hash for this key. */ -){ - HashElem *elem; /* Used to loop thru the element list */ - int count; /* Number of elements left to test */ - int (*xCompare)(const void*,int,const void*,int); /* comparison function */ - - if( pH->ht ){ - struct _ht *pEntry = &pH->ht[h]; - elem = pEntry->chain; - count = pEntry->count; - xCompare = compareFunction(pH->keyClass); - while( count-- && elem ){ - if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; - } - } - return 0; -} - -/* Remove a single entry from the hash table given a pointer to that -** element and a hash on the element's key. 
-*/ -static void removeElementGivenHash( - Hash *pH, /* The pH containing "elem" */ - HashElem* elem, /* The element to be removed from the pH */ - int h /* Hash value for the element */ -){ - struct _ht *pEntry; - if( elem->prev ){ - elem->prev->next = elem->next; - }else{ - pH->first = elem->next; - } - if( elem->next ){ - elem->next->prev = elem->prev; - } - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - if( pEntry->count<=0 ){ - pEntry->chain = 0; - } - if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); - } - pH->xFree( elem ); - pH->count--; - if( pH->count<=0 ){ - assert( pH->first==0 ); - assert( pH->count==0 ); - HashClear(pH); - } -} - -/* Attempt to locate an element of the hash table pH with a key -** that matches pKey,nKey. Return the data for this element if it is -** found, or NULL if there is no match. -*/ -void *HashFind(const Hash *pH, const void *pKey, int nKey){ - int h; /* A hash on key */ - HashElem *elem; /* The element that matches key */ - int (*xHash)(const void*,int); /* The hash function */ - - if( pH==0 || pH->ht==0 ) return 0; - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - h = (*xHash)(pKey,nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); - return elem ? elem->data : 0; -} - -/* Insert an element into the hash table pH. The key is pKey,nKey -** and the data is "data". -** -** If no element exists with a matching key, then a new -** element is created. A copy of the key is made if the copyKey -** flag is set. NULL is returned. -** -** If another element already exists with the same key, then the -** new data replaces the old data and the old data is returned. -** The key is not copied in this instance. If a malloc fails, then -** the new data is returned and the hash table is unchanged. -** -** If the "data" parameter to this function is NULL, then the -** element corresponding to "key" is removed from the hash table. 
-*/ -void *HashInsert(Hash *pH, const void *pKey, int nKey, void *data){ - int hraw; /* Raw hash value of the key */ - int h; /* the hash of the key modulo hash table size */ - HashElem *elem; /* Used to loop thru the element list */ - HashElem *new_elem; /* New element added to the pH */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( pH!=0 ); - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - hraw = (*xHash)(pKey, nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - elem = findElementGivenHash(pH,pKey,nKey,h); - if( elem ){ - void *old_data = elem->data; - if( data==0 ){ - removeElementGivenHash(pH,elem,h); - }else{ - elem->data = data; - } - return old_data; - } - if( data==0 ) return 0; - new_elem = (HashElem*)pH->xMalloc( sizeof(HashElem) ); - if( new_elem==0 ) return data; - if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = pH->xMalloc( nKey ); - if( new_elem->pKey==0 ){ - pH->xFree(new_elem); - return data; - } - memcpy((void*)new_elem->pKey, pKey, nKey); - }else{ - new_elem->pKey = (void*)pKey; - } - new_elem->nKey = nKey; - pH->count++; - if( pH->htsize==0 ){ - rehash(pH,8); - if( pH->htsize==0 ){ - pH->count = 0; - pH->xFree(new_elem); - return data; - } - } - if( pH->count > pH->htsize ){ - rehash(pH,pH->htsize*2); - } - assert( pH->htsize>0 ); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - insertElement(pH, &pH->ht[h], new_elem); - new_elem->data = data; - return 0; -} diff --git a/ext/fts1/ft_hash.h b/ext/fts1/ft_hash.h deleted file mode 100644 index 95871a4590..0000000000 --- a/ext/fts1/ft_hash.h +++ /dev/null @@ -1,111 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implementation -** used in SQLite. We've modified it slightly to serve as a standalone -** hash table implementation for the full-text indexing module. -** -*/ -#ifndef _HASH_H_ -#define _HASH_H_ - -/* Forward declarations of structures. */ -typedef struct Hash Hash; -typedef struct HashElem HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, many of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. -*/ -struct Hash { - char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ - char copyKey; /* True if copy of key made on insert */ - int count; /* Number of entries in this table */ - HashElem *first; /* The first element of the array */ - void *(*xMalloc)(int); /* malloc() function to use */ - void (*xFree)(void *); /* free() function to use */ - int htsize; /* Number of buckets in the hash table */ - struct _ht { /* the hash table */ - int count; /* Number of entries with this hash */ - HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. 
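Putting the documented HashInsert()/HashFind() contract together, a usage sketch (hashDemo is an illustrative name; it assumes the ft_hash.h interface shown here, which this patch removes along with the rest of FTS1):

  #include <stdio.h>
  #include "ft_hash.h"

  static void hashDemo(void){
    Hash h;
    static char one[] = "first", two[] = "second";
    HashInit(&h, HASH_STRING, 1);        /* copyKey=1: table copies the key */
    HashInsert(&h, "doc", 4, one);       /* new key: returns NULL           */
    printf("%s\n", (char*)HashFind(&h, "doc", 4));     /* prints "first"    */
    HashInsert(&h, "doc", 4, two);       /* same key: replaces, returns one */
    HashInsert(&h, "doc", 4, 0);         /* NULL data: deletes the entry    */
    HashClear(&h);                       /* reset to empty, reclaim memory  */
  }

Note that nKey (4 here) counts the key's bytes including the terminating NUL, as described for HASH_STRING.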
All elements are stored on a single doubly-linked list. -** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct HashElem { - HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - void *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** There are 4 different modes of operation for a hash table: -** -** HASH_INT nKey is used as the key and pKey is ignored. -** -** HASH_POINTER pKey is used as the key and nKey is ignored. -** -** HASH_STRING pKey points to a string that is nKey bytes long -** (including the null-terminator, if any). Case -** is respected in comparisons. -** -** HASH_BINARY pKey points to binary data nKey bytes long. -** memcmp() is used to compare keys. -** -** A copy of the key is made for HASH_STRING and HASH_BINARY -** if the copyKey parameter to HashInit is 1. -*/ -/* #define HASH_INT 1 // NOT USED */ -/* #define HASH_POINTER 2 // NOT USED */ -#define HASH_STRING 3 -#define HASH_BINARY 4 - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -void HashInit(Hash*, int keytype, int copyKey); -void *HashInsert(Hash*, const void *pKey, int nKey, void *pData); -void *HashFind(const Hash*, const void *pKey, int nKey); -void HashClear(Hash*); - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** Hash h; -** HashElem *p; -** ... -** for(p=HashFirst(&h); p; p=HashNext(p)){ -** SomeStructure *pData = HashData(p); -** // do something with pData -** } -*/ -#define HashFirst(H) ((H)->first) -#define HashNext(E) ((E)->next) -#define HashData(E) ((E)->data) -#define HashKey(E) ((E)->pKey) -#define HashKeysize(E) ((E)->nKey) - -/* -** Number of entries in a hash table -*/ -#define HashCount(H) ((H)->count) - -#endif /* _HASH_H_ */ diff --git a/ext/fts1/fts1.c b/ext/fts1/fts1.c deleted file mode 100644 index 77fa9e23f5..0000000000 --- a/ext/fts1/fts1.c +++ /dev/null @@ -1,3348 +0,0 @@ -/* fts1 has a design flaw which can lead to database corruption (see -** below). It is recommended not to use it any longer, instead use -** fts3 (or higher). If you believe that your use of fts1 is safe, -** add -DSQLITE_ENABLE_BROKEN_FTS1=1 to your CFLAGS. -*/ -#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1)) \ - && !defined(SQLITE_ENABLE_BROKEN_FTS1) -#error fts1 has a design flaw and has been deprecated. -#endif -/* The flaw is that fts1 uses the content table's unaliased rowid as -** the unique docid. fts1 embeds the rowid in the index it builds, -** and expects the rowid to not change. The SQLite VACUUM operation -** will renumber such rowids, thereby breaking fts1. If you are using -** fts1 in a system which has disabled VACUUM, then you can continue -** to use it safely. Note that PRAGMA auto_vacuum does NOT disable -** VACUUM, though systems using auto_vacuum are unlikely to invoke -** VACUUM. -** -** fts1 should be safe even across VACUUM if you only insert documents -** and never delete. -*/ - -/* The author disclaims copyright to this source code. - * - * This is an SQLite module implementing full-text search. - */ - -/* -** The code in this file is only compiled if: -** -** * The FTS1 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS1 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). 
-*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) - -#if defined(SQLITE_ENABLE_FTS1) && !defined(SQLITE_CORE) -# define SQLITE_CORE 1 -#endif - -#include -#include -#include -#include -#include - -#include "fts1.h" -#include "fts1_hash.h" -#include "fts1_tokenizer.h" -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT1 - - -#if 0 -# define TRACE(A) printf A; fflush(stdout) -#else -# define TRACE(A) -#endif - -/* utility functions */ - -typedef struct StringBuffer { - int len; /* length, not including null terminator */ - int alloced; /* Space allocated for s[] */ - char *s; /* Content of the string */ -} StringBuffer; - -static void initStringBuffer(StringBuffer *sb){ - sb->len = 0; - sb->alloced = 100; - sb->s = malloc(100); - sb->s[0] = '\0'; -} - -static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ - if( sb->len + nFrom >= sb->alloced ){ - sb->alloced = sb->len + nFrom + 100; - sb->s = realloc(sb->s, sb->alloced+1); - if( sb->s==0 ){ - initStringBuffer(sb); - return; - } - } - memcpy(sb->s + sb->len, zFrom, nFrom); - sb->len += nFrom; - sb->s[sb->len] = 0; -} -static void append(StringBuffer *sb, const char *zFrom){ - nappend(sb, zFrom, strlen(zFrom)); -} - -/* We encode variable-length integers in little-endian order using seven bits - * per byte as follows: -** -** KEY: -** A = 0xxxxxxx 7 bits of data and one flag bit -** B = 1xxxxxxx 7 bits of data and one flag bit -** -** 7 bits - A -** 14 bits - BA -** 21 bits - BBA -** and so on. -*/ - -/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ -#define VARINT_MAX 10 - -/* Write a 64-bit variable-length integer to memory starting at p[0]. - * The length of data written will be between 1 and VARINT_MAX bytes. - * The number of bytes written is returned. */ -static int putVarint(char *p, sqlite_int64 v){ - unsigned char *q = (unsigned char *) p; - sqlite_uint64 vu = v; - do{ - *q++ = (unsigned char) ((vu & 0x7f) | 0x80); - vu >>= 7; - }while( vu!=0 ); - q[-1] &= 0x7f; /* turn off high bit in final byte */ - assert( q - (unsigned char *)p <= VARINT_MAX ); - return (int) (q - (unsigned char *)p); -} - -/* Read a 64-bit variable-length integer from memory starting at p[0]. - * Return the number of bytes read, or 0 on error. - * The value is stored in *v. */ -static int getVarint(const char *p, sqlite_int64 *v){ - const unsigned char *q = (const unsigned char *) p; - sqlite_uint64 x = 0, y = 1; - while( (*q & 0x80) == 0x80 ){ - x += y * (*q++ & 0x7f); - y <<= 7; - if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ - assert( 0 ); - return 0; - } - } - x += y * (*q++); - *v = (sqlite_int64) x; - return (int) (q - (unsigned char *)p); -} - -static int getVarint32(const char *p, int *pi){ - sqlite_int64 i; - int ret = getVarint(p, &i); - *pi = (int) i; - assert( *pi==i ); - return ret; -} - -/*** Document lists *** - * - * A document list holds a sorted list of varint-encoded document IDs. - * - * A doclist with type DL_POSITIONS_OFFSETS is stored like this: - * - * array { - * varint docid; - * array { - * varint position; (delta from previous position plus POS_BASE) - * varint startOffset; (delta from previous startOffset) - * varint endOffset; (delta from startOffset) - * } - * } - * - * Here, array { X } means zero or more occurrences of X, adjacent in memory. - * - * A position list may hold positions for text in multiple columns. A position - * POS_COLUMN is followed by a varint containing the index of the column for - * following positions in the list. 
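As a concrete check of the varint scheme described above (checkVarint is an illustrative name; it assumes the putVarint()/getVarint() routines in this file):

  static void checkVarint(void){
    char buf[VARINT_MAX];
    sqlite_int64 v = 0;
    int n = putVarint(buf, 300);
    assert( n==2 );
    assert( (buf[0]&0xff)==0xAC );  /* low 7 bits (0101100) + continuation bit */
    assert( (buf[1]&0xff)==0x02 );  /* remaining bits (10), high bit clear     */
    assert( getVarint(buf, &v)==2 && v==300 );
  }

So 300 is stored little-endian, seven payload bits per byte, with the high bit set on every byte except the last.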
Any positions appearing before any - * occurrences of POS_COLUMN are for column 0. - * - * A doclist with type DL_POSITIONS is like the above, but holds only docids - * and positions without offset information. - * - * A doclist with type DL_DOCIDS is like the above, but holds only docids - * without positions or offset information. - * - * On disk, every document list has positions and offsets, so we don't bother - * to serialize a doclist's type. - * - * We don't yet delta-encode document IDs; doing so will probably be a - * modest win. - * - * NOTE(shess) I've thought of a slightly (1%) better offset encoding. - * After the first offset, estimate the next offset by using the - * current token position and the previous token position and offset, - * offset to handle some variance. So the estimate would be - * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded - * as normal. Offsets more than 64 chars from the estimate are - * encoded as the delta to the previous start offset + 128. An - * additional tiny increment can be gained by using the end offset of - * the previous token to make the estimate a tiny bit more precise. -*/ - -/* It is not safe to call isspace(), tolower(), or isalnum() on -** hi-bit-set characters. This is the same solution used in the -** tokenizer. -*/ -/* TODO(shess) The snippet-generation code should be using the -** tokenizer-generated tokens rather than doing its own local -** tokenization. -*/ -/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ -static int safe_isspace(char c){ - return (c&0x80)==0 ? isspace((unsigned char)c) : 0; -} -static int safe_tolower(char c){ - return (c&0x80)==0 ? tolower((unsigned char)c) : c; -} -static int safe_isalnum(char c){ - return (c&0x80)==0 ? isalnum((unsigned char)c) : 0; -} - -typedef enum DocListType { - DL_DOCIDS, /* docids only */ - DL_POSITIONS, /* docids + positions */ - DL_POSITIONS_OFFSETS /* docids + positions + offsets */ -} DocListType; - -/* -** By default, only positions and not offsets are stored in the doclists. -** To change this so that offsets are stored too, compile with -** -** -DDL_DEFAULT=DL_POSITIONS_OFFSETS -** -*/ -#ifndef DL_DEFAULT -# define DL_DEFAULT DL_POSITIONS -#endif - -typedef struct DocList { - char *pData; - int nData; - DocListType iType; - int iLastColumn; /* the last column written */ - int iLastPos; /* the last position written */ - int iLastOffset; /* the last start offset written */ -} DocList; - -enum { - POS_END = 0, /* end of this position list */ - POS_COLUMN, /* followed by new column number */ - POS_BASE -}; - -/* Initialize a new DocList to hold the given data. */ -static void docListInit(DocList *d, DocListType iType, - const char *pData, int nData){ - d->nData = nData; - if( nData>0 ){ - d->pData = malloc(nData); - memcpy(d->pData, pData, nData); - } else { - d->pData = NULL; - } - d->iType = iType; - d->iLastColumn = 0; - d->iLastPos = d->iLastOffset = 0; -} - -/* Create a new dynamically-allocated DocList. */ -static DocList *docListNew(DocListType iType){ - DocList *d = (DocList *) malloc(sizeof(DocList)); - docListInit(d, iType, 0, 0); - return d; -} - -static void docListDestroy(DocList *d){ - free(d->pData); -#ifndef NDEBUG - memset(d, 0x55, sizeof(*d)); -#endif -} - -static void docListDelete(DocList *d){ - docListDestroy(d); - free(d); -} - -static char *docListEnd(DocList *d){ - return d->pData + d->nData; -} - -/* Append a varint to a DocList's data. 
*/ -static void appendVarint(DocList *d, sqlite_int64 i){ - char c[VARINT_MAX]; - int n = putVarint(c, i); - d->pData = realloc(d->pData, d->nData + n); - memcpy(d->pData + d->nData, c, n); - d->nData += n; -} - -static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ - appendVarint(d, iDocid); - if( d->iType>=DL_POSITIONS ){ - appendVarint(d, POS_END); /* initially empty position list */ - d->iLastColumn = 0; - d->iLastPos = d->iLastOffset = 0; - } -} - -/* helper function for docListAddPos and docListAddPosOffset */ -static void addPos(DocList *d, int iColumn, int iPos){ - assert( d->nData>0 ); - --d->nData; /* remove previous terminator */ - if( iColumn!=d->iLastColumn ){ - assert( iColumn>d->iLastColumn ); - appendVarint(d, POS_COLUMN); - appendVarint(d, iColumn); - d->iLastColumn = iColumn; - d->iLastPos = d->iLastOffset = 0; - } - assert( iPos>=d->iLastPos ); - appendVarint(d, iPos-d->iLastPos+POS_BASE); - d->iLastPos = iPos; -} - -/* Add a position to the last position list in a doclist. */ -static void docListAddPos(DocList *d, int iColumn, int iPos){ - assert( d->iType==DL_POSITIONS ); - addPos(d, iColumn, iPos); - appendVarint(d, POS_END); /* add new terminator */ -} - -/* -** Add a position and starting and ending offsets to a doclist. -** -** If the doclist is setup to handle only positions, then insert -** the position only and ignore the offsets. -*/ -static void docListAddPosOffset( - DocList *d, /* Doclist under construction */ - int iColumn, /* Column the inserted term is part of */ - int iPos, /* Position of the inserted term */ - int iStartOffset, /* Starting offset of inserted term */ - int iEndOffset /* Ending offset of inserted term */ -){ - assert( d->iType>=DL_POSITIONS ); - addPos(d, iColumn, iPos); - if( d->iType==DL_POSITIONS_OFFSETS ){ - assert( iStartOffset>=d->iLastOffset ); - appendVarint(d, iStartOffset-d->iLastOffset); - d->iLastOffset = iStartOffset; - assert( iEndOffset>=iStartOffset ); - appendVarint(d, iEndOffset-iStartOffset); - } - appendVarint(d, POS_END); /* add new terminator */ -} - -/* -** A DocListReader object is a cursor into a doclist. Initialize -** the cursor to the beginning of the doclist by calling readerInit(). -** Then use routines -** -** peekDocid() -** readDocid() -** readPosition() -** skipPositionList() -** and so forth... -** -** to read information out of the doclist. When we reach the end -** of the doclist, atEnd() returns TRUE. -*/ -typedef struct DocListReader { - DocList *pDoclist; /* The document list we are stepping through */ - char *p; /* Pointer to next unread byte in the doclist */ - int iLastColumn; - int iLastPos; /* the last position read, or -1 when not in a position list */ -} DocListReader; - -/* -** Initialize the DocListReader r to point to the beginning of pDoclist. -*/ -static void readerInit(DocListReader *r, DocList *pDoclist){ - r->pDoclist = pDoclist; - if( pDoclist!=NULL ){ - r->p = pDoclist->pData; - } - r->iLastColumn = -1; - r->iLastPos = -1; -} - -/* -** Return TRUE if we have reached then end of pReader and there is -** nothing else left to read. -*/ -static int atEnd(DocListReader *pReader){ - return pReader->pDoclist==0 || (pReader->p >= docListEnd(pReader->pDoclist)); -} - -/* Peek at the next docid without advancing the read pointer. -*/ -static sqlite_int64 peekDocid(DocListReader *pReader){ - sqlite_int64 ret; - assert( !atEnd(pReader) ); - assert( pReader->iLastPos==-1 ); - getVarint(pReader->p, &ret); - return ret; -} - -/* Read the next docid. See also nextDocid(). 
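To make the doclist layout concrete, a sketch that encodes a single document, docid 5, with hits at positions 3 and 7 of column 0, into a DL_POSITIONS doclist using the routines above (buildDemoDoclist is an illustrative name):

  static void buildDemoDoclist(void){
    DocList d;
    docListInit(&d, DL_POSITIONS, 0, 0);
    docListAddDocid(&d, 5);
    docListAddPos(&d, 0, 3);
    docListAddPos(&d, 0, 7);
    assert( d.nData==4 );
    assert( d.pData[0]==0x05 );     /* docid 5                        */
    assert( d.pData[1]==0x05 );     /* position delta 3-0 + POS_BASE  */
    assert( d.pData[2]==0x06 );     /* position delta 7-3 + POS_BASE  */
    assert( d.pData[3]==0x00 );     /* POS_END terminator             */
    docListDestroy(&d);
  }

Positions are delta-encoded against the previous position for the same docid, offset by POS_BASE so that the values 0 (POS_END) and 1 (POS_COLUMN) stay reserved.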
-*/ -static sqlite_int64 readDocid(DocListReader *pReader){ - sqlite_int64 ret; - assert( !atEnd(pReader) ); - assert( pReader->iLastPos==-1 ); - pReader->p += getVarint(pReader->p, &ret); - if( pReader->pDoclist->iType>=DL_POSITIONS ){ - pReader->iLastColumn = 0; - pReader->iLastPos = 0; - } - return ret; -} - -/* Read the next position and column index from a position list. - * Returns the position, or -1 at the end of the list. */ -static int readPosition(DocListReader *pReader, int *iColumn){ - int i; - int iType = pReader->pDoclist->iType; - - if( pReader->iLastPos==-1 ){ - return -1; - } - assert( !atEnd(pReader) ); - - if( iTypep += getVarint32(pReader->p, &i); - if( i==POS_END ){ - pReader->iLastColumn = pReader->iLastPos = -1; - *iColumn = -1; - return -1; - } - if( i==POS_COLUMN ){ - pReader->p += getVarint32(pReader->p, &pReader->iLastColumn); - pReader->iLastPos = 0; - pReader->p += getVarint32(pReader->p, &i); - assert( i>=POS_BASE ); - } - pReader->iLastPos += ((int) i)-POS_BASE; - if( iType>=DL_POSITIONS_OFFSETS ){ - /* Skip over offsets, ignoring them for now. */ - int iStart, iEnd; - pReader->p += getVarint32(pReader->p, &iStart); - pReader->p += getVarint32(pReader->p, &iEnd); - } - *iColumn = pReader->iLastColumn; - return pReader->iLastPos; -} - -/* Skip past the end of a position list. */ -static void skipPositionList(DocListReader *pReader){ - DocList *p = pReader->pDoclist; - if( p && p->iType>=DL_POSITIONS ){ - int iColumn; - while( readPosition(pReader, &iColumn)!=-1 ){} - } -} - -/* Skip over a docid, including its position list if the doclist has - * positions. */ -static void skipDocument(DocListReader *pReader){ - readDocid(pReader); - skipPositionList(pReader); -} - -/* Skip past all docids which are less than [iDocid]. Returns 1 if a docid - * matching [iDocid] was found. */ -static int skipToDocid(DocListReader *pReader, sqlite_int64 iDocid){ - sqlite_int64 d = 0; - while( !atEnd(pReader) && (d=peekDocid(pReader))iType>=DL_POSITIONS ){ - int iPos, iCol; - const char *zDiv = ""; - printf("("); - while( (iPos = readPosition(&r, &iCol))>=0 ){ - printf("%s%d:%d", zDiv, iCol, iPos); - zDiv = ":"; - } - printf(")"); - } - } - printf("\n"); - fflush(stdout); -} -#endif /* SQLITE_DEBUG */ - -/* Trim the given doclist to contain only positions in column - * [iRestrictColumn]. */ -static void docListRestrictColumn(DocList *in, int iRestrictColumn){ - DocListReader r; - DocList out; - - assert( in->iType>=DL_POSITIONS ); - readerInit(&r, in); - docListInit(&out, DL_POSITIONS, NULL, 0); - - while( !atEnd(&r) ){ - sqlite_int64 iDocid = readDocid(&r); - int iPos, iColumn; - - docListAddDocid(&out, iDocid); - while( (iPos = readPosition(&r, &iColumn)) != -1 ){ - if( iColumn==iRestrictColumn ){ - docListAddPos(&out, iColumn, iPos); - } - } - } - - docListDestroy(in); - *in = out; -} - -/* Trim the given doclist by discarding any docids without any remaining - * positions. */ -static void docListDiscardEmpty(DocList *in) { - DocListReader r; - DocList out; - - /* TODO: It would be nice to implement this operation in place; that - * could save a significant amount of memory in queries with long doclists. 
*/ - assert( in->iType>=DL_POSITIONS ); - readerInit(&r, in); - docListInit(&out, DL_POSITIONS, NULL, 0); - - while( !atEnd(&r) ){ - sqlite_int64 iDocid = readDocid(&r); - int match = 0; - int iPos, iColumn; - while( (iPos = readPosition(&r, &iColumn)) != -1 ){ - if( !match ){ - docListAddDocid(&out, iDocid); - match = 1; - } - docListAddPos(&out, iColumn, iPos); - } - } - - docListDestroy(in); - *in = out; -} - -/* Helper function for docListUpdate() and docListAccumulate(). -** Splices a doclist element into the doclist represented by r, -** leaving r pointing after the newly spliced element. -*/ -static void docListSpliceElement(DocListReader *r, sqlite_int64 iDocid, - const char *pSource, int nSource){ - DocList *d = r->pDoclist; - char *pTarget; - int nTarget, found; - - found = skipToDocid(r, iDocid); - - /* Describe slice in d to place pSource/nSource. */ - pTarget = r->p; - if( found ){ - skipDocument(r); - nTarget = r->p-pTarget; - }else{ - nTarget = 0; - } - - /* The sense of the following is that there are three possibilities. - ** If nTarget==nSource, we should not move any memory nor realloc. - ** If nTarget>nSource, trim target and realloc. - ** If nTargetnSource ){ - memmove(pTarget+nSource, pTarget+nTarget, docListEnd(d)-(pTarget+nTarget)); - } - if( nTarget!=nSource ){ - int iDoclist = pTarget-d->pData; - d->pData = realloc(d->pData, d->nData+nSource-nTarget); - pTarget = d->pData+iDoclist; - } - if( nTargetnData += nSource-nTarget; - r->p = pTarget+nSource; -} - -/* Insert/update pUpdate into the doclist. */ -static void docListUpdate(DocList *d, DocList *pUpdate){ - DocListReader reader; - - assert( d!=NULL && pUpdate!=NULL ); - assert( d->iType==pUpdate->iType); - - readerInit(&reader, d); - docListSpliceElement(&reader, firstDocid(pUpdate), - pUpdate->pData, pUpdate->nData); -} - -/* Propagate elements from pUpdate to pAcc, overwriting elements with -** matching docids. -*/ -static void docListAccumulate(DocList *pAcc, DocList *pUpdate){ - DocListReader accReader, updateReader; - - /* Handle edge cases where one doclist is empty. */ - assert( pAcc!=NULL ); - if( pUpdate==NULL || pUpdate->nData==0 ) return; - if( pAcc->nData==0 ){ - pAcc->pData = malloc(pUpdate->nData); - memcpy(pAcc->pData, pUpdate->pData, pUpdate->nData); - pAcc->nData = pUpdate->nData; - return; - } - - readerInit(&accReader, pAcc); - readerInit(&updateReader, pUpdate); - - while( !atEnd(&updateReader) ){ - char *pSource = updateReader.p; - sqlite_int64 iDocid = readDocid(&updateReader); - skipPositionList(&updateReader); - docListSpliceElement(&accReader, iDocid, pSource, updateReader.p-pSource); - } -} - -/* -** Read the next docid off of pIn. Return 0 if we reach the end. -* -* TODO: This assumes that docids are never 0, but they may actually be 0 since -* users can choose docids when inserting into a full-text table. Fix this. -*/ -static sqlite_int64 nextDocid(DocListReader *pIn){ - skipPositionList(pIn); - return atEnd(pIn) ? 0 : readDocid(pIn); -} - -/* -** pLeft and pRight are two DocListReaders that are pointing to -** positions lists of the same document: iDocid. -** -** If there are no instances in pLeft or pRight where the position -** of pLeft is one less than the position of pRight, then this -** routine adds nothing to pOut. -** -** If there are one or more instances where positions from pLeft -** are exactly one less than positions from pRight, then add a new -** document record to pOut. 
If pOut wants to hold positions, then -** include the positions from pRight that are one more than a -** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. -** -** pLeft and pRight are left pointing at the next document record. -*/ -static void mergePosList( - DocListReader *pLeft, /* Left position list */ - DocListReader *pRight, /* Right position list */ - sqlite_int64 iDocid, /* The docid from pLeft and pRight */ - DocList *pOut /* Write the merged document record here */ -){ - int iLeftCol, iLeftPos = readPosition(pLeft, &iLeftCol); - int iRightCol, iRightPos = readPosition(pRight, &iRightCol); - int match = 0; - - /* Loop until we've reached the end of both position lists. */ - while( iLeftPos!=-1 && iRightPos!=-1 ){ - if( iLeftCol==iRightCol && iLeftPos+1==iRightPos ){ - if( !match ){ - docListAddDocid(pOut, iDocid); - match = 1; - } - if( pOut->iType>=DL_POSITIONS ){ - docListAddPos(pOut, iRightCol, iRightPos); - } - iLeftPos = readPosition(pLeft, &iLeftCol); - iRightPos = readPosition(pRight, &iRightCol); - }else if( iRightCol=0 ) skipPositionList(pLeft); - if( iRightPos>=0 ) skipPositionList(pRight); -} - -/* We have two doclists: pLeft and pRight. -** Write the phrase intersection of these two doclists into pOut. -** -** A phrase intersection means that two documents only match -** if pLeft.iPos+1==pRight.iPos. -** -** The output pOut may or may not contain positions. If pOut -** does contain positions, they are the positions of pRight. -*/ -static void docListPhraseMerge( - DocList *pLeft, /* Doclist resulting from the words on the left */ - DocList *pRight, /* Doclist for the next word to the right */ - DocList *pOut /* Write the combined doclist here */ -){ - DocListReader left, right; - sqlite_int64 docidLeft, docidRight; - - readerInit(&left, pLeft); - readerInit(&right, pRight); - docidLeft = nextDocid(&left); - docidRight = nextDocid(&right); - - while( docidLeft>0 && docidRight>0 ){ - if( docidLeftiType0 && docidRight>0 ){ - if( docidLeft0 && docidRight>0 ){ - if( docidLeft<=docidRight ){ - docListAddDocid(pOut, docidLeft); - }else{ - docListAddDocid(pOut, docidRight); - } - priorLeft = docidLeft; - if( docidLeft<=docidRight ){ - docidLeft = nextDocid(&left); - } - if( docidRight>0 && docidRight<=priorLeft ){ - docidRight = nextDocid(&right); - } - } - while( docidLeft>0 ){ - docListAddDocid(pOut, docidLeft); - docidLeft = nextDocid(&left); - } - while( docidRight>0 ){ - docListAddDocid(pOut, docidRight); - docidRight = nextDocid(&right); - } -} - -/* We have two doclists: pLeft and pRight. -** Write into pOut all documents that occur in pLeft but not -** in pRight. -** -** Only docids are matched. Position information is ignored. -** -** The output pOut never holds positions. 
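A worked example of the phrase merge described above (document and position numbers are illustrative): if the doclist for "full" holds docid 12 at positions {3, 9} and the doclist for "text" holds docid 12 at positions {4, 20}, only the pair 3/4 satisfies pLeft.iPos+1==pRight.iPos, so docListPhraseMerge() emits docid 12 with the single position 4 (positions are taken from the right-hand term). A document containing both words only in non-adjacent positions produces no output entry at all.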
-*/ -static void docListExceptMerge( - DocList *pLeft, /* Doclist resulting from the words on the left */ - DocList *pRight, /* Doclist for the next word to the right */ - DocList *pOut /* Write the combined doclist here */ -){ - DocListReader left, right; - sqlite_int64 docidLeft, docidRight, priorLeft; - - readerInit(&left, pLeft); - readerInit(&right, pRight); - docidLeft = nextDocid(&left); - docidRight = nextDocid(&right); - - while( docidLeft>0 && docidRight>0 ){ - priorLeft = docidLeft; - if( docidLeft0 && docidRight<=priorLeft ){ - docidRight = nextDocid(&right); - } - } - while( docidLeft>0 ){ - docListAddDocid(pOut, docidLeft); - docidLeft = nextDocid(&left); - } -} - -static char *string_dup_n(const char *s, int n){ - char *str = malloc(n + 1); - memcpy(str, s, n); - str[n] = '\0'; - return str; -} - -/* Duplicate a string; the caller must free() the returned string. - * (We don't use strdup() since it is not part of the standard C library and - * may not be available everywhere.) */ -static char *string_dup(const char *s){ - return string_dup_n(s, strlen(s)); -} - -/* Format a string, replacing each occurrence of the % character with - * zDb.zName. This may be more convenient than sqlite_mprintf() - * when one string is used repeatedly in a format string. - * The caller must free() the returned string. */ -static char *string_format(const char *zFormat, - const char *zDb, const char *zName){ - const char *p; - size_t len = 0; - size_t nDb = strlen(zDb); - size_t nName = strlen(zName); - size_t nFullTableName = nDb+1+nName; - char *result; - char *r; - - /* first compute length needed */ - for(p = zFormat ; *p ; ++p){ - len += (*p=='%' ? nFullTableName : 1); - } - len += 1; /* for null terminator */ - - r = result = malloc(len); - for(p = zFormat; *p; ++p){ - if( *p=='%' ){ - memcpy(r, zDb, nDb); - r += nDb; - *r++ = '.'; - memcpy(r, zName, nName); - r += nName; - } else { - *r++ = *p; - } - } - *r++ = '\0'; - assert( r == result + len ); - return result; -} - -static int sql_exec(sqlite3 *db, const char *zDb, const char *zName, - const char *zFormat){ - char *zCommand = string_format(zFormat, zDb, zName); - int rc; - TRACE(("FTS1 sql: %s\n", zCommand)); - rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); - free(zCommand); - return rc; -} - -static int sql_prepare(sqlite3 *db, const char *zDb, const char *zName, - sqlite3_stmt **ppStmt, const char *zFormat){ - char *zCommand = string_format(zFormat, zDb, zName); - int rc; - TRACE(("FTS1 prepare: %s\n", zCommand)); - rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); - free(zCommand); - return rc; -} - -/* end utility functions */ - -/* Forward reference */ -typedef struct fulltext_vtab fulltext_vtab; - -/* A single term in a query is represented by an instances of -** the following structure. -*/ -typedef struct QueryTerm { - short int nPhrase; /* How many following terms are part of the same phrase */ - short int iPhrase; /* This is the i-th term of a phrase. */ - short int iColumn; /* Column of the index that must match this term */ - signed char isOr; /* this term is preceded by "OR" */ - signed char isNot; /* this term is preceded by "-" */ - char *pTerm; /* text of the term. '\000' terminated. malloced */ - int nTerm; /* Number of bytes in pTerm[] */ -} QueryTerm; - - -/* A query string is parsed into a Query structure. - * - * We could, in theory, allow query strings to be complicated - * nested expressions with precedence determined by parentheses. - * But none of the major search engines do this. 
(Perhaps the - * feeling is that an parenthesized expression is two complex of - * an idea for the average user to grasp.) Taking our lead from - * the major search engines, we will allow queries to be a list - * of terms (with an implied AND operator) or phrases in double-quotes, - * with a single optional "-" before each non-phrase term to designate - * negation and an optional OR connector. - * - * OR binds more tightly than the implied AND, which is what the - * major search engines seem to do. So, for example: - * - * [one two OR three] ==> one AND (two OR three) - * [one OR two three] ==> (one OR two) AND three - * - * A "-" before a term matches all entries that lack that term. - * The "-" must occur immediately before the term with in intervening - * space. This is how the search engines do it. - * - * A NOT term cannot be the right-hand operand of an OR. If this - * occurs in the query string, the NOT is ignored: - * - * [one OR -two] ==> one OR two - * - */ -typedef struct Query { - fulltext_vtab *pFts; /* The full text index */ - int nTerms; /* Number of terms in the query */ - QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ - int nextIsOr; /* Set the isOr flag on the next inserted term */ - int nextColumn; /* Next word parsed must be in this column */ - int dfltColumn; /* The default column */ -} Query; - - -/* -** An instance of the following structure keeps track of generated -** matching-word offset information and snippets. -*/ -typedef struct Snippet { - int nMatch; /* Total number of matches */ - int nAlloc; /* Space allocated for aMatch[] */ - struct snippetMatch { /* One entry for each matching term */ - char snStatus; /* Status flag for use while constructing snippets */ - short int iCol; /* The column that contains the match */ - short int iTerm; /* The index in Query.pTerms[] of the matching term */ - short int nByte; /* Number of bytes in the term */ - int iStart; /* The offset to the first character of the term */ - } *aMatch; /* Points to space obtained from malloc */ - char *zOffset; /* Text rendering of aMatch[] */ - int nOffset; /* strlen(zOffset) */ - char *zSnippet; /* Snippet text */ - int nSnippet; /* strlen(zSnippet) */ -} Snippet; - - -typedef enum QueryType { - QUERY_GENERIC, /* table scan */ - QUERY_ROWID, /* lookup by rowid */ - QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ -} QueryType; - -/* TODO(shess) CHUNK_MAX controls how much data we allow in segment 0 -** before we start aggregating into larger segments. Lower CHUNK_MAX -** means that for a given input we have more individual segments per -** term, which means more rows in the table and a bigger index (due to -** both more rows and bigger rowids). But it also reduces the average -** cost of adding new elements to the segment 0 doclist, and it seems -** to reduce the number of pages read and written during inserts. 256 -** was chosen by measuring insertion times for a certain input (first -** 10k documents of Enron corpus), though including query performance -** in the decision may argue for a larger value. -*/ -#define CHUNK_MAX 256 - -typedef enum fulltext_statement { - CONTENT_INSERT_STMT, - CONTENT_SELECT_STMT, - CONTENT_UPDATE_STMT, - CONTENT_DELETE_STMT, - - TERM_SELECT_STMT, - TERM_SELECT_ALL_STMT, - TERM_INSERT_STMT, - TERM_UPDATE_STMT, - TERM_DELETE_STMT, - - MAX_STMT /* Always at end! */ -} fulltext_statement; - -/* These must exactly match the enum above. 
*/ -/* TODO(adam): Is there some risk that a statement (in particular, -** pTermSelectStmt) will be used in two cursors at once, e.g. if a -** query joins a virtual table to itself? If so perhaps we should -** move some of these to the cursor object. -*/ -static const char *const fulltext_zStatement[MAX_STMT] = { - /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ - /* CONTENT_SELECT */ "select * from %_content where rowid = ?", - /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ - /* CONTENT_DELETE */ "delete from %_content where rowid = ?", - - /* TERM_SELECT */ - "select rowid, doclist from %_term where term = ? and segment = ?", - /* TERM_SELECT_ALL */ - "select doclist from %_term where term = ? order by segment", - /* TERM_INSERT */ - "insert into %_term (rowid, term, segment, doclist) values (?, ?, ?, ?)", - /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", - /* TERM_DELETE */ "delete from %_term where rowid = ?", -}; - -/* -** A connection to a fulltext index is an instance of the following -** structure. The xCreate and xConnect methods create an instance -** of this structure and xDestroy and xDisconnect free that instance. -** All other methods receive a pointer to the structure as one of their -** arguments. -*/ -struct fulltext_vtab { - sqlite3_vtab base; /* Base class used by SQLite core */ - sqlite3 *db; /* The database connection */ - const char *zDb; /* logical database name */ - const char *zName; /* virtual table name */ - int nColumn; /* number of columns in virtual table */ - char **azColumn; /* column names. malloced */ - char **azContentColumn; /* column names in content table; malloced */ - sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ - - /* Precompiled statements which we keep as long as the table is - ** open. - */ - sqlite3_stmt *pFulltextStatements[MAX_STMT]; -}; - -/* -** When the core wants to do a query, it create a cursor using a -** call to xOpen. This structure is an instance of a cursor. It -** is destroyed by xClose. -*/ -typedef struct fulltext_cursor { - sqlite3_vtab_cursor base; /* Base class used by SQLite core */ - QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ - sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ - int eof; /* True if at End Of Results */ - Query q; /* Parsed query string */ - Snippet snippet; /* Cached snippet for the current row */ - int iColumn; /* Column being searched */ - DocListReader result; /* used when iCursorType == QUERY_FULLTEXT */ -} fulltext_cursor; - -static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ - return (fulltext_vtab *) c->base.pVtab; -} - -static const sqlite3_module fulltextModule; /* forward declaration */ - -/* Append a list of strings separated by commas to a StringBuffer. */ -static void appendList(StringBuffer *sb, int nString, char **azString){ - int i; - for(i=0; i0 ) append(sb, ", "); - append(sb, azString[i]); - } -} - -/* Return a dynamically generated statement of the form - * insert into %_content (rowid, ...) values (?, ...) - */ -static const char *contentInsertStatement(fulltext_vtab *v){ - StringBuffer sb; - int i; - - initStringBuffer(&sb); - append(&sb, "insert into %_content (rowid, "); - appendList(&sb, v->nColumn, v->azContentColumn); - append(&sb, ") values (?"); - for(i=0; inColumn; ++i) - append(&sb, ", ?"); - append(&sb, ")"); - return sb.s; -} - -/* Return a dynamically generated statement of the form - * update %_content set [col_0] = ?, [col_1] = ?, ... 
- * where rowid = ? - */ -static const char *contentUpdateStatement(fulltext_vtab *v){ - StringBuffer sb; - int i; - - initStringBuffer(&sb); - append(&sb, "update %_content set "); - for(i=0; inColumn; ++i) { - if( i>0 ){ - append(&sb, ", "); - } - append(&sb, v->azContentColumn[i]); - append(&sb, " = ?"); - } - append(&sb, " where rowid = ?"); - return sb.s; -} - -/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. -** If the indicated statement has never been prepared, it is prepared -** and cached, otherwise the cached version is reset. -*/ -static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - assert( iStmtpFulltextStatements[iStmt]==NULL ){ - const char *zStmt; - int rc; - switch( iStmt ){ - case CONTENT_INSERT_STMT: - zStmt = contentInsertStatement(v); break; - case CONTENT_UPDATE_STMT: - zStmt = contentUpdateStatement(v); break; - default: - zStmt = fulltext_zStatement[iStmt]; - } - rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], - zStmt); - if( zStmt != fulltext_zStatement[iStmt]) free((void *) zStmt); - if( rc!=SQLITE_OK ) return rc; - } else { - int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); - if( rc!=SQLITE_OK ) return rc; - } - - *ppStmt = v->pFulltextStatements[iStmt]; - return SQLITE_OK; -} - -/* Step the indicated statement, handling errors SQLITE_BUSY (by -** retrying) and SQLITE_SCHEMA (by re-preparing and transferring -** bindings to the new statement). -** TODO(adam): We should extend this function so that it can work with -** statements declared locally, not only globally cached statements. -*/ -static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - int rc; - sqlite3_stmt *s = *ppStmt; - assert( iStmtpFulltextStatements[iStmt] ); - - while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ - if( rc==SQLITE_BUSY ) continue; - if( rc!=SQLITE_ERROR ) return rc; - - /* If an SQLITE_SCHEMA error has occurred, then finalizing this - * statement is going to delete the fulltext_vtab structure. If - * the statement just executed is in the pFulltextStatements[] - * array, it will be finalized twice. So remove it before - * calling sqlite3_finalize(). - */ - v->pFulltextStatements[iStmt] = NULL; - rc = sqlite3_finalize(s); - break; - } - return rc; - - err: - sqlite3_finalize(s); - return rc; -} - -/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. -** Useful for statements like UPDATE, where we expect no results. -*/ -static int sql_single_step_statement(fulltext_vtab *v, - fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - int rc = sql_step_statement(v, iStmt, ppStmt); - return (rc==SQLITE_DONE) ? SQLITE_OK : rc; -} - -/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ -static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, - sqlite3_value **pValues){ - sqlite3_stmt *s; - int i; - int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_value(s, 1, rowid); - if( rc!=SQLITE_OK ) return rc; - - for(i=0; inColumn; ++i){ - rc = sqlite3_bind_value(s, 2+i, pValues[i]); - if( rc!=SQLITE_OK ) return rc; - } - - return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); -} - -/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
- * where rowid = [iRowid] */ -static int content_update(fulltext_vtab *v, sqlite3_value **pValues, - sqlite_int64 iRowid){ - sqlite3_stmt *s; - int i; - int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - for(i=0; inColumn; ++i){ - rc = sqlite3_bind_value(s, 1+i, pValues[i]); - if( rc!=SQLITE_OK ) return rc; - } - - rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, CONTENT_UPDATE_STMT, &s); -} - -static void freeStringArray(int nString, const char **pString){ - int i; - - for (i=0 ; i < nString ; ++i) { - if( pString[i]!=NULL ) free((void *) pString[i]); - } - free((void *) pString); -} - -/* select * from %_content where rowid = [iRow] - * The caller must delete the returned array and all strings in it. - * null fields will be NULL in the returned array. - * - * TODO: Perhaps we should return pointer/length strings here for consistency - * with other code which uses pointer/length. */ -static int content_select(fulltext_vtab *v, sqlite_int64 iRow, - const char ***pValues){ - sqlite3_stmt *s; - const char **values; - int i; - int rc; - - *pValues = NULL; - - rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); - if( rc!=SQLITE_ROW ) return rc; - - values = (const char **) malloc(v->nColumn * sizeof(const char *)); - for(i=0; inColumn; ++i){ - if( sqlite3_column_type(s, i)==SQLITE_NULL ){ - values[i] = NULL; - }else{ - values[i] = string_dup((char*)sqlite3_column_text(s, i)); - } - } - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ){ - *pValues = values; - return SQLITE_OK; - } - - freeStringArray(v->nColumn, values); - return rc; -} - -/* delete from %_content where rowid = [iRow ] */ -static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); -} - -/* select rowid, doclist from %_term - * where term = [pTerm] and segment = [iSegment] - * If found, returns SQLITE_ROW; the caller must free the - * returned doclist. If no rows found, returns SQLITE_DONE. */ -static int term_select(fulltext_vtab *v, const char *pTerm, int nTerm, - int iSegment, - sqlite_int64 *rowid, DocList *out){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 1, pTerm, nTerm, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 2, iSegment); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_step_statement(v, TERM_SELECT_STMT, &s); - if( rc!=SQLITE_ROW ) return rc; - - *rowid = sqlite3_column_int64(s, 0); - docListInit(out, DL_DEFAULT, - sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - return rc==SQLITE_DONE ? SQLITE_ROW : rc; -} - -/* Load the segment doclists for term pTerm and merge them in -** appropriate order into out. Returns SQLITE_OK if successful. 
If -** there are no segments for pTerm, successfully returns an empty -** doclist in out. -** -** Each document consists of 1 or more "columns". The number of -** columns is v->nColumn. If iColumn==v->nColumn, then return -** position information about all columns. If iColumnnColumn, -** then only return position information about the iColumn-th column -** (where the first column is 0). -*/ -static int term_select_all( - fulltext_vtab *v, /* The fulltext index we are querying against */ - int iColumn, /* If nColumn ){ /* querying a single column */ - docListRestrictColumn(&old, iColumn); - } - - /* doclist contains the newer data, so write it over old. Then - ** steal accumulated result for doclist. - */ - docListAccumulate(&old, &doclist); - docListDestroy(&doclist); - doclist = old; - } - if( rc!=SQLITE_DONE ){ - docListDestroy(&doclist); - return rc; - } - - docListDiscardEmpty(&doclist); - *out = doclist; - return SQLITE_OK; -} - -/* insert into %_term (rowid, term, segment, doclist) - values ([piRowid], [pTerm], [iSegment], [doclist]) -** Lets sqlite select rowid if piRowid is NULL, else uses *piRowid. -** -** NOTE(shess) piRowid is IN, with values of "space of int64" plus -** null, it is not used to pass data back to the caller. -*/ -static int term_insert(fulltext_vtab *v, sqlite_int64 *piRowid, - const char *pTerm, int nTerm, - int iSegment, DocList *doclist){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - if( piRowid==NULL ){ - rc = sqlite3_bind_null(s, 1); - }else{ - rc = sqlite3_bind_int64(s, 1, *piRowid); - } - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 2, pTerm, nTerm, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 3, iSegment); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 4, doclist->pData, doclist->nData, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_INSERT_STMT, &s); -} - -/* update %_term set doclist = [doclist] where rowid = [rowid] */ -static int term_update(fulltext_vtab *v, sqlite_int64 rowid, - DocList *doclist){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, rowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); -} - -static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, rowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_DELETE_STMT, &s); -} - -/* -** Free the memory used to contain a fulltext_vtab structure. -*/ -static void fulltext_vtab_destroy(fulltext_vtab *v){ - int iStmt, i; - - TRACE(("FTS1 Destroy %p\n", v)); - for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ - sqlite3_finalize(v->pFulltextStatements[iStmt]); - v->pFulltextStatements[iStmt] = NULL; - } - } - - if( v->pTokenizer!=NULL ){ - v->pTokenizer->pModule->xDestroy(v->pTokenizer); - v->pTokenizer = NULL; - } - - free(v->azColumn); - for(i = 0; i < v->nColumn; ++i) { - sqlite3_free(v->azContentColumn[i]); - } - free(v->azContentColumn); - free(v); -} - -/* -** Token types for parsing the arguments to xConnect or xCreate. 
-*/ -#define TOKEN_EOF 0 /* End of file */ -#define TOKEN_SPACE 1 /* Any kind of whitespace */ -#define TOKEN_ID 2 /* An identifier */ -#define TOKEN_STRING 3 /* A string literal */ -#define TOKEN_PUNCT 4 /* A single punctuation character */ - -/* -** If X is a character that can be used in an identifier then -** IdChar(X) will be true. Otherwise it is false. -** -** For ASCII, any character with the high-order bit set is -** allowed in an identifier. For 7-bit characters, -** sqlite3IsIdChar[X] must be 1. -** -** Ticket #1066. the SQL standard does not allow '$' in the -** middle of identfiers. But many SQL implementations do. -** SQLite will allow '$' in identifiers for compatibility. -** But the feature is undocumented. -*/ -static const char isIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ -}; -#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) - - -/* -** Return the length of the token that begins at z[0]. -** Store the token type in *tokenType before returning. -*/ -static int getToken(const char *z, int *tokenType){ - int i, c; - switch( *z ){ - case 0: { - *tokenType = TOKEN_EOF; - return 0; - } - case ' ': case '\t': case '\n': case '\f': case '\r': { - for(i=1; safe_isspace(z[i]); i++){} - *tokenType = TOKEN_SPACE; - return i; - } - case '`': - case '\'': - case '"': { - int delim = z[0]; - for(i=1; (c=z[i])!=0; i++){ - if( c==delim ){ - if( z[i+1]==delim ){ - i++; - }else{ - break; - } - } - } - *tokenType = TOKEN_STRING; - return i + (c!=0); - } - case '[': { - for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} - *tokenType = TOKEN_ID; - return i; - } - default: { - if( !IdChar(*z) ){ - break; - } - for(i=1; IdChar(z[i]); i++){} - *tokenType = TOKEN_ID; - return i; - } - } - *tokenType = TOKEN_PUNCT; - return 1; -} - -/* -** A token extracted from a string is an instance of the following -** structure. -*/ -typedef struct Token { - const char *z; /* Pointer to token text. Not '\000' terminated */ - short int n; /* Length of the token text in bytes. */ -} Token; - -/* -** Given a input string (which is really one of the argv[] parameters -** passed into xConnect or xCreate) split the string up into tokens. -** Return an array of pointers to '\000' terminated strings, one string -** for each non-whitespace token. -** -** The returned array is terminated by a single NULL pointer. -** -** Space to hold the returned array is obtained from a single -** malloc and should be freed by passing the return value to free(). -** The individual strings within the token list are all a part of -** the single memory allocation and will all be freed at once. 
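The single-allocation layout described above, a pointer table followed by the NUL-terminated string copies it points into, is a common C idiom. A self-contained sketch of that idiom, independent of the FTS1 token scanner, looks like this:

```c
#include <stdlib.h>
#include <string.h>

/* Copy nStr strings into one allocation: a NULL-terminated pointer table
** followed by the string bytes themselves. The whole result is released
** with a single free(), as with the array returned by tokenizeString(). */
static char **packStrings(const char *const *azIn, int nStr){
  int i;
  size_t nByte = (size_t)(nStr+1)*sizeof(char*);
  char **azOut;
  char *z;
  for(i=0; i<nStr; i++) nByte += strlen(azIn[i]) + 1;
  azOut = (char**)malloc(nByte);
  if( azOut==NULL ) return NULL;
  z = (char*)&azOut[nStr+1];            /* string storage begins after the table */
  for(i=0; i<nStr; i++){
    size_t n = strlen(azIn[i]) + 1;
    memcpy(z, azIn[i], n);
    azOut[i] = z;
    z += n;
  }
  azOut[nStr] = NULL;                   /* terminator */
  return azOut;
}
```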
-*/ -static char **tokenizeString(const char *z, int *pnToken){ - int nToken = 0; - Token *aToken = malloc( strlen(z) * sizeof(aToken[0]) ); - int n = 1; - int e, i; - int totalSize = 0; - char **azToken; - char *zCopy; - while( n>0 ){ - n = getToken(z, &e); - if( e!=TOKEN_SPACE ){ - aToken[nToken].z = z; - aToken[nToken].n = n; - nToken++; - totalSize += n+1; - } - z += n; - } - azToken = (char**)malloc( nToken*sizeof(char*) + totalSize ); - zCopy = (char*)&azToken[nToken]; - nToken--; - for(i=0; i=0 ){ - azIn[j] = azIn[i]; - } - j++; - } - } - azIn[j] = 0; - } -} - - -/* -** Find the first alphanumeric token in the string zIn. Null-terminate -** this token. Remove any quotation marks. And return a pointer to -** the result. -*/ -static char *firstToken(char *zIn, char **pzTail){ - int n, ttype; - while(1){ - n = getToken(zIn, &ttype); - if( ttype==TOKEN_SPACE ){ - zIn += n; - }else if( ttype==TOKEN_EOF ){ - *pzTail = zIn; - return 0; - }else{ - zIn[n] = 0; - *pzTail = &zIn[1]; - dequoteString(zIn); - return zIn; - } - } - /*NOTREACHED*/ -} - -/* Return true if... -** -** * s begins with the string t, ignoring case -** * s is longer than t -** * The first character of s beyond t is not a alphanumeric -** -** Ignore leading space in *s. -** -** To put it another way, return true if the first token of -** s[] is t[]. -*/ -static int startsWith(const char *s, const char *t){ - while( safe_isspace(*s) ){ s++; } - while( *t ){ - if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; - } - return *s!='_' && !safe_isalnum(*s); -} - -/* -** An instance of this structure defines the "spec" of a -** full text index. This structure is populated by parseSpec -** and use by fulltextConnect and fulltextCreate. -*/ -typedef struct TableSpec { - const char *zDb; /* Logical database name */ - const char *zName; /* Name of the full-text index */ - int nColumn; /* Number of columns to be indexed */ - char **azColumn; /* Original names of columns to be indexed */ - char **azContentColumn; /* Column names for %_content */ - char **azTokenizer; /* Name of tokenizer and its arguments */ -} TableSpec; - -/* -** Reclaim all of the memory used by a TableSpec -*/ -static void clearTableSpec(TableSpec *p) { - free(p->azColumn); - free(p->azContentColumn); - free(p->azTokenizer); -} - -/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: - * - * CREATE VIRTUAL TABLE email - * USING fts1(subject, body, tokenize mytokenizer(myarg)) - * - * We return parsed information in a TableSpec structure. - * - */ -static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, - char**pzErr){ - int i, n; - char *z, *zDummy; - char **azArg; - const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ - - assert( argc>=3 ); - /* Current interface: - ** argv[0] - module name - ** argv[1] - database name - ** argv[2] - table name - ** argv[3..] - columns, optionally followed by tokenizer specification - ** and snippet delimiters specification. - */ - - /* Make a copy of the complete argv[][] array in a single allocation. - ** The argv[][] array is read-only and transient. We can write to the - ** copy in order to modify things and the copy is persistent. 
- */ - memset(pSpec, 0, sizeof(*pSpec)); - for(i=n=0; izDb = azArg[1]; - pSpec->zName = azArg[2]; - pSpec->nColumn = 0; - pSpec->azColumn = azArg; - zTokenizer = "tokenize simple"; - for(i=3; inColumn] = firstToken(azArg[i], &zDummy); - pSpec->nColumn++; - } - } - if( pSpec->nColumn==0 ){ - azArg[0] = "content"; - pSpec->nColumn = 1; - } - - /* - ** Construct the list of content column names. - ** - ** Each content column name will be of the form cNNAAAA - ** where NN is the column number and AAAA is the sanitized - ** column name. "sanitized" means that special characters are - ** converted to "_". The cNN prefix guarantees that all column - ** names are unique. - ** - ** The AAAA suffix is not strictly necessary. It is included - ** for the convenience of people who might examine the generated - ** %_content table and wonder what the columns are used for. - */ - pSpec->azContentColumn = malloc( pSpec->nColumn * sizeof(char *) ); - if( pSpec->azContentColumn==0 ){ - clearTableSpec(pSpec); - return SQLITE_NOMEM; - } - for(i=0; inColumn; i++){ - char *p; - pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); - for (p = pSpec->azContentColumn[i]; *p ; ++p) { - if( !safe_isalnum(*p) ) *p = '_'; - } - } - - /* - ** Parse the tokenizer specification string. - */ - pSpec->azTokenizer = tokenizeString(zTokenizer, &n); - tokenListToIdList(pSpec->azTokenizer); - - return SQLITE_OK; -} - -/* -** Generate a CREATE TABLE statement that describes the schema of -** the virtual table. Return a pointer to this schema string. -** -** Space is obtained from sqlite3_mprintf() and should be freed -** using sqlite3_free(). -*/ -static char *fulltextSchema( - int nColumn, /* Number of columns */ - const char *const* azColumn, /* List of columns */ - const char *zTableName /* Name of the table */ -){ - int i; - char *zSchema, *zNext; - const char *zSep = "("; - zSchema = sqlite3_mprintf("CREATE TABLE x"); - for(i=0; ibase */ - v->db = db; - v->zDb = spec->zDb; /* Freed when azColumn is freed */ - v->zName = spec->zName; /* Freed when azColumn is freed */ - v->nColumn = spec->nColumn; - v->azContentColumn = spec->azContentColumn; - spec->azContentColumn = 0; - v->azColumn = spec->azColumn; - spec->azColumn = 0; - - if( spec->azTokenizer==0 ){ - return SQLITE_NOMEM; - } - /* TODO(shess) For now, add new tokenizers as else if clauses. 
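`fulltextSchema()` above assembles the `sqlite3_declare_vtab()` schema string piece by piece. A hedged sketch of the same incremental-build idiom using `sqlite3_mprintf()`'s `%z` conversion, which copies and then frees its string argument, is shown below; the column names are illustrative and this is not the exact FTS1 routine.

```c
#include <stdio.h>
#include <sqlite3.h>

/* Build "CREATE TABLE x(a, b)" from a column list. The %z conversion
** consumes (frees) the previous partial string, so no bookkeeping of
** intermediate allocations is needed. */
static char *buildSchema(int nCol, const char *const *azCol){
  int i;
  const char *zSep = "(";
  char *zSchema = sqlite3_mprintf("CREATE TABLE x");
  for(i=0; zSchema && i<nCol; i++){
    zSchema = sqlite3_mprintf("%z%s%s", zSchema, zSep, azCol[i]);
    zSep = ", ";
  }
  if( zSchema ) zSchema = sqlite3_mprintf("%z)", zSchema);
  return zSchema;   /* caller frees with sqlite3_free() */
}

int main(void){
  const char *azCol[] = { "subject", "body" };
  char *z = buildSchema(2, azCol);
  if( z ){ printf("%s\n", z); sqlite3_free(z); }
  return 0;
}
```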
*/ - if( spec->azTokenizer[0]==0 || startsWith(spec->azTokenizer[0], "simple") ){ - sqlite3Fts1SimpleTokenizerModule(&m); - }else if( startsWith(spec->azTokenizer[0], "porter") ){ - sqlite3Fts1PorterTokenizerModule(&m); - }else{ - *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); - rc = SQLITE_ERROR; - goto err; - } - for(n=0; spec->azTokenizer[n]; n++){} - if( n ){ - rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], - &v->pTokenizer); - }else{ - rc = m->xCreate(0, 0, &v->pTokenizer); - } - if( rc!=SQLITE_OK ) goto err; - v->pTokenizer->pModule = m; - - /* TODO: verify the existence of backing tables foo_content, foo_term */ - - schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, - spec->zName); - rc = sqlite3_declare_vtab(db, schema); - sqlite3_free(schema); - if( rc!=SQLITE_OK ) goto err; - - memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); - - *ppVTab = &v->base; - TRACE(("FTS1 Connect %p\n", v)); - - return rc; - -err: - fulltext_vtab_destroy(v); - return rc; -} - -static int fulltextConnect( - sqlite3 *db, - void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, - char **pzErr -){ - TableSpec spec; - int rc = parseSpec(&spec, argc, argv, pzErr); - if( rc!=SQLITE_OK ) return rc; - - rc = constructVtab(db, &spec, ppVTab, pzErr); - clearTableSpec(&spec); - return rc; -} - - /* The %_content table holds the text of each document, with - ** the rowid used as the docid. - ** - ** The %_term table maps each term to a document list blob - ** containing elements sorted by ascending docid, each element - ** encoded as: - ** - ** docid varint-encoded - ** token elements: - ** position+1 varint-encoded as delta from previous position - ** start offset varint-encoded as delta from previous start offset - ** end offset varint-encoded as delta from start offset - ** - ** The sentinel position of 0 indicates the end of the token list. - ** - ** Additionally, doclist blobs are chunked into multiple segments, - ** using segment to order the segments. New elements are added to - ** the segment at segment 0, until it exceeds CHUNK_MAX. Then - ** segment 0 is deleted, and the doclist is inserted at segment 1. - ** If there is already a doclist at segment 1, the segment 0 doclist - ** is merged with it, the segment 1 doclist is deleted, and the - ** merged doclist is inserted at segment 2, repeating those - ** operations until an insert succeeds. - ** - ** Since this structure doesn't allow us to update elements in place - ** in case of deletion or update, these are simply written to - ** segment 0 (with an empty token list in case of deletion), with - ** docListAccumulate() taking care to retain lower-segment - ** information in preference to higher-segment information. - */ - /* TODO(shess) Provide a VACUUM type operation which both removes - ** deleted elements which are no longer necessary, and duplicated - ** elements. I suspect this will probably not be necessary in - ** practice, though. 
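The doclist layout sketched in the comment above stores docids and position/offset deltas as variable-length integers. A minimal sketch of one common 7-bits-per-byte varint encoding is below; it is illustrative only and not necessarily byte-for-byte identical to the FTS1 varint routines defined elsewhere in this file.

```c
#include <stdio.h>

typedef unsigned long long u64;

/* Write v into buf, 7 bits per byte, high bit set on all but the last byte.
** Returns the number of bytes written (at most 10 for a 64-bit value). */
static int putVarint(unsigned char *buf, u64 v){
  int n = 0;
  do{
    unsigned char c = (unsigned char)(v & 0x7f);
    v >>= 7;
    buf[n++] = v ? (unsigned char)(c | 0x80) : c;
  }while( v );
  return n;
}

/* Read a varint from buf into *pVal; return the number of bytes consumed. */
static int getVarint(const unsigned char *buf, u64 *pVal){
  u64 v = 0;
  int n = 0, shift = 0;
  unsigned char c;
  do{
    c = buf[n++];
    v |= (u64)(c & 0x7f) << shift;
    shift += 7;
  }while( c & 0x80 );
  *pVal = v;
  return n;
}

int main(void){
  unsigned char buf[10];
  u64 v;
  int n = putVarint(buf, 123456789ULL);
  getVarint(buf, &v);
  printf("%d bytes, value %llu\n", n, v);
  return 0;
}
```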
- */ -static int fulltextCreate(sqlite3 *db, void *pAux, - int argc, const char * const *argv, - sqlite3_vtab **ppVTab, char **pzErr){ - int rc; - TableSpec spec; - StringBuffer schema; - TRACE(("FTS1 Create\n")); - - rc = parseSpec(&spec, argc, argv, pzErr); - if( rc!=SQLITE_OK ) return rc; - - initStringBuffer(&schema); - append(&schema, "CREATE TABLE %_content("); - appendList(&schema, spec.nColumn, spec.azContentColumn); - append(&schema, ")"); - rc = sql_exec(db, spec.zDb, spec.zName, schema.s); - free(schema.s); - if( rc!=SQLITE_OK ) goto out; - - rc = sql_exec(db, spec.zDb, spec.zName, - "create table %_term(term text, segment integer, doclist blob, " - "primary key(term, segment));"); - if( rc!=SQLITE_OK ) goto out; - - rc = constructVtab(db, &spec, ppVTab, pzErr); - -out: - clearTableSpec(&spec); - return rc; -} - -/* Decide how to handle an SQL query. */ -static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ - int i; - TRACE(("FTS1 BestIndex\n")); - - for(i=0; inConstraint; ++i){ - const struct sqlite3_index_constraint *pConstraint; - pConstraint = &pInfo->aConstraint[i]; - if( pConstraint->usable ) { - if( pConstraint->iColumn==-1 && - pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ - pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ - TRACE(("FTS1 QUERY_ROWID\n")); - } else if( pConstraint->iColumn>=0 && - pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ - /* full-text search */ - pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; - TRACE(("FTS1 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); - } else continue; - - pInfo->aConstraintUsage[i].argvIndex = 1; - pInfo->aConstraintUsage[i].omit = 1; - - /* An arbitrary value for now. - * TODO: Perhaps rowid matches should be considered cheaper than - * full-text searches. */ - pInfo->estimatedCost = 1.0; - - return SQLITE_OK; - } - } - pInfo->idxNum = QUERY_GENERIC; - return SQLITE_OK; -} - -static int fulltextDisconnect(sqlite3_vtab *pVTab){ - TRACE(("FTS1 Disconnect %p\n", pVTab)); - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextDestroy(sqlite3_vtab *pVTab){ - fulltext_vtab *v = (fulltext_vtab *)pVTab; - int rc; - - TRACE(("FTS1 Destroy %p\n", pVTab)); - rc = sql_exec(v->db, v->zDb, v->zName, - "drop table if exists %_content;" - "drop table if exists %_term;" - ); - if( rc!=SQLITE_OK ) return rc; - - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ - fulltext_cursor *c; - - c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); - /* sqlite will initialize c->base */ - *ppCursor = &c->base; - TRACE(("FTS1 Open %p: %p\n", pVTab, c)); - - return SQLITE_OK; -} - - -/* Free all of the dynamically allocated memory held by *q -*/ -static void queryClear(Query *q){ - int i; - for(i = 0; i < q->nTerms; ++i){ - free(q->pTerms[i].pTerm); - } - free(q->pTerms); - memset(q, 0, sizeof(*q)); -} - -/* Free all of the dynamically allocated memory held by the -** Snippet -*/ -static void snippetClear(Snippet *p){ - free(p->aMatch); - free(p->zOffset); - free(p->zSnippet); - memset(p, 0, sizeof(*p)); -} -/* -** Append a single entry to the p->aMatch[] log. 
-*/ -static void snippetAppendMatch( - Snippet *p, /* Append the entry to this snippet */ - int iCol, int iTerm, /* The column and query term */ - int iStart, int nByte /* Offset and size of the match */ -){ - int i; - struct snippetMatch *pMatch; - if( p->nMatch+1>=p->nAlloc ){ - p->nAlloc = p->nAlloc*2 + 10; - p->aMatch = realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); - if( p->aMatch==0 ){ - p->nMatch = 0; - p->nAlloc = 0; - return; - } - } - i = p->nMatch++; - pMatch = &p->aMatch[i]; - pMatch->iCol = iCol; - pMatch->iTerm = iTerm; - pMatch->iStart = iStart; - pMatch->nByte = nByte; -} - -/* -** Sizing information for the circular buffer used in snippetOffsetsOfColumn() -*/ -#define FTS1_ROTOR_SZ (32) -#define FTS1_ROTOR_MASK (FTS1_ROTOR_SZ-1) - -/* -** Add entries to pSnippet->aMatch[] for every match that occurs against -** document zDoc[0..nDoc-1] which is stored in column iColumn. -*/ -static void snippetOffsetsOfColumn( - Query *pQuery, - Snippet *pSnippet, - int iColumn, - const char *zDoc, - int nDoc -){ - const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ - sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ - sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ - fulltext_vtab *pVtab; /* The full text index */ - int nColumn; /* Number of columns in the index */ - const QueryTerm *aTerm; /* Query string terms */ - int nTerm; /* Number of query string terms */ - int i, j; /* Loop counters */ - int rc; /* Return code */ - unsigned int match, prevMatch; /* Phrase search bitmasks */ - const char *zToken; /* Next token from the tokenizer */ - int nToken; /* Size of zToken */ - int iBegin, iEnd, iPos; /* Offsets of beginning and end */ - - /* The following variables keep a circular buffer of the last - ** few tokens */ - unsigned int iRotor = 0; /* Index of current token */ - int iRotorBegin[FTS1_ROTOR_SZ]; /* Beginning offset of token */ - int iRotorLen[FTS1_ROTOR_SZ]; /* Length of token */ - - pVtab = pQuery->pFts; - nColumn = pVtab->nColumn; - pTokenizer = pVtab->pTokenizer; - pTModule = pTokenizer->pModule; - rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); - if( rc ) return; - pTCursor->pTokenizer = pTokenizer; - aTerm = pQuery->pTerms; - nTerm = pQuery->nTerms; - if( nTerm>=FTS1_ROTOR_SZ ){ - nTerm = FTS1_ROTOR_SZ - 1; - } - prevMatch = 0; - while(1){ - rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); - if( rc ) break; - iRotorBegin[iRotor&FTS1_ROTOR_MASK] = iBegin; - iRotorLen[iRotor&FTS1_ROTOR_MASK] = iEnd-iBegin; - match = 0; - for(i=0; i=0 && iCol1 && (prevMatch & (1<=0; j--){ - int k = (iRotor-j) & FTS1_ROTOR_MASK; - snippetAppendMatch(pSnippet, iColumn, i-j, - iRotorBegin[k], iRotorLen[k]); - } - } - } - prevMatch = match<<1; - iRotor++; - } - pTModule->xClose(pTCursor); -} - - -/* -** Compute all offsets for the current row of the query. -** If the offsets have already been computed, this routine is a no-op. 
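`snippetAppendMatch()` above grows its match log with the classic amortized-doubling realloc idiom (`nAlloc = nAlloc*2 + 10`). A generic, self-contained version of that idiom follows; the element type is illustrative, and unlike the FTS1 code this sketch keeps the existing entries if the reallocation fails.

```c
#include <stdlib.h>

typedef struct Match { int iCol, iTerm, iStart, nByte; } Match;

typedef struct MatchLog {
  Match *a;      /* growable array of matches */
  int n;         /* entries in use */
  int nAlloc;    /* entries allocated */
} MatchLog;

/* Append one entry, doubling the allocation when it fills up.
** Returns 0 on success, -1 if the reallocation fails. */
static int appendMatch(MatchLog *p, Match m){
  if( p->n>=p->nAlloc ){
    int nNew = p->nAlloc*2 + 10;
    Match *aNew = (Match*)realloc(p->a, (size_t)nNew*sizeof(Match));
    if( aNew==NULL ) return -1;   /* old array and its contents are preserved */
    p->a = aNew;
    p->nAlloc = nNew;
  }
  p->a[p->n++] = m;
  return 0;
}
```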
-*/ -static void snippetAllOffsets(fulltext_cursor *p){ - int nColumn; - int iColumn, i; - int iFirst, iLast; - fulltext_vtab *pFts; - - if( p->snippet.nMatch ) return; - if( p->q.nTerms==0 ) return; - pFts = p->q.pFts; - nColumn = pFts->nColumn; - iColumn = p->iCursorType - QUERY_FULLTEXT; - if( iColumn<0 || iColumn>=nColumn ){ - iFirst = 0; - iLast = nColumn-1; - }else{ - iFirst = iColumn; - iLast = iColumn; - } - for(i=iFirst; i<=iLast; i++){ - const char *zDoc; - int nDoc; - zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); - nDoc = sqlite3_column_bytes(p->pStmt, i+1); - snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); - } -} - -/* -** Convert the information in the aMatch[] array of the snippet -** into the string zOffset[0..nOffset-1]. -*/ -static void snippetOffsetText(Snippet *p){ - int i; - int cnt = 0; - StringBuffer sb; - char zBuf[200]; - if( p->zOffset ) return; - initStringBuffer(&sb); - for(i=0; inMatch; i++){ - struct snippetMatch *pMatch = &p->aMatch[i]; - zBuf[0] = ' '; - sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", - pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); - append(&sb, zBuf); - cnt++; - } - p->zOffset = sb.s; - p->nOffset = sb.len; -} - -/* -** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set -** of matching words some of which might be in zDoc. zDoc is column -** number iCol. -** -** iBreak is suggested spot in zDoc where we could begin or end an -** excerpt. Return a value similar to iBreak but possibly adjusted -** to be a little left or right so that the break point is better. -*/ -static int wordBoundary( - int iBreak, /* The suggested break point */ - const char *zDoc, /* Document text */ - int nDoc, /* Number of bytes in zDoc[] */ - struct snippetMatch *aMatch, /* Matching words */ - int nMatch, /* Number of entries in aMatch[] */ - int iCol /* The column number for zDoc[] */ -){ - int i; - if( iBreak<=10 ){ - return 0; - } - if( iBreak>=nDoc-10 ){ - return nDoc; - } - for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ - return aMatch[i-1].iStart; - } - } - for(i=1; i<=10; i++){ - if( safe_isspace(zDoc[iBreak-i]) ){ - return iBreak - i + 1; - } - if( safe_isspace(zDoc[iBreak+i]) ){ - return iBreak + i + 1; - } - } - return iBreak; -} - -/* -** If the StringBuffer does not end in white space, add a single -** space character to the end. -*/ -static void appendWhiteSpace(StringBuffer *p){ - if( p->len==0 ) return; - if( safe_isspace(p->s[p->len-1]) ) return; - append(p, " "); -} - -/* -** Remove white space from teh end of the StringBuffer -*/ -static void trimWhiteSpace(StringBuffer *p){ - while( p->len>0 && safe_isspace(p->s[p->len-1]) ){ - p->len--; - } -} - - - -/* -** Allowed values for Snippet.aMatch[].snStatus -*/ -#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ -#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ - -/* -** Generate the text of a snippet. 
-*/ -static void snippetText( - fulltext_cursor *pCursor, /* The cursor we need the snippet for */ - const char *zStartMark, /* Markup to appear before each match */ - const char *zEndMark, /* Markup to appear after each match */ - const char *zEllipsis /* Ellipsis mark */ -){ - int i, j; - struct snippetMatch *aMatch; - int nMatch; - int nDesired; - StringBuffer sb; - int tailCol; - int tailOffset; - int iCol; - int nDoc; - const char *zDoc; - int iStart, iEnd; - int tailEllipsis = 0; - int iMatch; - - - free(pCursor->snippet.zSnippet); - pCursor->snippet.zSnippet = 0; - aMatch = pCursor->snippet.aMatch; - nMatch = pCursor->snippet.nMatch; - initStringBuffer(&sb); - - for(i=0; iq.nTerms; i++){ - for(j=0; j0; i++){ - if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; - nDesired--; - iCol = aMatch[i].iCol; - zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); - nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); - iStart = aMatch[i].iStart - 40; - iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); - if( iStart<=10 ){ - iStart = 0; - } - if( iCol==tailCol && iStart<=tailOffset+20 ){ - iStart = tailOffset; - } - if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ - trimWhiteSpace(&sb); - appendWhiteSpace(&sb); - append(&sb, zEllipsis); - appendWhiteSpace(&sb); - } - iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; - iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); - if( iEnd>=nDoc-10 ){ - iEnd = nDoc; - tailEllipsis = 0; - }else{ - tailEllipsis = 1; - } - while( iMatchsnippet.zSnippet = sb.s; - pCursor->snippet.nSnippet = sb.len; -} - - -/* -** Close the cursor. For additional information see the documentation -** on the xClose method of the virtual table interface. -*/ -static int fulltextClose(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - TRACE(("FTS1 Close %p\n", c)); - sqlite3_finalize(c->pStmt); - queryClear(&c->q); - snippetClear(&c->snippet); - if( c->result.pDoclist!=NULL ){ - docListDelete(c->result.pDoclist); - } - free(c); - return SQLITE_OK; -} - -static int fulltextNext(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - sqlite_int64 iDocid; - int rc; - - TRACE(("FTS1 Next %p\n", pCursor)); - snippetClear(&c->snippet); - if( c->iCursorType < QUERY_FULLTEXT ){ - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - switch( rc ){ - case SQLITE_ROW: - c->eof = 0; - return SQLITE_OK; - case SQLITE_DONE: - c->eof = 1; - return SQLITE_OK; - default: - c->eof = 1; - return rc; - } - } else { /* full-text query */ - rc = sqlite3_reset(c->pStmt); - if( rc!=SQLITE_OK ) return rc; - - iDocid = nextDocid(&c->result); - if( iDocid==0 ){ - c->eof = 1; - return SQLITE_OK; - } - rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); - if( rc!=SQLITE_OK ) return rc; - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - if( rc==SQLITE_ROW ){ /* the case we expect */ - c->eof = 0; - return SQLITE_OK; - } - /* an error occurred; abort */ - return rc==SQLITE_DONE ? SQLITE_ERROR : rc; - } -} - - -/* Return a DocList corresponding to the query term *pTerm. If *pTerm -** is the first term of a phrase query, go ahead and evaluate the phrase -** query and return the doclist for the entire phrase query. -** -** The result is stored in pTerm->doclist. -*/ -static int docListOfTerm( - fulltext_vtab *v, /* The full text index */ - int iColumn, /* column to restrict to. 
No restrition if >=nColumn */ - QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ - DocList **ppResult /* Write the result here */ -){ - DocList *pLeft, *pRight, *pNew; - int i, rc; - - pLeft = docListNew(DL_POSITIONS); - rc = term_select_all(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pLeft); - if( rc ){ - docListDelete(pLeft); - return rc; - } - for(i=1; i<=pQTerm->nPhrase; i++){ - pRight = docListNew(DL_POSITIONS); - rc = term_select_all(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, pRight); - if( rc ){ - docListDelete(pLeft); - return rc; - } - pNew = docListNew(inPhrase ? DL_POSITIONS : DL_DOCIDS); - docListPhraseMerge(pLeft, pRight, pNew); - docListDelete(pLeft); - docListDelete(pRight); - pLeft = pNew; - } - *ppResult = pLeft; - return SQLITE_OK; -} - -/* Add a new term pTerm[0..nTerm-1] to the query *q. -*/ -static void queryAdd(Query *q, const char *pTerm, int nTerm){ - QueryTerm *t; - ++q->nTerms; - q->pTerms = realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); - if( q->pTerms==0 ){ - q->nTerms = 0; - return; - } - t = &q->pTerms[q->nTerms - 1]; - memset(t, 0, sizeof(*t)); - t->pTerm = malloc(nTerm+1); - memcpy(t->pTerm, pTerm, nTerm); - t->pTerm[nTerm] = 0; - t->nTerm = nTerm; - t->isOr = q->nextIsOr; - q->nextIsOr = 0; - t->iColumn = q->nextColumn; - q->nextColumn = q->dfltColumn; -} - -/* -** Check to see if the string zToken[0...nToken-1] matches any -** column name in the virtual table. If it does, -** return the zero-indexed column number. If not, return -1. -*/ -static int checkColumnSpecifier( - fulltext_vtab *pVtab, /* The virtual table */ - const char *zToken, /* Text of the token */ - int nToken /* Number of characters in the token */ -){ - int i; - for(i=0; inColumn; i++){ - if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 - && pVtab->azColumn[i][nToken]==0 ){ - return i; - } - } - return -1; -} - -/* -** Parse the text at pSegment[0..nSegment-1]. Add additional terms -** to the query being assemblied in pQuery. -** -** inPhrase is true if pSegment[0..nSegement-1] is contained within -** double-quotes. If inPhrase is true, then the first term -** is marked with the number of terms in the phrase less one and -** OR and "-" syntax is ignored. If inPhrase is false, then every -** term found is marked with nPhrase=0 and OR and "-" syntax is significant. -*/ -static int tokenizeSegment( - sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ - const char *pSegment, int nSegment, /* Query expression being parsed */ - int inPhrase, /* True if within "..." 
*/ - Query *pQuery /* Append results here */ -){ - const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; - sqlite3_tokenizer_cursor *pCursor; - int firstIndex = pQuery->nTerms; - int iCol; - int nTerm = 1; - - int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); - if( rc!=SQLITE_OK ) return rc; - pCursor->pTokenizer = pTokenizer; - - while( 1 ){ - const char *pToken; - int nToken, iBegin, iEnd, iPos; - - rc = pModule->xNext(pCursor, - &pToken, &nToken, - &iBegin, &iEnd, &iPos); - if( rc!=SQLITE_OK ) break; - if( !inPhrase && - pSegment[iEnd]==':' && - (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ - pQuery->nextColumn = iCol; - continue; - } - if( !inPhrase && pQuery->nTerms>0 && nToken==2 - && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ - pQuery->nextIsOr = 1; - continue; - } - queryAdd(pQuery, pToken, nToken); - if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ - pQuery->pTerms[pQuery->nTerms-1].isNot = 1; - } - pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; - if( inPhrase ){ - nTerm++; - } - } - - if( inPhrase && pQuery->nTerms>firstIndex ){ - pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; - } - - return pModule->xClose(pCursor); -} - -/* Parse a query string, yielding a Query object pQuery. -** -** The calling function will need to queryClear() to clean up -** the dynamically allocated memory held by pQuery. -*/ -static int parseQuery( - fulltext_vtab *v, /* The fulltext index */ - const char *zInput, /* Input text of the query string */ - int nInput, /* Size of the input text */ - int dfltColumn, /* Default column of the index to match against */ - Query *pQuery /* Write the parse results here. */ -){ - int iInput, inPhrase = 0; - - if( zInput==0 ) nInput = 0; - if( nInput<0 ) nInput = strlen(zInput); - pQuery->nTerms = 0; - pQuery->pTerms = NULL; - pQuery->nextIsOr = 0; - pQuery->nextColumn = dfltColumn; - pQuery->dfltColumn = dfltColumn; - pQuery->pFts = v; - - for(iInput=0; iInputiInput ){ - tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, - pQuery); - } - iInput = i; - if( i=nColumn -** they are allowed to match against any column. -*/ -static int fulltextQuery( - fulltext_vtab *v, /* The full text index */ - int iColumn, /* Match against this column by default */ - const char *zInput, /* The query string */ - int nInput, /* Number of bytes in zInput[] */ - DocList **pResult, /* Write the result doclist here */ - Query *pQuery /* Put parsed query string here */ -){ - int i, iNext, rc; - DocList *pLeft = NULL; - DocList *pRight, *pNew, *pOr; - int nNot = 0; - QueryTerm *aTerm; - - rc = parseQuery(v, zInput, nInput, iColumn, pQuery); - if( rc!=SQLITE_OK ) return rc; - - /* Merge AND terms. 
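`parseQuery()` and `tokenizeSegment()` above accept the FTS1 query grammar: bare terms are ANDed, `OR` joins alternatives, a leading `-` excludes a term, `column:` restricts a term to one column, and double quotes group a phrase. A hedged end-to-end usage sketch, assuming SQLite was built with `SQLITE_ENABLE_FTS1` and using an illustrative `email(subject, body)` table:

```c
#include <stdio.h>
#include <sqlite3.h>

static int print_row(void *unused, int nCol, char **azVal, char **azName){
  int i;
  (void)unused;
  for(i=0; i<nCol; i++) printf("%s=%s ", azName[i], azVal[i] ? azVal[i] : "NULL");
  printf("\n");
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db,
    "CREATE VIRTUAL TABLE email USING fts1(subject, body);"
    "INSERT INTO email(subject, body) VALUES('hello fts', 'a full text search demo');",
    0, 0, &zErr);
  /* Column restriction, OR, a quoted phrase, and exclusion in one MATCH. */
  sqlite3_exec(db,
    "SELECT rowid, snippet(email) FROM email "
    "WHERE email MATCH 'subject:hello OR \"full text\" -spam';",
    print_row, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }
  sqlite3_close(db);
  return 0;
}
```

Matching against the hidden column named after the table (`email MATCH ...`) searches all indexed columns, and passing that same column to `snippet()` hands the cursor pointer to `snippetFunc()` as described later in this file.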
*/ - aTerm = pQuery->pTerms; - for(i = 0; inTerms; i=iNext){ - if( aTerm[i].isNot ){ - /* Handle all NOT terms in a separate pass */ - nNot++; - iNext = i + aTerm[i].nPhrase+1; - continue; - } - iNext = i + aTerm[i].nPhrase + 1; - rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); - if( rc ){ - queryClear(pQuery); - return rc; - } - while( iNextnTerms && aTerm[iNext].isOr ){ - rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &pOr); - iNext += aTerm[iNext].nPhrase + 1; - if( rc ){ - queryClear(pQuery); - return rc; - } - pNew = docListNew(DL_DOCIDS); - docListOrMerge(pRight, pOr, pNew); - docListDelete(pRight); - docListDelete(pOr); - pRight = pNew; - } - if( pLeft==0 ){ - pLeft = pRight; - }else{ - pNew = docListNew(DL_DOCIDS); - docListAndMerge(pLeft, pRight, pNew); - docListDelete(pRight); - docListDelete(pLeft); - pLeft = pNew; - } - } - - if( nNot && pLeft==0 ){ - /* We do not yet know how to handle a query of only NOT terms */ - return SQLITE_ERROR; - } - - /* Do the EXCEPT terms */ - for(i=0; inTerms; i += aTerm[i].nPhrase + 1){ - if( !aTerm[i].isNot ) continue; - rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &pRight); - if( rc ){ - queryClear(pQuery); - docListDelete(pLeft); - return rc; - } - pNew = docListNew(DL_DOCIDS); - docListExceptMerge(pLeft, pRight, pNew); - docListDelete(pRight); - docListDelete(pLeft); - pLeft = pNew; - } - - *pResult = pLeft; - return rc; -} - -/* -** This is the xFilter interface for the virtual table. See -** the virtual table xFilter method documentation for additional -** information. -** -** If idxNum==QUERY_GENERIC then do a full table scan against -** the %_content table. -** -** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry -** in the %_content table. -** -** If idxNum>=QUERY_FULLTEXT then use the full text index. The -** column on the left-hand side of the MATCH operator is column -** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand -** side of the MATCH operator. -*/ -/* TODO(shess) Upgrade the cursor initialization and destruction to -** account for fulltextFilter() being called multiple times on the -** same cursor. The current solution is very fragile. Apply fix to -** fts2 as appropriate. -*/ -static int fulltextFilter( - sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ - int idxNum, const char *idxStr, /* Which indexing scheme to use */ - int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ -){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - fulltext_vtab *v = cursor_vtab(c); - int rc; - char *zSql; - - TRACE(("FTS1 Filter %p\n",pCursor)); - - zSql = sqlite3_mprintf("select rowid, * from %%_content %s", - idxNum==QUERY_GENERIC ? 
"" : "where rowid=?"); - sqlite3_finalize(c->pStmt); - rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); - sqlite3_free(zSql); - if( rc!=SQLITE_OK ) return rc; - - c->iCursorType = idxNum; - switch( idxNum ){ - case QUERY_GENERIC: - break; - - case QUERY_ROWID: - rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); - if( rc!=SQLITE_OK ) return rc; - break; - - default: /* full-text search */ - { - const char *zQuery = (const char *)sqlite3_value_text(argv[0]); - DocList *pResult; - assert( idxNum<=QUERY_FULLTEXT+v->nColumn); - assert( argc==1 ); - queryClear(&c->q); - rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &pResult, &c->q); - if( rc!=SQLITE_OK ) return rc; - if( c->result.pDoclist!=NULL ) docListDelete(c->result.pDoclist); - readerInit(&c->result, pResult); - break; - } - } - - return fulltextNext(pCursor); -} - -/* This is the xEof method of the virtual table. The SQLite core -** calls this routine to find out if it has reached the end of -** a query's results set. -*/ -static int fulltextEof(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - return c->eof; -} - -/* This is the xColumn method of the virtual table. The SQLite -** core calls this method during a query when it needs the value -** of a column from the virtual table. This method needs to use -** one of the sqlite3_result_*() routines to store the requested -** value back in the pContext. -*/ -static int fulltextColumn(sqlite3_vtab_cursor *pCursor, - sqlite3_context *pContext, int idxCol){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - fulltext_vtab *v = cursor_vtab(c); - - if( idxColnColumn ){ - sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); - sqlite3_result_value(pContext, pVal); - }else if( idxCol==v->nColumn ){ - /* The extra column whose name is the same as the table. - ** Return a blob which is a pointer to the cursor - */ - sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); - } - return SQLITE_OK; -} - -/* This is the xRowid method. The SQLite core calls this routine to -** retrive the rowid for the current row of the result set. The -** rowid should be written to *pRowid. -*/ -static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - - *pRowid = sqlite3_column_int64(c->pStmt, 0); - return SQLITE_OK; -} - -/* Add all terms in [zText] to the given hash table. If [iColumn] > 0, - * we also store positions and offsets in the hash table using the given - * column number. */ -static int buildTerms(fulltext_vtab *v, fts1Hash *terms, sqlite_int64 iDocid, - const char *zText, int iColumn){ - sqlite3_tokenizer *pTokenizer = v->pTokenizer; - sqlite3_tokenizer_cursor *pCursor; - const char *pToken; - int nTokenBytes; - int iStartOffset, iEndOffset, iPosition; - int rc; - - rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); - if( rc!=SQLITE_OK ) return rc; - - pCursor->pTokenizer = pTokenizer; - while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, - &pToken, &nTokenBytes, - &iStartOffset, &iEndOffset, - &iPosition) ){ - DocList *p; - - /* Positions can't be negative; we use -1 as a terminator internally. 
*/ - if( iPosition<0 ){ - pTokenizer->pModule->xClose(pCursor); - return SQLITE_ERROR; - } - - p = fts1HashFind(terms, pToken, nTokenBytes); - if( p==NULL ){ - p = docListNew(DL_DEFAULT); - docListAddDocid(p, iDocid); - fts1HashInsert(terms, pToken, nTokenBytes, p); - } - if( iColumn>=0 ){ - docListAddPosOffset(p, iColumn, iPosition, iStartOffset, iEndOffset); - } - } - - /* TODO(shess) Check return? Should this be able to cause errors at - ** this point? Actually, same question about sqlite3_finalize(), - ** though one could argue that failure there means that the data is - ** not durable. *ponder* - */ - pTokenizer->pModule->xClose(pCursor); - return rc; -} - -/* Update the %_terms table to map the term [pTerm] to the given rowid. */ -static int index_insert_term(fulltext_vtab *v, const char *pTerm, int nTerm, - DocList *d){ - sqlite_int64 iIndexRow; - DocList doclist; - int iSegment = 0, rc; - - rc = term_select(v, pTerm, nTerm, iSegment, &iIndexRow, &doclist); - if( rc==SQLITE_DONE ){ - docListInit(&doclist, DL_DEFAULT, 0, 0); - docListUpdate(&doclist, d); - /* TODO(shess) Consider length(doclist)>CHUNK_MAX? */ - rc = term_insert(v, NULL, pTerm, nTerm, iSegment, &doclist); - goto err; - } - if( rc!=SQLITE_ROW ) return SQLITE_ERROR; - - docListUpdate(&doclist, d); - if( doclist.nData<=CHUNK_MAX ){ - rc = term_update(v, iIndexRow, &doclist); - goto err; - } - - /* Doclist doesn't fit, delete what's there, and accumulate - ** forward. - */ - rc = term_delete(v, iIndexRow); - if( rc!=SQLITE_OK ) goto err; - - /* Try to insert the doclist into a higher segment bucket. On - ** failure, accumulate existing doclist with the doclist from that - ** bucket, and put results in the next bucket. - */ - iSegment++; - while( (rc=term_insert(v, &iIndexRow, pTerm, nTerm, iSegment, - &doclist))!=SQLITE_OK ){ - sqlite_int64 iSegmentRow; - DocList old; - int rc2; - - /* Retain old error in case the term_insert() error was really an - ** error rather than a bounced insert. - */ - rc2 = term_select(v, pTerm, nTerm, iSegment, &iSegmentRow, &old); - if( rc2!=SQLITE_ROW ) goto err; - - rc = term_delete(v, iSegmentRow); - if( rc!=SQLITE_OK ) goto err; - - /* Reusing lowest-number deleted row keeps the index smaller. */ - if( iSegmentRownColumn ; ++i){ - char *zText = (char*)sqlite3_value_text(pValues[i]); - int rc = buildTerms(v, terms, iRowid, zText, i); - if( rc!=SQLITE_OK ) return rc; - } - return SQLITE_OK; -} - -/* Add empty doclists for all terms in the given row's content to the hash - * table [pTerms]. */ -static int deleteTerms(fulltext_vtab *v, fts1Hash *pTerms, sqlite_int64 iRowid){ - const char **pValues; - int i; - - int rc = content_select(v, iRowid, &pValues); - if( rc!=SQLITE_OK ) return rc; - - for(i = 0 ; i < v->nColumn; ++i) { - rc = buildTerms(v, pTerms, iRowid, pValues[i], -1); - if( rc!=SQLITE_OK ) break; - } - - freeStringArray(v->nColumn, pValues); - return SQLITE_OK; -} - -/* Insert a row into the %_content table; set *piRowid to be the ID of the - * new row. Fill [pTerms] with new doclists for the %_term table. 
*/ -static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, - sqlite3_value **pValues, - sqlite_int64 *piRowid, fts1Hash *pTerms){ - int rc; - - rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ - if( rc!=SQLITE_OK ) return rc; - *piRowid = sqlite3_last_insert_rowid(v->db); - return insertTerms(v, pTerms, *piRowid, pValues); -} - -/* Delete a row from the %_content table; fill [pTerms] with empty doclists - * to be written to the %_term table. */ -static int index_delete(fulltext_vtab *v, sqlite_int64 iRow, fts1Hash *pTerms){ - int rc = deleteTerms(v, pTerms, iRow); - if( rc!=SQLITE_OK ) return rc; - return content_delete(v, iRow); /* execute an SQL DELETE */ -} - -/* Update a row in the %_content table; fill [pTerms] with new doclists for the - * %_term table. */ -static int index_update(fulltext_vtab *v, sqlite_int64 iRow, - sqlite3_value **pValues, fts1Hash *pTerms){ - /* Generate an empty doclist for each term that previously appeared in this - * row. */ - int rc = deleteTerms(v, pTerms, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ - if( rc!=SQLITE_OK ) return rc; - - /* Now add positions for terms which appear in the updated row. */ - return insertTerms(v, pTerms, iRow, pValues); -} - -/* This function implements the xUpdate callback; it is the top-level entry - * point for inserting, deleting or updating a row in a full-text table. */ -static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, - sqlite_int64 *pRowid){ - fulltext_vtab *v = (fulltext_vtab *) pVtab; - fts1Hash terms; /* maps term string -> PosList */ - int rc; - fts1HashElem *e; - - TRACE(("FTS1 Update %p\n", pVtab)); - - fts1HashInit(&terms, FTS1_HASH_STRING, 1); - - if( nArg<2 ){ - rc = index_delete(v, sqlite3_value_int64(ppArg[0]), &terms); - } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ - /* An update: - * ppArg[0] = old rowid - * ppArg[1] = new rowid - * ppArg[2..2+v->nColumn-1] = values - * ppArg[2+v->nColumn] = value for magic column (we ignore this) - */ - sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); - if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || - sqlite3_value_int64(ppArg[1]) != rowid ){ - rc = SQLITE_ERROR; /* we don't allow changing the rowid */ - } else { - assert( nArg==2+v->nColumn+1); - rc = index_update(v, rowid, &ppArg[2], &terms); - } - } else { - /* An insert: - * ppArg[1] = requested rowid - * ppArg[2..2+v->nColumn-1] = values - * ppArg[2+v->nColumn] = value for magic column (we ignore this) - */ - assert( nArg==2+v->nColumn+1); - rc = index_insert(v, ppArg[1], &ppArg[2], pRowid, &terms); - } - - if( rc==SQLITE_OK ){ - /* Write updated doclists to disk. 
*/ - for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ - DocList *p = fts1HashData(e); - rc = index_insert_term(v, fts1HashKey(e), fts1HashKeysize(e), p); - if( rc!=SQLITE_OK ) break; - } - } - - /* clean up */ - for(e=fts1HashFirst(&terms); e; e=fts1HashNext(e)){ - DocList *p = fts1HashData(e); - docListDelete(p); - } - fts1HashClear(&terms); - - return rc; -} - -/* -** Implementation of the snippet() function for FTS1 -*/ -static void snippetFunc( - sqlite3_context *pContext, - int argc, - sqlite3_value **argv -){ - fulltext_cursor *pCursor; - if( argc<1 ) return; - if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); - }else{ - const char *zStart = ""; - const char *zEnd = ""; - const char *zEllipsis = "..."; - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - if( argc>=2 ){ - zStart = (const char*)sqlite3_value_text(argv[1]); - if( argc>=3 ){ - zEnd = (const char*)sqlite3_value_text(argv[2]); - if( argc>=4 ){ - zEllipsis = (const char*)sqlite3_value_text(argv[3]); - } - } - } - snippetAllOffsets(pCursor); - snippetText(pCursor, zStart, zEnd, zEllipsis); - sqlite3_result_text(pContext, pCursor->snippet.zSnippet, - pCursor->snippet.nSnippet, SQLITE_STATIC); - } -} - -/* -** Implementation of the offsets() function for FTS1 -*/ -static void snippetOffsetsFunc( - sqlite3_context *pContext, - int argc, - sqlite3_value **argv -){ - fulltext_cursor *pCursor; - if( argc<1 ) return; - if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - sqlite3_result_error(pContext, "illegal first argument to offsets",-1); - }else{ - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - snippetAllOffsets(pCursor); - snippetOffsetText(&pCursor->snippet); - sqlite3_result_text(pContext, - pCursor->snippet.zOffset, pCursor->snippet.nOffset, - SQLITE_STATIC); - } -} - -/* -** This routine implements the xFindFunction method for the FTS1 -** virtual table. -*/ -static int fulltextFindFunction( - sqlite3_vtab *pVtab, - int nArg, - const char *zName, - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), - void **ppArg -){ - if( strcmp(zName,"snippet")==0 ){ - *pxFunc = snippetFunc; - return 1; - }else if( strcmp(zName,"offsets")==0 ){ - *pxFunc = snippetOffsetsFunc; - return 1; - } - return 0; -} - -/* -** Rename an fts1 table. 
-*/ -static int fulltextRename( - sqlite3_vtab *pVtab, - const char *zName -){ - fulltext_vtab *p = (fulltext_vtab *)pVtab; - int rc = SQLITE_NOMEM; - char *zSql = sqlite3_mprintf( - "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" - "ALTER TABLE %Q.'%q_term' RENAME TO '%q_term';" - , p->zDb, p->zName, zName - , p->zDb, p->zName, zName - ); - if( zSql ){ - rc = sqlite3_exec(p->db, zSql, 0, 0, 0); - sqlite3_free(zSql); - } - return rc; -} - -static const sqlite3_module fulltextModule = { - /* iVersion */ 0, - /* xCreate */ fulltextCreate, - /* xConnect */ fulltextConnect, - /* xBestIndex */ fulltextBestIndex, - /* xDisconnect */ fulltextDisconnect, - /* xDestroy */ fulltextDestroy, - /* xOpen */ fulltextOpen, - /* xClose */ fulltextClose, - /* xFilter */ fulltextFilter, - /* xNext */ fulltextNext, - /* xEof */ fulltextEof, - /* xColumn */ fulltextColumn, - /* xRowid */ fulltextRowid, - /* xUpdate */ fulltextUpdate, - /* xBegin */ 0, - /* xSync */ 0, - /* xCommit */ 0, - /* xRollback */ 0, - /* xFindFunction */ fulltextFindFunction, - /* xRename */ fulltextRename, -}; - -int sqlite3Fts1Init(sqlite3 *db){ - sqlite3_overload_function(db, "snippet", -1); - sqlite3_overload_function(db, "offsets", -1); - return sqlite3_create_module(db, "fts1", &fulltextModule, 0); -} - -#if !SQLITE_CORE -#ifdef _WIN32 -__declspec(dllexport) -#endif -int sqlite3_fts1_init(sqlite3 *db, char **pzErrMsg, - const sqlite3_api_routines *pApi){ - SQLITE_EXTENSION_INIT2(pApi) - return sqlite3Fts1Init(db); -} -#endif - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/ext/fts1/fts1.h b/ext/fts1/fts1.h deleted file mode 100644 index d55e689733..0000000000 --- a/ext/fts1/fts1.h +++ /dev/null @@ -1,11 +0,0 @@ -#include "sqlite3.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -int sqlite3Fts1Init(sqlite3 *db); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ diff --git a/ext/fts1/fts1_hash.c b/ext/fts1/fts1_hash.c deleted file mode 100644 index 463a52b645..0000000000 --- a/ext/fts1/fts1_hash.c +++ /dev/null @@ -1,369 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the implementation of generic hash-tables used in SQLite. -** We've modified it slightly to serve as a standalone hash table -** implementation for the full-text indexing module. -*/ -#include -#include -#include - -/* -** The code in this file is only compiled if: -** -** * The FTS1 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS1 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) - - -#include "fts1_hash.h" - -static void *malloc_and_zero(int n){ - void *p = malloc(n); - if( p ){ - memset(p, 0, n); - } - return p; -} - -/* Turn bulk memory into a hash table object by initializing the -** fields of the Hash structure. -** -** "pNew" is a pointer to the hash table that is to be initialized. -** keyClass is one of the constants -** FTS1_HASH_BINARY or FTS1_HASH_STRING. The value of keyClass -** determines what kind of key the hash table will use. 
"copyKey" is -** true if the hash table should make its own private copy of keys and -** false if it should just use the supplied pointer. -*/ -void sqlite3Fts1HashInit(fts1Hash *pNew, int keyClass, int copyKey){ - assert( pNew!=0 ); - assert( keyClass>=FTS1_HASH_STRING && keyClass<=FTS1_HASH_BINARY ); - pNew->keyClass = keyClass; - pNew->copyKey = copyKey; - pNew->first = 0; - pNew->count = 0; - pNew->htsize = 0; - pNew->ht = 0; - pNew->xMalloc = malloc_and_zero; - pNew->xFree = free; -} - -/* Remove all entries from a hash table. Reclaim all memory. -** Call this routine to delete a hash table or to reset a hash table -** to the empty state. -*/ -void sqlite3Fts1HashClear(fts1Hash *pH){ - fts1HashElem *elem; /* For looping over all elements of the table */ - - assert( pH!=0 ); - elem = pH->first; - pH->first = 0; - if( pH->ht ) pH->xFree(pH->ht); - pH->ht = 0; - pH->htsize = 0; - while( elem ){ - fts1HashElem *next_elem = elem->next; - if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); - } - pH->xFree(elem); - elem = next_elem; - } - pH->count = 0; -} - -/* -** Hash and comparison functions when the mode is FTS1_HASH_STRING -*/ -static int strHash(const void *pKey, int nKey){ - const char *z = (const char *)pKey; - int h = 0; - if( nKey<=0 ) nKey = (int) strlen(z); - while( nKey > 0 ){ - h = (h<<3) ^ h ^ *z++; - nKey--; - } - return h & 0x7fffffff; -} -static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return strncmp((const char*)pKey1,(const char*)pKey2,n1); -} - -/* -** Hash and comparison functions when the mode is FTS1_HASH_BINARY -*/ -static int binHash(const void *pKey, int nKey){ - int h = 0; - const char *z = (const char *)pKey; - while( nKey-- > 0 ){ - h = (h<<3) ^ h ^ *(z++); - } - return h & 0x7fffffff; -} -static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return memcmp(pKey1,pKey2,n1); -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** The C syntax in this function definition may be unfamilar to some -** programmers, so we provide the following additional explanation: -** -** The name of the function is "hashFunction". The function takes a -** single parameter "keyClass". The return value of hashFunction() -** is a pointer to another function. Specifically, the return value -** of hashFunction() is a pointer to a function that takes two parameters -** with types "const void*" and "int" and returns an "int". -*/ -static int (*hashFunction(int keyClass))(const void*,int){ - if( keyClass==FTS1_HASH_STRING ){ - return &strHash; - }else{ - assert( keyClass==FTS1_HASH_BINARY ); - return &binHash; - } -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** For help in interpreted the obscure C code in the function definition, -** see the header comment on the previous function. 
-*/ -static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ - if( keyClass==FTS1_HASH_STRING ){ - return &strCompare; - }else{ - assert( keyClass==FTS1_HASH_BINARY ); - return &binCompare; - } -} - -/* Link an element into the hash table -*/ -static void insertElement( - fts1Hash *pH, /* The complete hash table */ - struct _fts1ht *pEntry, /* The entry into which pNew is inserted */ - fts1HashElem *pNew /* The element to be inserted */ -){ - fts1HashElem *pHead; /* First element already in pEntry */ - pHead = pEntry->chain; - if( pHead ){ - pNew->next = pHead; - pNew->prev = pHead->prev; - if( pHead->prev ){ pHead->prev->next = pNew; } - else { pH->first = pNew; } - pHead->prev = pNew; - }else{ - pNew->next = pH->first; - if( pH->first ){ pH->first->prev = pNew; } - pNew->prev = 0; - pH->first = pNew; - } - pEntry->count++; - pEntry->chain = pNew; -} - - -/* Resize the hash table so that it cantains "new_size" buckets. -** "new_size" must be a power of 2. The hash table might fail -** to resize if sqliteMalloc() fails. -*/ -static void rehash(fts1Hash *pH, int new_size){ - struct _fts1ht *new_ht; /* The new hash table */ - fts1HashElem *elem, *next_elem; /* For looping over existing elements */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _fts1ht *)pH->xMalloc( new_size*sizeof(struct _fts1ht) ); - if( new_ht==0 ) return; - if( pH->ht ) pH->xFree(pH->ht); - pH->ht = new_ht; - pH->htsize = new_size; - xHash = hashFunction(pH->keyClass); - for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); - next_elem = elem->next; - insertElement(pH, &new_ht[h], elem); - } -} - -/* This function (for internal use only) locates an element in an -** hash table that matches the given key. The hash for this key has -** already been computed and is passed as the 4th parameter. -*/ -static fts1HashElem *findElementGivenHash( - const fts1Hash *pH, /* The pH to be searched */ - const void *pKey, /* The key we are searching for */ - int nKey, - int h /* The hash for this key. */ -){ - fts1HashElem *elem; /* Used to loop thru the element list */ - int count; /* Number of elements left to test */ - int (*xCompare)(const void*,int,const void*,int); /* comparison function */ - - if( pH->ht ){ - struct _fts1ht *pEntry = &pH->ht[h]; - elem = pEntry->chain; - count = pEntry->count; - xCompare = compareFunction(pH->keyClass); - while( count-- && elem ){ - if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; - } - } - return 0; -} - -/* Remove a single entry from the hash table given a pointer to that -** element and a hash on the element's key. 
-*/ -static void removeElementGivenHash( - fts1Hash *pH, /* The pH containing "elem" */ - fts1HashElem* elem, /* The element to be removed from the pH */ - int h /* Hash value for the element */ -){ - struct _fts1ht *pEntry; - if( elem->prev ){ - elem->prev->next = elem->next; - }else{ - pH->first = elem->next; - } - if( elem->next ){ - elem->next->prev = elem->prev; - } - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - if( pEntry->count<=0 ){ - pEntry->chain = 0; - } - if( pH->copyKey && elem->pKey ){ - pH->xFree(elem->pKey); - } - pH->xFree( elem ); - pH->count--; - if( pH->count<=0 ){ - assert( pH->first==0 ); - assert( pH->count==0 ); - fts1HashClear(pH); - } -} - -/* Attempt to locate an element of the hash table pH with a key -** that matches pKey,nKey. Return the data for this element if it is -** found, or NULL if there is no match. -*/ -void *sqlite3Fts1HashFind(const fts1Hash *pH, const void *pKey, int nKey){ - int h; /* A hash on key */ - fts1HashElem *elem; /* The element that matches key */ - int (*xHash)(const void*,int); /* The hash function */ - - if( pH==0 || pH->ht==0 ) return 0; - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - h = (*xHash)(pKey,nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); - return elem ? elem->data : 0; -} - -/* Insert an element into the hash table pH. The key is pKey,nKey -** and the data is "data". -** -** If no element exists with a matching key, then a new -** element is created. A copy of the key is made if the copyKey -** flag is set. NULL is returned. -** -** If another element already exists with the same key, then the -** new data replaces the old data and the old data is returned. -** The key is not copied in this instance. If a malloc fails, then -** the new data is returned and the hash table is unchanged. -** -** If the "data" parameter to this function is NULL, then the -** element corresponding to "key" is removed from the hash table. 
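The insertion semantics described above (a new key returns NULL, a replaced entry returns its old data, NULL data deletes the entry) suggest the following hedged usage sketch, written against the `sqlite3Fts1Hash*` entry points declared in `fts1_hash.h`:

```c
#include <stdio.h>
#include "fts1_hash.h"

int main(void){
  fts1Hash h;
  int one = 1, two = 2;
  void *pOld;

  /* String keys; copyKey=1 so the table keeps its own copy of each key. */
  sqlite3Fts1HashInit(&h, FTS1_HASH_STRING, 1);

  pOld = sqlite3Fts1HashInsert(&h, "alpha", 5, &one);   /* new key: returns NULL */
  pOld = sqlite3Fts1HashInsert(&h, "alpha", 5, &two);   /* replaced: returns &one */
  printf("replaced old value %d\n", *(int*)pOld);

  printf("lookup gives %d\n", *(int*)sqlite3Fts1HashFind(&h, "alpha", 5));

  sqlite3Fts1HashInsert(&h, "alpha", 5, NULL);          /* NULL data deletes */
  printf("deleted: %s\n",
         sqlite3Fts1HashFind(&h, "alpha", 5) ? "still there" : "gone");

  sqlite3Fts1HashClear(&h);   /* frees any remaining entries and buckets */
  return 0;
}
```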
-*/ -void *sqlite3Fts1HashInsert( - fts1Hash *pH, /* The hash table to insert into */ - const void *pKey, /* The key */ - int nKey, /* Number of bytes in the key */ - void *data /* The data */ -){ - int hraw; /* Raw hash value of the key */ - int h; /* the hash of the key modulo hash table size */ - fts1HashElem *elem; /* Used to loop thru the element list */ - fts1HashElem *new_elem; /* New element added to the pH */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( pH!=0 ); - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - hraw = (*xHash)(pKey, nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - elem = findElementGivenHash(pH,pKey,nKey,h); - if( elem ){ - void *old_data = elem->data; - if( data==0 ){ - removeElementGivenHash(pH,elem,h); - }else{ - elem->data = data; - } - return old_data; - } - if( data==0 ) return 0; - new_elem = (fts1HashElem*)pH->xMalloc( sizeof(fts1HashElem) ); - if( new_elem==0 ) return data; - if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = pH->xMalloc( nKey ); - if( new_elem->pKey==0 ){ - pH->xFree(new_elem); - return data; - } - memcpy((void*)new_elem->pKey, pKey, nKey); - }else{ - new_elem->pKey = (void*)pKey; - } - new_elem->nKey = nKey; - pH->count++; - if( pH->htsize==0 ){ - rehash(pH,8); - if( pH->htsize==0 ){ - pH->count = 0; - pH->xFree(new_elem); - return data; - } - } - if( pH->count > pH->htsize ){ - rehash(pH,pH->htsize*2); - } - assert( pH->htsize>0 ); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - insertElement(pH, &pH->ht[h], new_elem); - new_elem->data = data; - return 0; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/ext/fts1/fts1_hash.h b/ext/fts1/fts1_hash.h deleted file mode 100644 index 9001152931..0000000000 --- a/ext/fts1/fts1_hash.h +++ /dev/null @@ -1,112 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implementation -** used in SQLite. We've modified it slightly to serve as a standalone -** hash table implementation for the full-text indexing module. -** -*/ -#ifndef _FTS1_HASH_H_ -#define _FTS1_HASH_H_ - -/* Forward declarations of structures. */ -typedef struct fts1Hash fts1Hash; -typedef struct fts1HashElem fts1HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, many of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. 
-*/ -struct fts1Hash { - char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ - char copyKey; /* True if copy of key made on insert */ - int count; /* Number of entries in this table */ - fts1HashElem *first; /* The first element of the array */ - void *(*xMalloc)(int); /* malloc() function to use */ - void (*xFree)(void *); /* free() function to use */ - int htsize; /* Number of buckets in the hash table */ - struct _fts1ht { /* the hash table */ - int count; /* Number of entries with this hash */ - fts1HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. All elements are stored on a single doubly-linked list. -** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct fts1HashElem { - fts1HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - void *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** There are 2 different modes of operation for a hash table: -** -** FTS1_HASH_STRING pKey points to a string that is nKey bytes long -** (including the null-terminator, if any). Case -** is respected in comparisons. -** -** FTS1_HASH_BINARY pKey points to binary data nKey bytes long. -** memcmp() is used to compare keys. -** -** A copy of the key is made if the copyKey parameter to fts1HashInit is 1. -*/ -#define FTS1_HASH_STRING 1 -#define FTS1_HASH_BINARY 2 - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -void sqlite3Fts1HashInit(fts1Hash*, int keytype, int copyKey); -void *sqlite3Fts1HashInsert(fts1Hash*, const void *pKey, int nKey, void *pData); -void *sqlite3Fts1HashFind(const fts1Hash*, const void *pKey, int nKey); -void sqlite3Fts1HashClear(fts1Hash*); - -/* -** Shorthand for the functions above -*/ -#define fts1HashInit sqlite3Fts1HashInit -#define fts1HashInsert sqlite3Fts1HashInsert -#define fts1HashFind sqlite3Fts1HashFind -#define fts1HashClear sqlite3Fts1HashClear - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** fts1Hash h; -** fts1HashElem *p; -** ... -** for(p=fts1HashFirst(&h); p; p=fts1HashNext(p)){ -** SomeStructure *pData = fts1HashData(p); -** // do something with pData -** } -*/ -#define fts1HashFirst(H) ((H)->first) -#define fts1HashNext(E) ((E)->next) -#define fts1HashData(E) ((E)->data) -#define fts1HashKey(E) ((E)->pKey) -#define fts1HashKeysize(E) ((E)->nKey) - -/* -** Number of entries in a hash table -*/ -#define fts1HashCount(H) ((H)->count) - -#endif /* _FTS1_HASH_H_ */ diff --git a/ext/fts1/fts1_porter.c b/ext/fts1/fts1_porter.c deleted file mode 100644 index 1d26236681..0000000000 --- a/ext/fts1/fts1_porter.c +++ /dev/null @@ -1,643 +0,0 @@ -/* -** 2006 September 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Implementation of the full-text-search tokenizer that implements -** a Porter stemmer. 
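For orientation, the following is a minimal usage sketch of the fts1Hash interface declared above: initialize, insert, look up, iterate with the fts1HashFirst()/fts1HashNext() macros, delete by inserting NULL data, and clear. It is illustrative only; the keys and payloads are hypothetical, it assumes the default allocators installed by fts1HashInit(), and it follows the header's convention that string keys include their null terminator in nKey.

```c
#include <stdio.h>
#include <string.h>
#include "fts1_hash.h"

static void hash_demo(void){
  fts1Hash h;
  fts1HashElem *e;
  int one = 1, two = 2;
  int *p;

  fts1HashInit(&h, FTS1_HASH_STRING, 1);      /* copyKey=1: keys are duplicated */

  /* Insert returns the previous data for the key, or NULL if it was absent. */
  fts1HashInsert(&h, "alpha", 6, &one);        /* 6 = strlen("alpha")+1 */
  fts1HashInsert(&h, "beta",  5, &two);

  p = (int*)fts1HashFind(&h, "alpha", 6);      /* lookup: NULL if not found */
  printf("alpha -> %d\n", p ? *p : -1);

  /* The iteration idiom described in the header. */
  for(e=fts1HashFirst(&h); e; e=fts1HashNext(e)){
    printf("%s -> %d (count=%d)\n",
           (char*)fts1HashKey(e), *(int*)fts1HashData(e), fts1HashCount(&h));
  }

  fts1HashInsert(&h, "beta", 5, 0);            /* NULL data deletes the entry */
  fts1HashClear(&h);                           /* release all remaining entries */
}
```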
-*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS1 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS1 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) - - -#include -#include -#include -#include -#include - -#include "fts1_tokenizer.h" - -/* -** Class derived from sqlite3_tokenizer -*/ -typedef struct porter_tokenizer { - sqlite3_tokenizer base; /* Base class */ -} porter_tokenizer; - -/* -** Class derived from sqlit3_tokenizer_cursor -*/ -typedef struct porter_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *zInput; /* input we are tokenizing */ - int nInput; /* size of the input */ - int iOffset; /* current position in zInput */ - int iToken; /* index of next token to be returned */ - char *zToken; /* storage for current token */ - int nAllocated; /* space allocated to zToken buffer */ -} porter_tokenizer_cursor; - - -/* Forward declaration */ -static const sqlite3_tokenizer_module porterTokenizerModule; - - -/* -** Create a new tokenizer instance. -*/ -static int porterCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - porter_tokenizer *t; - t = (porter_tokenizer *) calloc(sizeof(*t), 1); - if( t==NULL ) return SQLITE_NOMEM; - - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int porterDestroy(sqlite3_tokenizer *pTokenizer){ - free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is zInput[0..nInput-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int porterOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *zInput, int nInput, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - porter_tokenizer_cursor *c; - - c = (porter_tokenizer_cursor *) malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->zInput = zInput; - if( zInput==0 ){ - c->nInput = 0; - }else if( nInput<0 ){ - c->nInput = (int)strlen(zInput); - }else{ - c->nInput = nInput; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->zToken = NULL; /* no space allocated, yet. */ - c->nAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** porterOpen() above. -*/ -static int porterClose(sqlite3_tokenizer_cursor *pCursor){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - free(c->zToken); - free(c); - return SQLITE_OK; -} -/* -** Vowel or consonant -*/ -static const char cType[] = { - 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, - 1, 1, 1, 2, 1 -}; - -/* -** isConsonant() and isVowel() determine if their first character in -** the string they point to is a consonant or a vowel, according -** to Porter ruls. -** -** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. -** 'Y' is a consonant unless it follows another consonant, -** in which case it is a vowel. -** -** In these routine, the letters are in reverse order. So the 'y' rule -** is that 'y' is a consonant unless it is followed by another -** consonent. 
-*/ -static int isVowel(const char*); -static int isConsonant(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return j; - return z[1]==0 || isVowel(z + 1); -} -static int isVowel(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return 1-j; - return isConsonant(z + 1); -} - -/* -** Let any sequence of one or more vowels be represented by V and let -** C be sequence of one or more consonants. Then every word can be -** represented as: -** -** [C] (VC){m} [V] -** -** In prose: A word is an optional consonant followed by zero or -** vowel-consonant pairs followed by an optional vowel. "m" is the -** number of vowel consonant pairs. This routine computes the value -** of m for the first i bytes of a word. -** -** Return true if the m-value for z is 1 or more. In other words, -** return true if z contains at least one vowel that is followed -** by a consonant. -** -** In this routine z[] is in reverse order. So we are really looking -** for an instance of of a consonant followed by a vowel. -*/ -static int m_gt_0(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* Like mgt0 above except we are looking for a value of m which is -** exactly 1 -*/ -static int m_eq_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 1; - while( isConsonant(z) ){ z++; } - return *z==0; -} - -/* Like mgt0 above except we are looking for a value of m>1 instead -** or m>0 -*/ -static int m_gt_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if there is a vowel anywhere within z[0..n-1] -*/ -static int hasVowel(const char *z){ - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if the word ends in a double consonant. -** -** The text is reversed here. So we are really looking at -** the first two characters of z[]. -*/ -static int doubleConsonant(const char *z){ - return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); -} - -/* -** Return TRUE if the word ends with three letters which -** are consonant-vowel-consonent and where the final consonant -** is not 'w', 'x', or 'y'. -** -** The word is reversed here. So we are really checking the -** first three letters and the first one cannot be in [wxy]. -*/ -static int star_oh(const char *z){ - return - z[0]!=0 && isConsonant(z) && - z[0]!='w' && z[0]!='x' && z[0]!='y' && - z[1]!=0 && isVowel(z+1) && - z[2]!=0 && isConsonant(z+2); -} - -/* -** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the -** ending to zTo. -** -** The input word *pz and zFrom are both in reverse order. zTo -** is in normal order. -** -** Return TRUE if zFrom matches. Return FALSE if zFrom does not -** match. Not that TRUE is returned even if xCond() fails and -** no substitution occurs. -*/ -static int stem( - char **pz, /* The word being stemmed (Reversed) */ - const char *zFrom, /* If the ending matches this... (Reversed) */ - const char *zTo, /* ... 
change the ending to this (not reversed) */ - int (*xCond)(const char*) /* Condition that must be true */ -){ - char *z = *pz; - while( *zFrom && *zFrom==*z ){ z++; zFrom++; } - if( *zFrom!=0 ) return 0; - if( xCond && !xCond(z) ) return 1; - while( *zTo ){ - *(--z) = *(zTo++); - } - *pz = z; - return 1; -} - -/* -** This is the fallback stemmer used when the porter stemmer is -** inappropriate. The input word is copied into the output with -** US-ASCII case folding. If the input word is too long (more -** than 20 bytes if it contains no digits or more than 6 bytes if -** it contains digits) then word is truncated to 20 or 6 bytes -** by taking 10 or 3 bytes from the beginning and end. -*/ -static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ - int i, mx, j; - int hasDigit = 0; - for(i=0; i='A' && c<='Z' ){ - zOut[i] = c - 'A' + 'a'; - }else{ - if( c>='0' && c<='9' ) hasDigit = 1; - zOut[i] = c; - } - } - mx = hasDigit ? 3 : 10; - if( nIn>mx*2 ){ - for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ - /* The word is too big or too small for the porter stemmer. - ** Fallback to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ - zReverse[j] = c + 'a' - 'A'; - }else if( c>='a' && c<='z' ){ - zReverse[j] = c; - }else{ - /* The use of a character not in [a-zA-Z] means that we fallback - ** to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - } - memset(&zReverse[sizeof(zReverse)-5], 0, 5); - z = &zReverse[j+1]; - - - /* Step 1a */ - if( z[0]=='s' ){ - if( - !stem(&z, "sess", "ss", 0) && - !stem(&z, "sei", "i", 0) && - !stem(&z, "ss", "ss", 0) - ){ - z++; - } - } - - /* Step 1b */ - z2 = z; - if( stem(&z, "dee", "ee", m_gt_0) ){ - /* Do nothing. The work was all in the test */ - }else if( - (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) - && z!=z2 - ){ - if( stem(&z, "ta", "ate", 0) || - stem(&z, "lb", "ble", 0) || - stem(&z, "zi", "ize", 0) ){ - /* Do nothing. 
The work was all in the test */ - }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ - z++; - }else if( m_eq_1(z) && star_oh(z) ){ - *(--z) = 'e'; - } - } - - /* Step 1c */ - if( z[0]=='y' && hasVowel(z+1) ){ - z[0] = 'i'; - } - - /* Step 2 */ - switch( z[1] ){ - case 'a': - stem(&z, "lanoita", "ate", m_gt_0) || - stem(&z, "lanoit", "tion", m_gt_0); - break; - case 'c': - stem(&z, "icne", "ence", m_gt_0) || - stem(&z, "icna", "ance", m_gt_0); - break; - case 'e': - stem(&z, "rezi", "ize", m_gt_0); - break; - case 'g': - stem(&z, "igol", "log", m_gt_0); - break; - case 'l': - stem(&z, "ilb", "ble", m_gt_0) || - stem(&z, "illa", "al", m_gt_0) || - stem(&z, "iltne", "ent", m_gt_0) || - stem(&z, "ile", "e", m_gt_0) || - stem(&z, "ilsuo", "ous", m_gt_0); - break; - case 'o': - stem(&z, "noitazi", "ize", m_gt_0) || - stem(&z, "noita", "ate", m_gt_0) || - stem(&z, "rota", "ate", m_gt_0); - break; - case 's': - stem(&z, "msila", "al", m_gt_0) || - stem(&z, "ssenevi", "ive", m_gt_0) || - stem(&z, "ssenluf", "ful", m_gt_0) || - stem(&z, "ssensuo", "ous", m_gt_0); - break; - case 't': - stem(&z, "itila", "al", m_gt_0) || - stem(&z, "itivi", "ive", m_gt_0) || - stem(&z, "itilib", "ble", m_gt_0); - break; - } - - /* Step 3 */ - switch( z[0] ){ - case 'e': - stem(&z, "etaci", "ic", m_gt_0) || - stem(&z, "evita", "", m_gt_0) || - stem(&z, "ezila", "al", m_gt_0); - break; - case 'i': - stem(&z, "itici", "ic", m_gt_0); - break; - case 'l': - stem(&z, "laci", "ic", m_gt_0) || - stem(&z, "luf", "", m_gt_0); - break; - case 's': - stem(&z, "ssen", "", m_gt_0); - break; - } - - /* Step 4 */ - switch( z[1] ){ - case 'a': - if( z[0]=='l' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'c': - if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'e': - if( z[0]=='r' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'i': - if( z[0]=='c' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'l': - if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'n': - if( z[0]=='t' ){ - if( z[2]=='a' ){ - if( m_gt_1(z+3) ){ - z += 3; - } - }else if( z[2]=='e' ){ - stem(&z, "tneme", "", m_gt_1) || - stem(&z, "tnem", "", m_gt_1) || - stem(&z, "tne", "", m_gt_1); - } - } - break; - case 'o': - if( z[0]=='u' ){ - if( m_gt_1(z+2) ){ - z += 2; - } - }else if( z[3]=='s' || z[3]=='t' ){ - stem(&z, "noi", "", m_gt_1); - } - break; - case 's': - if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 't': - stem(&z, "eta", "", m_gt_1) || - stem(&z, "iti", "", m_gt_1); - break; - case 'u': - if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 'v': - case 'z': - if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - } - - /* Step 5a */ - if( z[0]=='e' ){ - if( m_gt_1(z+1) ){ - z++; - }else if( m_eq_1(z+1) && !star_oh(z+1) ){ - z++; - } - } - - /* Step 5b */ - if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ - z++; - } - - /* z[] is now the stemmed word in reverse order. Flip it back - ** around into forward order and return. - */ - *pnOut = i = strlen(z); - zOut[i] = 0; - while( *z ){ - zOut[--i] = *(z++); - } -} - -/* -** Characters that can be part of a token. We assume any character -** whose value is greater than 0x80 (any UTF character) can be -** part of a token. In other words, delimiters all must have -** values of 0x7f or lower. 
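The m_gt_0(), m_eq_1() and m_gt_1() helpers used throughout the steps above all test Porter's measure m, the number of vowel-consonant pairs in the [C](VC){m}[V] decomposition, applied to reversed stems. As a rough aid to reading those conditions, here is a self-contained sketch (not taken from the code above) that computes m for an ordinary forward-order, lowercase word; the function names and sample words are illustrative only.

```c
#include <stdio.h>
#include <string.h>

/* 1 if w[i] is a vowel under Porter's rule: a,e,i,o,u are vowels,
** and 'y' counts as a vowel when it follows a consonant. */
static int is_vowel_at(const char *w, int i){
  char c = w[i];
  if( c=='a' || c=='e' || c=='i' || c=='o' || c=='u' ) return 1;
  if( c=='y' ) return i>0 && !is_vowel_at(w, i-1);
  return 0;
}

/* Porter's measure m: the number of vowel-run/consonant-run pairs,
** i.e. the m in [C](VC){m}[V]. */
static int porter_measure(const char *w){
  int m = 0, i = 0, n = (int)strlen(w);
  while( i<n && !is_vowel_at(w, i) ) i++;      /* optional leading [C] */
  while( i<n ){
    while( i<n && is_vowel_at(w, i) ) i++;     /* a run of vowels ...        */
    if( i==n ) break;
    m++;                                       /* ... followed by consonants */
    while( i<n && !is_vowel_at(w, i) ) i++;
  }
  return m;
}

int main(void){
  /* Expected output: m(tree)=0, m(trouble)=1, m(private)=2 */
  const char *words[] = { "tree", "trouble", "private" };
  int i;
  for(i=0; i<3; i++){
    printf("m(%s) = %d\n", words[i], porter_measure(words[i]));
  }
  return 0;
}
```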
-*/ -static const char isIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ -}; -#define idChar(C) (((ch=C)&0x80)!=0 || (ch>0x2f && isIdChar[ch-0x30])) -#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !isIdChar[ch-0x30])) - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to porterOpen(). -*/ -static int porterNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ - const char **pzToken, /* OUT: *pzToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - const char *z = c->zInput; - - while( c->iOffsetnInput ){ - int iStartOffset, ch; - - /* Scan past delimiter characters */ - while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int n = c->iOffset-iStartOffset; - if( n>c->nAllocated ){ - c->nAllocated = n+20; - c->zToken = realloc(c->zToken, c->nAllocated); - if( c->zToken==NULL ) return SQLITE_NOMEM; - } - porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); - *pzToken = c->zToken; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the porter-stemmer tokenizer -*/ -static const sqlite3_tokenizer_module porterTokenizerModule = { - 0, - porterCreate, - porterDestroy, - porterOpen, - porterClose, - porterNext, -}; - -/* -** Allocate a new porter tokenizer. Return a pointer to the new -** tokenizer in *ppModule -*/ -void sqlite3Fts1PorterTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &porterTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/ext/fts1/fts1_tokenizer.h b/ext/fts1/fts1_tokenizer.h deleted file mode 100644 index a48cb74519..0000000000 --- a/ext/fts1/fts1_tokenizer.h +++ /dev/null @@ -1,90 +0,0 @@ -/* -** 2006 July 10 -** -** The author disclaims copyright to this source code. -** -************************************************************************* -** Defines the interface to tokenizers used by fulltext-search. There -** are three basic components: -** -** sqlite3_tokenizer_module is a singleton defining the tokenizer -** interface functions. This is essentially the class structure for -** tokenizers. -** -** sqlite3_tokenizer is used to define a particular tokenizer, perhaps -** including customization information defined at creation time. -** -** sqlite3_tokenizer_cursor is generated by a tokenizer to generate -** tokens from a particular input. -*/ -#ifndef _FTS1_TOKENIZER_H_ -#define _FTS1_TOKENIZER_H_ - -/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. 
-** If tokenizers are to be allowed to call sqlite3_*() functions, then -** we will need a way to register the API consistently. -*/ -#include "sqlite3.h" - -/* -** Structures used by the tokenizer interface. -*/ -typedef struct sqlite3_tokenizer sqlite3_tokenizer; -typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; -typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; - -struct sqlite3_tokenizer_module { - int iVersion; /* currently 0 */ - - /* - ** Create and destroy a tokenizer. argc/argv are passed down from - ** the fulltext virtual table creation to allow customization. - */ - int (*xCreate)(int argc, const char *const*argv, - sqlite3_tokenizer **ppTokenizer); - int (*xDestroy)(sqlite3_tokenizer *pTokenizer); - - /* - ** Tokenize a particular input. Call xOpen() to prepare to - ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then - ** xClose() to free any internal state. The pInput passed to - ** xOpen() must exist until the cursor is closed. The ppToken - ** result from xNext() is only valid until the next call to xNext() - ** or until xClose() is called. - */ - /* TODO(shess) current implementation requires pInput to be - ** nul-terminated. This should either be fixed, or pInput/nBytes - ** should be converted to zInput. - */ - int (*xOpen)(sqlite3_tokenizer *pTokenizer, - const char *pInput, int nBytes, - sqlite3_tokenizer_cursor **ppCursor); - int (*xClose)(sqlite3_tokenizer_cursor *pCursor); - int (*xNext)(sqlite3_tokenizer_cursor *pCursor, - const char **ppToken, int *pnBytes, - int *piStartOffset, int *piEndOffset, int *piPosition); -}; - -struct sqlite3_tokenizer { - const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ - /* Tokenizer implementations will typically add additional fields */ -}; - -struct sqlite3_tokenizer_cursor { - sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ - /* Tokenizer implementations will typically add additional fields */ -}; - -/* -** Get the module for a tokenizer which generates tokens based on a -** set of non-token characters. The default is to break tokens at any -** non-alnum character, though the set of delimiters can also be -** specified by the first argv argument to xCreate(). -*/ -/* TODO(shess) This doesn't belong here. Need some sort of -** registration process. -*/ -void sqlite3Fts1SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); -void sqlite3Fts1PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); - -#endif /* _FTS1_TOKENIZER_H_ */ diff --git a/ext/fts1/fts1_tokenizer1.c b/ext/fts1/fts1_tokenizer1.c deleted file mode 100644 index f58fba8f8e..0000000000 --- a/ext/fts1/fts1_tokenizer1.c +++ /dev/null @@ -1,221 +0,0 @@ -/* -** The author disclaims copyright to this source code. -** -************************************************************************* -** Implementation of the "simple" full-text-search tokenizer. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS1 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS1 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS1 is defined). 
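The comments above spell out the calling sequence for a tokenizer module: xCreate(), then xOpen() on the text, xNext() until it stops returning SQLITE_OK, then xClose() and xDestroy(). The sketch below is a minimal driver following that sequence; it is illustrative only, it fetches the Porter module via sqlite3Fts1PorterTokenizerModule(), and it fills in pModule and pCursor->pTokenizer by hand the same way the fulltext module does, since the tokenizer implementations do not set those fields themselves.

```c
#include <stdio.h>
#include "fts1_tokenizer.h"

/* Tokenize zText and print each token with its byte offsets. */
static int dump_tokens(const char *zText){
  const sqlite3_tokenizer_module *pModule;
  sqlite3_tokenizer *pTokenizer = 0;
  sqlite3_tokenizer_cursor *pCursor = 0;
  const char *zToken;
  int nToken, iStart, iEnd, iPos;
  int rc;

  sqlite3Fts1PorterTokenizerModule(&pModule);
  rc = pModule->xCreate(0, 0, &pTokenizer);
  if( rc!=SQLITE_OK ) return rc;
  pTokenizer->pModule = pModule;              /* caller wires up the module */

  rc = pModule->xOpen(pTokenizer, zText, -1, &pCursor);
  if( rc!=SQLITE_OK ) return rc;
  pCursor->pTokenizer = pTokenizer;           /* cursor does not set this itself */

  while( pModule->xNext(pCursor, &zToken, &nToken,
                        &iStart, &iEnd, &iPos)==SQLITE_OK ){
    printf("token %d: %.*s (bytes %d..%d)\n", iPos, nToken, zToken, iStart, iEnd);
  }

  pModule->xClose(pCursor);
  return pModule->xDestroy(pTokenizer);
}
```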
-*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) - - -#include -#include -#include -#include -#include - -#include "fts1_tokenizer.h" - -typedef struct simple_tokenizer { - sqlite3_tokenizer base; - char delim[128]; /* flag ASCII delimiters */ -} simple_tokenizer; - -typedef struct simple_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *pInput; /* input we are tokenizing */ - int nBytes; /* size of the input */ - int iOffset; /* current position in pInput */ - int iToken; /* index of next token to be returned */ - char *pToken; /* storage for current token */ - int nTokenAllocated; /* space allocated to zToken buffer */ -} simple_tokenizer_cursor; - - -/* Forward declaration */ -static const sqlite3_tokenizer_module simpleTokenizerModule; - -static int isDelim(simple_tokenizer *t, unsigned char c){ - return c<0x80 && t->delim[c]; -} - -/* -** Create a new tokenizer instance. -*/ -static int simpleCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - simple_tokenizer *t; - - t = (simple_tokenizer *) calloc(sizeof(*t), 1); - if( t==NULL ) return SQLITE_NOMEM; - - /* TODO(shess) Delimiters need to remain the same from run to run, - ** else we need to reindex. One solution would be a meta-table to - ** track such information in the database, then we'd only want this - ** information on the initial create. - */ - if( argc>1 ){ - int i, n = strlen(argv[1]); - for(i=0; i=0x80 ){ - free(t); - return SQLITE_ERROR; - } - t->delim[ch] = 1; - } - } else { - /* Mark non-alphanumeric ASCII characters as delimiters */ - int i; - for(i=1; i<0x80; i++){ - t->delim[i] = !isalnum(i); - } - } - - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ - free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is pInput[0..nBytes-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int simpleOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *pInput, int nBytes, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - simple_tokenizer_cursor *c; - - c = (simple_tokenizer_cursor *) malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->pInput = pInput; - if( pInput==0 ){ - c->nBytes = 0; - }else if( nBytes<0 ){ - c->nBytes = (int)strlen(pInput); - }else{ - c->nBytes = nBytes; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->pToken = NULL; /* no space allocated, yet. */ - c->nTokenAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** simpleOpen() above. -*/ -static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - free(c->pToken); - free(c); - return SQLITE_OK; -} - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to simpleOpen(). 
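A concrete configuration example for simpleCreate(), with hypothetical inputs: with no delimiter argument, every non-alphanumeric ASCII byte splits tokens, so the text "e-mail me" produces the tokens "e", "mail" and "me". With a delimiter argument in argv[1] such as " .,", the hyphen is no longer a delimiter and the same text produces "e-mail" and "me". simpleNext(), shown next, additionally folds ASCII letters to lower case as it copies each token out.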
-*/ -static int simpleNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ - const char **ppToken, /* OUT: *ppToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; - unsigned char *p = (unsigned char *)c->pInput; - - while( c->iOffsetnBytes ){ - int iStartOffset; - - /* Scan past delimiter characters */ - while( c->iOffsetnBytes && isDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnBytes && !isDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int i, n = c->iOffset-iStartOffset; - if( n>c->nTokenAllocated ){ - c->nTokenAllocated = n+20; - c->pToken = realloc(c->pToken, c->nTokenAllocated); - if( c->pToken==NULL ) return SQLITE_NOMEM; - } - for(i=0; ipToken[i] = ch<0x80 ? tolower(ch) : ch; - } - *ppToken = c->pToken; - *pnBytes = n; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the simple tokenizer -*/ -static const sqlite3_tokenizer_module simpleTokenizerModule = { - 0, - simpleCreate, - simpleDestroy, - simpleOpen, - simpleClose, - simpleNext, -}; - -/* -** Allocate a new simple tokenizer. Return a pointer to the new -** tokenizer in *ppModule -*/ -void sqlite3Fts1SimpleTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &simpleTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS1) */ diff --git a/ext/fts1/fulltext.c b/ext/fts1/fulltext.c deleted file mode 100644 index 313ff303e1..0000000000 --- a/ext/fts1/fulltext.c +++ /dev/null @@ -1,1511 +0,0 @@ -/* The author disclaims copyright to this source code. - * - * This is an SQLite module implementing full-text search. - */ - -#include -#if !defined(__APPLE__) -#include -#else -#include -#endif -#include -#include -#include - -#include "fulltext.h" -#include "ft_hash.h" -#include "tokenizer.h" -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT1 - -/* utility functions */ - -/* We encode variable-length integers in little-endian order using seven bits - * per byte as follows: -** -** KEY: -** A = 0xxxxxxx 7 bits of data and one flag bit -** B = 1xxxxxxx 7 bits of data and one flag bit -** -** 7 bits - A -** 14 bits - BA -** 21 bits - BBA -** and so on. -*/ - -/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ -#define VARINT_MAX 10 - -/* Write a 64-bit variable-length integer to memory starting at p[0]. - * The length of data written will be between 1 and VARINT_MAX bytes. - * The number of bytes written is returned. */ -static int putVarint(char *p, sqlite_int64 v){ - unsigned char *q = (unsigned char *) p; - sqlite_uint64 vu = v; - do{ - *q++ = (unsigned char) ((vu & 0x7f) | 0x80); - vu >>= 7; - }while( vu!=0 ); - q[-1] &= 0x7f; /* turn off high bit in final byte */ - assert( q - (unsigned char *)p <= VARINT_MAX ); - return (int) (q - (unsigned char *)p); -} - -/* Read a 64-bit variable-length integer from memory starting at p[0]. - * Return the number of bytes read, or 0 on error. - * The value is stored in *v. 
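As a worked example of the varint format described above (seven data bits per byte, least-significant group first, high bit set on every byte except the last): the value 300, binary 1 0010 1100, encodes to the two bytes AC 02. The sketch below round-trips that value with a local reimplementation of the same scheme; it is independent of the putVarint()/getVarint() routines here and is for illustration only.

```c
#include <stdio.h>
#include <assert.h>

typedef unsigned long long u64;

/* Encode v using 7 data bits per byte, low-order group first; the high
** bit marks "more bytes follow".  Returns the number of bytes written. */
static int demo_put_varint(unsigned char *p, u64 v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v );
  p[n-1] &= 0x7f;                  /* clear the flag bit on the final byte */
  return n;
}

/* Decode the same format; returns the number of bytes consumed. */
static int demo_get_varint(const unsigned char *p, u64 *pv){
  u64 x = 0;
  int shift = 0, n = 0;
  for(;;){
    x |= (u64)(p[n] & 0x7f) << shift;
    if( (p[n++] & 0x80)==0 ) break;
    shift += 7;
  }
  *pv = x;
  return n;
}

int main(void){
  unsigned char buf[10];
  u64 v = 0;
  int i, n = demo_put_varint(buf, 300);
  for(i=0; i<n; i++) printf("%02X ", buf[i]);   /* prints: AC 02 */
  demo_get_varint(buf, &v);
  printf("-> %llu\n", v);                       /* prints: -> 300 */
  assert( v==300 );
  return 0;
}
```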
*/ -static int getVarint(const char *p, sqlite_int64 *v){ - const unsigned char *q = (const unsigned char *) p; - sqlite_uint64 x = 0, y = 1; - while( (*q & 0x80) == 0x80 ){ - x += y * (*q++ & 0x7f); - y <<= 7; - if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ - assert( 0 ); - return 0; - } - } - x += y * (*q++); - *v = (sqlite_int64) x; - return (int) (q - (unsigned char *)p); -} - -static int getVarint32(const char *p, int *pi){ - sqlite_int64 i; - int ret = getVarint(p, &i); - *pi = (int) i; - assert( *pi==i ); - return ret; -} - -/*** Document lists *** - * - * A document list holds a sorted list of varint-encoded document IDs. - * - * A doclist with type DL_POSITIONS_OFFSETS is stored like this: - * - * array { - * varint docid; - * array { - * varint position; (delta from previous position plus 1, or 0 for end) - * varint startOffset; (delta from previous startOffset) - * varint endOffset; (delta from startOffset) - * } - * } - * - * Here, array { X } means zero or more occurrences of X, adjacent in memory. - * - * A doclist with type DL_POSITIONS is like the above, but holds only docids - * and positions without offset information. - * - * A doclist with type DL_DOCIDS is like the above, but holds only docids - * without positions or offset information. - * - * On disk, every document list has positions and offsets, so we don't bother - * to serialize a doclist's type. - * - * We don't yet delta-encode document IDs; doing so will probably be a - * modest win. - * - * NOTE(shess) I've thought of a slightly (1%) better offset encoding. - * After the first offset, estimate the next offset by using the - * current token position and the previous token position and offset, - * offset to handle some variance. So the estimate would be - * (iPosition*w->iStartOffset/w->iPosition-64), which is delta-encoded - * as normal. Offsets more than 64 chars from the estimate are - * encoded as the delta to the previous start offset + 128. An - * additional tiny increment can be gained by using the end offset of - * the previous token to make the estimate a tiny bit more precise. -*/ - -typedef enum DocListType { - DL_DOCIDS, /* docids only */ - DL_POSITIONS, /* docids + positions */ - DL_POSITIONS_OFFSETS /* docids + positions + offsets */ -} DocListType; - -typedef struct DocList { - char *pData; - int nData; - DocListType iType; - int iLastPos; /* the last position written */ - int iLastOffset; /* the last start offset written */ -} DocList; - -/* Initialize a new DocList to hold the given data. */ -static void docListInit(DocList *d, DocListType iType, - const char *pData, int nData){ - d->nData = nData; - if( nData>0 ){ - d->pData = malloc(nData); - memcpy(d->pData, pData, nData); - } else { - d->pData = NULL; - } - d->iType = iType; - d->iLastPos = 0; - d->iLastOffset = 0; -} - -/* Create a new dynamically-allocated DocList. */ -static DocList *docListNew(DocListType iType){ - DocList *d = (DocList *) malloc(sizeof(DocList)); - docListInit(d, iType, 0, 0); - return d; -} - -static void docListDestroy(DocList *d){ - free(d->pData); -#ifndef NDEBUG - memset(d, 0x55, sizeof(*d)); -#endif -} - -static void docListDelete(DocList *d){ - docListDestroy(d); - free(d); -} - -static char *docListEnd(DocList *d){ - return d->pData + d->nData; -} - -/* Append a varint to a DocList's data. 
*/ -static void appendVarint(DocList *d, sqlite_int64 i){ - char c[VARINT_MAX]; - int n = putVarint(c, i); - d->pData = realloc(d->pData, d->nData + n); - memcpy(d->pData + d->nData, c, n); - d->nData += n; -} - -static void docListAddDocid(DocList *d, sqlite_int64 iDocid){ - appendVarint(d, iDocid); - d->iLastPos = 0; -} - -/* Add a position to the last position list in a doclist. */ -static void docListAddPos(DocList *d, int iPos){ - assert( d->iType>=DL_POSITIONS ); - appendVarint(d, iPos-d->iLastPos+1); - d->iLastPos = iPos; -} - -static void docListAddPosOffset(DocList *d, int iPos, - int iStartOffset, int iEndOffset){ - assert( d->iType==DL_POSITIONS_OFFSETS ); - docListAddPos(d, iPos); - appendVarint(d, iStartOffset-d->iLastOffset); - d->iLastOffset = iStartOffset; - appendVarint(d, iEndOffset-iStartOffset); -} - -/* Terminate the last position list in the given doclist. */ -static void docListAddEndPos(DocList *d){ - appendVarint(d, 0); -} - -typedef struct DocListReader { - DocList *pDoclist; - char *p; - int iLastPos; /* the last position read */ -} DocListReader; - -static void readerInit(DocListReader *r, DocList *pDoclist){ - r->pDoclist = pDoclist; - if( pDoclist!=NULL ){ - r->p = pDoclist->pData; - } - r->iLastPos = 0; -} - -static int readerAtEnd(DocListReader *pReader){ - return pReader->p >= docListEnd(pReader->pDoclist); -} - -/* Peek at the next docid without advancing the read pointer. */ -static sqlite_int64 peekDocid(DocListReader *pReader){ - sqlite_int64 ret; - assert( !readerAtEnd(pReader) ); - getVarint(pReader->p, &ret); - return ret; -} - -/* Read the next docid. */ -static sqlite_int64 readDocid(DocListReader *pReader){ - sqlite_int64 ret; - assert( !readerAtEnd(pReader) ); - pReader->p += getVarint(pReader->p, &ret); - pReader->iLastPos = 0; - return ret; -} - -/* Read the next position from a position list. - * Returns the position, or -1 at the end of the list. */ -static int readPosition(DocListReader *pReader){ - int i; - int iType = pReader->pDoclist->iType; - assert( iType>=DL_POSITIONS ); - assert( !readerAtEnd(pReader) ); - - pReader->p += getVarint32(pReader->p, &i); - if( i==0 ){ - pReader->iLastPos = -1; - return -1; - } - pReader->iLastPos += ((int) i)-1; - if( iType>=DL_POSITIONS_OFFSETS ){ - /* Skip over offsets, ignoring them for now. */ - int iStart, iEnd; - pReader->p += getVarint32(pReader->p, &iStart); - pReader->p += getVarint32(pReader->p, &iEnd); - } - return pReader->iLastPos; -} - -/* Skip past the end of a position list. */ -static void skipPositionList(DocListReader *pReader){ - while( readPosition(pReader)!=-1 ) - ; -} - -/* Skip over a docid, including its position list if the doclist has - * positions. */ -static void skipDocument(DocListReader *pReader){ - readDocid(pReader); - if( pReader->pDoclist->iType >= DL_POSITIONS ){ - skipPositionList(pReader); - } -} - -static sqlite_int64 firstDocid(DocList *d){ - DocListReader r; - readerInit(&r, d); - return readDocid(&r); -} - -/* Doclist multi-tool. Pass pUpdate==NULL to delete the indicated docid; - * otherwise pUpdate, which must contain only the single docid [iDocid], is - * inserted (if not present) or updated (if already present). 
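Worked example of the DL_POSITIONS_OFFSETS layout produced by the routines above, using made-up numbers small enough to fit in single-byte varints: a doclist for docid 7 with one occurrence at position 3 spanning byte offsets 10..15 is written as 07 04 0A 05 00, i.e. the docid, the position encoded as delta from the previous position plus one (0+3+1 = 4), the start offset as a delta from the previous start offset (10), the end offset as a delta from the start offset (5), and the 0 that terminates the position list. A second occurrence at position 9 with offsets 40..44 would insert 07 1E 04 before that terminator (9-3+1 = 7, 40-10 = 30 = 0x1E, 44-40 = 4).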
*/ -static int docListUpdate(DocList *d, sqlite_int64 iDocid, DocList *pUpdate){ - int modified = 0; - DocListReader reader; - char *p; - - if( pUpdate!=NULL ){ - assert( d->iType==pUpdate->iType); - assert( iDocid==firstDocid(pUpdate) ); - } - - readerInit(&reader, d); - while( !readerAtEnd(&reader) && peekDocid(&reader)nData -= (reader.p - p); - modified = 1; - } - - /* Insert if indicated. */ - if( pUpdate!=NULL ){ - int iDoclist = p-d->pData; - docListAddEndPos(pUpdate); - - d->pData = realloc(d->pData, d->nData+pUpdate->nData); - p = d->pData + iDoclist; - - memmove(p+pUpdate->nData, p, docListEnd(d) - p); - memcpy(p, pUpdate->pData, pUpdate->nData); - d->nData += pUpdate->nData; - modified = 1; - } - - return modified; -} - -/* Split the second half of doclist d into a separate doclist d2. Returns 1 - * if successful, or 0 if d contains a single document and hence can't be - * split. */ -static int docListSplit(DocList *d, DocList *d2){ - const char *pSplitPoint = d->pData + d->nData / 2; - DocListReader reader; - - readerInit(&reader, d); - while( reader.piType, reader.p, docListEnd(d) - reader.p); - d->nData = reader.p - d->pData; - d->pData = realloc(d->pData, d->nData); - return 1; -} - -/* A DocListMerge computes the AND of an in-memory DocList [in] and a chunked - * on-disk doclist, resulting in another in-memory DocList [out]. [in] - * and [out] may or may not store position information according to the - * caller's wishes. The on-disk doclist always comes with positions. - * - * The caller must read each chunk of the on-disk doclist in succession and - * pass it to mergeBlock(). - * - * If [in] has positions, then the merge output contains only documents with - * matching positions in the two input doclists. If [in] does not have - * positions, then the merge output contains all documents common to the two - * input doclists. - * - * If [in] is NULL, then the on-disk doclist is copied to [out] directly. - * - * A merge is performed using an integer [iOffset] provided by the caller. - * [iOffset] is subtracted from each position in the on-disk doclist for the - * purpose of position comparison; this is helpful in implementing phrase - * searches. - * - * A DocListMerge is not yet able to propagate offsets through query - * processing; we should add that capability soon. -*/ -typedef struct DocListMerge { - DocListReader in; - DocList *pOut; - int iOffset; -} DocListMerge; - -static void mergeInit(DocListMerge *m, - DocList *pIn, int iOffset, DocList *pOut){ - readerInit(&m->in, pIn); - m->pOut = pOut; - m->iOffset = iOffset; - - /* can't handle offsets yet */ - assert( pIn==NULL || pIn->iType <= DL_POSITIONS ); - assert( pOut->iType <= DL_POSITIONS ); -} - -/* A helper function for mergeBlock(), below. Merge the position lists - * pointed to by m->in and pBlockReader. - * If the merge matches, write [iDocid] to m->pOut; if m->pOut - * has positions then write all matching positions as well. 
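To make the iOffset mechanism concrete with illustrative numbers: when evaluating a phrase such as "new nation", the doclist for "new" becomes the in-memory input [in], and the on-disk doclist for "nation" is merged with iOffset = 1. A position p of "nation" in a block then matches a position q of "new" only when p - 1 == q, i.e. when "nation" occurs immediately after "new"; only documents (and, when requested, positions) that pass this test are written to the output doclist by mergePosList() below.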
*/ -static void mergePosList(DocListMerge *m, sqlite_int64 iDocid, - DocListReader *pBlockReader){ - int block_pos = readPosition(pBlockReader); - int in_pos = readPosition(&m->in); - int match = 0; - while( block_pos!=-1 || in_pos!=-1 ){ - if( block_pos-m->iOffset==in_pos ){ - if( !match ){ - docListAddDocid(m->pOut, iDocid); - match = 1; - } - if( m->pOut->iType >= DL_POSITIONS ){ - docListAddPos(m->pOut, in_pos); - } - block_pos = readPosition(pBlockReader); - in_pos = readPosition(&m->in); - } else if( in_pos==-1 || (block_pos!=-1 && block_pos-m->iOffsetin); - } - } - if( m->pOut->iType >= DL_POSITIONS && match ){ - docListAddEndPos(m->pOut); - } -} - -/* Merge one block of an on-disk doclist into a DocListMerge. */ -static void mergeBlock(DocListMerge *m, DocList *pBlock){ - DocListReader blockReader; - assert( pBlock->iType >= DL_POSITIONS ); - readerInit(&blockReader, pBlock); - while( !readerAtEnd(&blockReader) ){ - sqlite_int64 iDocid = readDocid(&blockReader); - if( m->in.pDoclist!=NULL ){ - while( 1 ){ - if( readerAtEnd(&m->in) ) return; /* nothing more to merge */ - if( peekDocid(&m->in)>=iDocid ) break; - skipDocument(&m->in); - } - if( peekDocid(&m->in)>iDocid ){ /* [pIn] has no match with iDocid */ - skipPositionList(&blockReader); /* skip this docid in the block */ - continue; - } - readDocid(&m->in); - } - /* We have a document match. */ - if( m->in.pDoclist==NULL || m->in.pDoclist->iType < DL_POSITIONS ){ - /* We don't need to do a poslist merge. */ - docListAddDocid(m->pOut, iDocid); - if( m->pOut->iType >= DL_POSITIONS ){ - /* Copy all positions to the output doclist. */ - while( 1 ){ - int pos = readPosition(&blockReader); - if( pos==-1 ) break; - docListAddPos(m->pOut, pos); - } - docListAddEndPos(m->pOut); - } else skipPositionList(&blockReader); - continue; - } - mergePosList(m, iDocid, &blockReader); - } -} - -static char *string_dup_n(const char *s, int n){ - char *str = malloc(n + 1); - memcpy(str, s, n); - str[n] = '\0'; - return str; -} - -/* Duplicate a string; the caller must free() the returned string. - * (We don't use strdup() since it's not part of the standard C library and - * may not be available everywhere.) */ -static char *string_dup(const char *s){ - return string_dup_n(s, strlen(s)); -} - -/* Format a string, replacing each occurrence of the % character with - * zName. This may be more convenient than sqlite_mprintf() - * when one string is used repeatedly in a format string. - * The caller must free() the returned string. */ -static char *string_format(const char *zFormat, const char *zName){ - const char *p; - size_t len = 0; - size_t nName = strlen(zName); - char *result; - char *r; - - /* first compute length needed */ - for(p = zFormat ; *p ; ++p){ - len += (*p=='%' ? 
nName : 1); - } - len += 1; /* for null terminator */ - - r = result = malloc(len); - for(p = zFormat; *p; ++p){ - if( *p=='%' ){ - memcpy(r, zName, nName); - r += nName; - } else { - *r++ = *p; - } - } - *r++ = '\0'; - assert( r == result + len ); - return result; -} - -static int sql_exec(sqlite3 *db, const char *zName, const char *zFormat){ - char *zCommand = string_format(zFormat, zName); - int rc = sqlite3_exec(db, zCommand, NULL, 0, NULL); - free(zCommand); - return rc; -} - -static int sql_prepare(sqlite3 *db, const char *zName, sqlite3_stmt **ppStmt, - const char *zFormat){ - char *zCommand = string_format(zFormat, zName); - int rc = sqlite3_prepare(db, zCommand, -1, ppStmt, NULL); - free(zCommand); - return rc; -} - -/* end utility functions */ - -#define QUERY_GENERIC 0 -#define QUERY_FULLTEXT 1 - -#define CHUNK_MAX 1024 - -typedef enum fulltext_statement { - CONTENT_INSERT_STMT, - CONTENT_SELECT_STMT, - CONTENT_DELETE_STMT, - - TERM_SELECT_STMT, - TERM_CHUNK_SELECT_STMT, - TERM_INSERT_STMT, - TERM_UPDATE_STMT, - TERM_DELETE_STMT, - - MAX_STMT /* Always at end! */ -} fulltext_statement; - -/* These must exactly match the enum above. */ -/* TODO(adam): Is there some risk that a statement (in particular, -** pTermSelectStmt) will be used in two cursors at once, e.g. if a -** query joins a virtual table to itself? If so perhaps we should -** move some of these to the cursor object. -*/ -static const char *fulltext_zStatement[MAX_STMT] = { - /* CONTENT_INSERT */ "insert into %_content (rowid, content) values (?, ?)", - /* CONTENT_SELECT */ "select content from %_content where rowid = ?", - /* CONTENT_DELETE */ "delete from %_content where rowid = ?", - - /* TERM_SELECT */ - "select rowid, doclist from %_term where term = ? and first = ?", - /* TERM_CHUNK_SELECT */ - "select max(first) from %_term where term = ? and first <= ?", - /* TERM_INSERT */ - "insert into %_term (term, first, doclist) values (?, ?, ?)", - /* TERM_UPDATE */ "update %_term set doclist = ? where rowid = ?", - /* TERM_DELETE */ "delete from %_term where rowid = ?", -}; - -typedef struct fulltext_vtab { - sqlite3_vtab base; - sqlite3 *db; - const char *zName; /* virtual table name */ - sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ - - /* Precompiled statements which we keep as long as the table is - ** open. - */ - sqlite3_stmt *pFulltextStatements[MAX_STMT]; -} fulltext_vtab; - -typedef struct fulltext_cursor { - sqlite3_vtab_cursor base; - int iCursorType; /* QUERY_GENERIC or QUERY_FULLTEXT */ - - sqlite3_stmt *pStmt; - - int eof; - - /* The following is used only when iCursorType == QUERY_FULLTEXT. */ - DocListReader result; -} fulltext_cursor; - -static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ - return (fulltext_vtab *) c->base.pVtab; -} - -static sqlite3_module fulltextModule; /* forward declaration */ - -/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. -** If the indicated statement has never been prepared, it is prepared -** and cached, otherwise the cached version is reset. 
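Concretely, each template in fulltext_zStatement[] is passed through string_format(), which replaces every '%' with the virtual table name. For a hypothetical table created as "mail", the TERM_SELECT template above expands to "select rowid, doclist from mail_term where term = ? and first = ?", and the resulting prepared statement is cached in pFulltextStatements[TERM_SELECT_STMT] for as long as the fulltext_vtab stays open.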
-*/ -static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - assert( iStmtpFulltextStatements[iStmt]==NULL ){ - int rc = sql_prepare(v->db, v->zName, &v->pFulltextStatements[iStmt], - fulltext_zStatement[iStmt]); - if( rc!=SQLITE_OK ) return rc; - } else { - int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); - if( rc!=SQLITE_OK ) return rc; - } - - *ppStmt = v->pFulltextStatements[iStmt]; - return SQLITE_OK; -} - -/* Step the indicated statement, handling errors SQLITE_BUSY (by -** retrying) and SQLITE_SCHEMA (by re-preparing and transferring -** bindings to the new statement). -** TODO(adam): We should extend this function so that it can work with -** statements declared locally, not only globally cached statements. -*/ -static int sql_step_statement(fulltext_vtab *v, fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - int rc; - sqlite3_stmt *s = *ppStmt; - assert( iStmtpFulltextStatements[iStmt] ); - - while( (rc=sqlite3_step(s))!=SQLITE_DONE && rc!=SQLITE_ROW ){ - sqlite3_stmt *pNewStmt; - - if( rc==SQLITE_BUSY ) continue; - if( rc!=SQLITE_ERROR ) return rc; - - rc = sqlite3_reset(s); - if( rc!=SQLITE_SCHEMA ) return SQLITE_ERROR; - - v->pFulltextStatements[iStmt] = NULL; /* Still in s */ - rc = sql_get_statement(v, iStmt, &pNewStmt); - if( rc!=SQLITE_OK ) goto err; - *ppStmt = pNewStmt; - - rc = sqlite3_transfer_bindings(s, pNewStmt); - if( rc!=SQLITE_OK ) goto err; - - rc = sqlite3_finalize(s); - if( rc!=SQLITE_OK ) return rc; - s = pNewStmt; - } - return rc; - - err: - sqlite3_finalize(s); - return rc; -} - -/* Like sql_step_statement(), but convert SQLITE_DONE to SQLITE_OK. -** Useful for statements like UPDATE, where we expect no results. -*/ -static int sql_single_step_statement(fulltext_vtab *v, - fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - int rc = sql_step_statement(v, iStmt, ppStmt); - return (rc==SQLITE_DONE) ? SQLITE_OK : rc; -} - -/* insert into %_content (rowid, content) values ([rowid], [zContent]) */ -static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, - const char *zContent, int nContent){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_value(s, 1, rowid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 2, zContent, nContent, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, CONTENT_INSERT_STMT, &s); -} - -/* select content from %_content where rowid = [iRow] - * The caller must delete the returned string. */ -static int content_select(fulltext_vtab *v, sqlite_int64 iRow, - char **pzContent){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_step_statement(v, CONTENT_SELECT_STMT, &s); - if( rc!=SQLITE_ROW ) return rc; - - *pzContent = string_dup((const char *)sqlite3_column_text(s, 0)); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. 
*/ - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ) return SQLITE_OK; - - free(*pzContent); - return rc; -} - -/* delete from %_content where rowid = [iRow ] */ -static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, CONTENT_DELETE_STMT, &s); -} - -/* select rowid, doclist from %_term where term = [zTerm] and first = [iFirst] - * If found, returns SQLITE_OK; the caller must free the returned doclist. - * If no rows found, returns SQLITE_ERROR. */ -static int term_select(fulltext_vtab *v, const char *zTerm, int nTerm, - sqlite_int64 iFirst, - sqlite_int64 *rowid, - DocList *out){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_TRANSIENT); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, iFirst); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_step_statement(v, TERM_SELECT_STMT, &s); - if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; - - *rowid = sqlite3_column_int64(s, 0); - docListInit(out, DL_POSITIONS_OFFSETS, - sqlite3_column_blob(s, 1), sqlite3_column_bytes(s, 1)); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - return rc==SQLITE_DONE ? SQLITE_OK : rc; -} - -/* select max(first) from %_term where term = [zTerm] and first <= [iFirst] - * If found, returns SQLITE_ROW and result in *piResult; if the query returns - * NULL (meaning no row found) returns SQLITE_DONE. - */ -static int term_chunk_select(fulltext_vtab *v, const char *zTerm, int nTerm, - sqlite_int64 iFirst, sqlite_int64 *piResult){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_CHUNK_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, iFirst); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_step_statement(v, TERM_CHUNK_SELECT_STMT, &s); - if( rc!=SQLITE_ROW ) return rc==SQLITE_DONE ? SQLITE_ERROR : rc; - - switch( sqlite3_column_type(s, 0) ){ - case SQLITE_NULL: - rc = SQLITE_DONE; - break; - case SQLITE_INTEGER: - *piResult = sqlite3_column_int64(s, 0); - break; - default: - return SQLITE_ERROR; - } - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. 
*/ - if( sqlite3_step(s) != SQLITE_DONE ) return SQLITE_ERROR; - return rc; -} - -/* insert into %_term (term, first, doclist) - values ([zTerm], [iFirst], [doclist]) */ -static int term_insert(fulltext_vtab *v, const char *zTerm, int nTerm, - sqlite_int64 iFirst, DocList *doclist){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(s, 1, zTerm, nTerm, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, iFirst); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 3, doclist->pData, doclist->nData, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_INSERT_STMT, &s); -} - -/* update %_term set doclist = [doclist] where rowid = [rowid] */ -static int term_update(fulltext_vtab *v, sqlite_int64 rowid, - DocList *doclist){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_UPDATE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 1, doclist->pData, doclist->nData, - SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, rowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_UPDATE_STMT, &s); -} - -static int term_delete(fulltext_vtab *v, sqlite_int64 rowid){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, TERM_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, rowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step_statement(v, TERM_DELETE_STMT, &s); -} - -static void fulltext_vtab_destroy(fulltext_vtab *v){ - int iStmt; - - for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ - sqlite3_finalize(v->pFulltextStatements[iStmt]); - v->pFulltextStatements[iStmt] = NULL; - } - } - - if( v->pTokenizer!=NULL ){ - v->pTokenizer->pModule->xDestroy(v->pTokenizer); - v->pTokenizer = NULL; - } - - free((void *) v->zName); - free(v); -} - -/* Current interface: -** argv[0] - module name -** argv[1] - database name -** argv[2] - table name -** argv[3] - tokenizer name (optional, a sensible default is provided) -** argv[4..] - passed to tokenizer (optional based on tokenizer) -**/ -static int fulltextConnect( - sqlite3 *db, - void *pAux, - int argc, - const char * const *argv, - sqlite3_vtab **ppVTab, - char **pzErr -){ - int rc; - fulltext_vtab *v; - sqlite3_tokenizer_module *m = NULL; - - assert( argc>=3 ); - v = (fulltext_vtab *) malloc(sizeof(fulltext_vtab)); - /* sqlite will initialize v->base */ - v->db = db; - v->zName = string_dup(argv[2]); - v->pTokenizer = NULL; - - if( argc==3 ){ - get_simple_tokenizer_module(&m); - } else { - /* TODO(shess) For now, add new tokenizers as else if clauses. */ - if( !strcmp(argv[3], "simple") ){ - get_simple_tokenizer_module(&m); - } else { - assert( "unrecognized tokenizer"==NULL ); - } - } - - /* TODO(shess) Since tokenization impacts the index, the parameters - ** to the tokenizer need to be identical when a persistent virtual - ** table is re-created. One solution would be a meta-table to track - ** such information in the database. Then we could verify that the - ** information is identical on subsequent creates. - */ - /* TODO(shess) Why isn't argv already (const char **)? 
*/ - rc = m->xCreate(argc-3, (const char **) (argv+3), &v->pTokenizer); - if( rc!=SQLITE_OK ) return rc; - v->pTokenizer->pModule = m; - - /* TODO: verify the existence of backing tables foo_content, foo_term */ - - rc = sqlite3_declare_vtab(db, "create table x(content text)"); - if( rc!=SQLITE_OK ) return rc; - - memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); - - *ppVTab = &v->base; - return SQLITE_OK; -} - -static int fulltextCreate( - sqlite3 *db, - void *pAux, - int argc, - const char * const *argv, - sqlite3_vtab **ppVTab, - char **pzErr -){ - int rc; - assert( argc>=3 ); - - /* The %_content table holds the text of each full-text item, with - ** the rowid used as the docid. - ** - ** The %_term table maps each term to a document list blob - ** containing elements sorted by ascending docid, each element - ** encoded as: - ** - ** docid varint-encoded - ** token count varint-encoded - ** "count" token elements (poslist): - ** position varint-encoded as delta from previous position - ** start offset varint-encoded as delta from previous start offset - ** end offset varint-encoded as delta from start offset - ** - ** Additionally, doclist blobs can be chunked into multiple rows, - ** using "first" to order the blobs. "first" is simply the first - ** docid in the blob. - */ - /* - ** NOTE(shess) That last sentence is incorrect in the face of - ** deletion, which can leave a doclist that doesn't contain the - ** first from that row. I _believe_ this does not matter to the - ** operation of the system, but it might be reasonable to update - ** appropriately in case this assumption becomes more important. - */ - rc = sql_exec(db, argv[2], - "create table %_content(content text);" - "create table %_term(term text, first integer, doclist blob);" - "create index %_index on %_term(term, first)"); - if( rc!=SQLITE_OK ) return rc; - - return fulltextConnect(db, pAux, argc, argv, ppVTab, pzErr); -} - -/* Decide how to handle an SQL query. - * At the moment, MATCH queries can include implicit boolean ANDs; we - * haven't implemented phrase searches or OR yet. 
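As a concrete illustration of the schema created above: declaring a hypothetical virtual table named "mail" produces a table mail_content(content text) whose rowid serves as the docid, a table mail_term(term text, first integer, doclist blob) holding the chunked posting lists, and an index mail_index on mail_term(term, first) that term_select() and term_chunk_select() depend on.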
*/ -static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ - int i; - - for(i=0; inConstraint; ++i){ - const struct sqlite3_index_constraint *pConstraint; - pConstraint = &pInfo->aConstraint[i]; - if( pConstraint->iColumn==0 && - pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH && - pConstraint->usable ){ /* a full-text search */ - pInfo->aConstraintUsage[i].argvIndex = 1; - pInfo->aConstraintUsage[i].omit = 1; - pInfo->idxNum = QUERY_FULLTEXT; - pInfo->estimatedCost = 1.0; /* an arbitrary value for now */ - return SQLITE_OK; - } - } - pInfo->idxNum = QUERY_GENERIC; - return SQLITE_OK; -} - -static int fulltextDisconnect(sqlite3_vtab *pVTab){ - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextDestroy(sqlite3_vtab *pVTab){ - fulltext_vtab *v = (fulltext_vtab *)pVTab; - - int rc = sql_exec(v->db, v->zName, - "drop table %_content; drop table %_term"); - if( rc!=SQLITE_OK ) return rc; - - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ - fulltext_cursor *c; - - c = (fulltext_cursor *) calloc(sizeof(fulltext_cursor), 1); - /* sqlite will initialize c->base */ - *ppCursor = &c->base; - - return SQLITE_OK; -} - -static int fulltextClose(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - sqlite3_finalize(c->pStmt); - if( c->result.pDoclist!=NULL ){ - docListDelete(c->result.pDoclist); - } - free(c); - return SQLITE_OK; -} - -static int fulltextNext(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - sqlite_int64 iDocid; - int rc; - - switch( c->iCursorType ){ - case QUERY_GENERIC: - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - switch( rc ){ - case SQLITE_ROW: - c->eof = 0; - return SQLITE_OK; - case SQLITE_DONE: - c->eof = 1; - return SQLITE_OK; - default: - c->eof = 1; - return rc; - } - case QUERY_FULLTEXT: - rc = sqlite3_reset(c->pStmt); - if( rc!=SQLITE_OK ) return rc; - - if( readerAtEnd(&c->result)){ - c->eof = 1; - return SQLITE_OK; - } - iDocid = readDocid(&c->result); - rc = sqlite3_bind_int64(c->pStmt, 1, iDocid); - if( rc!=SQLITE_OK ) return rc; - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - if( rc==SQLITE_ROW ){ /* the case we expect */ - c->eof = 0; - return SQLITE_OK; - } - /* an error occurred; abort */ - return rc==SQLITE_DONE ? SQLITE_ERROR : rc; - default: - assert( 0 ); - return SQLITE_ERROR; /* not reached */ - } -} - -static int term_select_doclist(fulltext_vtab *v, const char *pTerm, int nTerm, - sqlite3_stmt **ppStmt){ - int rc; - if( *ppStmt ){ - rc = sqlite3_reset(*ppStmt); - } else { - rc = sql_prepare(v->db, v->zName, ppStmt, - "select doclist from %_term where term = ? order by first"); - } - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_text(*ppStmt, 1, pTerm, nTerm, SQLITE_TRANSIENT); - if( rc!=SQLITE_OK ) return rc; - - return sqlite3_step(*ppStmt); /* TODO(adamd): handle schema error */ -} - -/* Read the posting list for [zTerm]; AND it with the doclist [in] to - * produce the doclist [out], using the given offset [iOffset] for phrase - * matching. - * (*pSelect) is used to hold an SQLite statement used inside this function; - * the caller should initialize *pSelect to NULL before the first call. 
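Putting fulltextBestIndex() in context with a hypothetical query: for SELECT rowid, content FROM mail WHERE content MATCH 'sqlite', the MATCH constraint on column 0 is usable, so the plan is tagged QUERY_FULLTEXT, the match expression is handed to xFilter as argv[0] (argvIndex = 1), and SQLite is told to omit its own evaluation of the constraint. Any other query falls back to QUERY_GENERIC, a plain scan of the %_content table.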
- */ -static int query_merge(fulltext_vtab *v, sqlite3_stmt **pSelect, - const char *zTerm, - DocList *pIn, int iOffset, DocList *out){ - int rc; - DocListMerge merge; - - if( pIn!=NULL && !pIn->nData ){ - /* If [pIn] is already empty, there's no point in reading the - * posting list to AND it in; return immediately. */ - return SQLITE_OK; - } - - rc = term_select_doclist(v, zTerm, -1, pSelect); - if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; - - mergeInit(&merge, pIn, iOffset, out); - while( rc==SQLITE_ROW ){ - DocList block; - docListInit(&block, DL_POSITIONS_OFFSETS, - sqlite3_column_blob(*pSelect, 0), - sqlite3_column_bytes(*pSelect, 0)); - mergeBlock(&merge, &block); - docListDestroy(&block); - - rc = sqlite3_step(*pSelect); - if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ){ - return rc; - } - } - - return SQLITE_OK; -} - -typedef struct QueryTerm { - int is_phrase; /* true if this term begins a new phrase */ - const char *zTerm; -} QueryTerm; - -/* A parsed query. - * - * As an example, parsing the query ["four score" years "new nation"] will - * yield a Query with 5 terms: - * "four", is_phrase = 1 - * "score", is_phrase = 0 - * "years", is_phrase = 1 - * "new", is_phrase = 1 - * "nation", is_phrase = 0 - */ -typedef struct Query { - int nTerms; - QueryTerm *pTerm; -} Query; - -static void query_add(Query *q, int is_phrase, const char *zTerm){ - QueryTerm *t; - ++q->nTerms; - q->pTerm = realloc(q->pTerm, q->nTerms * sizeof(q->pTerm[0])); - t = &q->pTerm[q->nTerms - 1]; - t->is_phrase = is_phrase; - t->zTerm = zTerm; -} - -static void query_free(Query *q){ - int i; - for(i = 0; i < q->nTerms; ++i){ - free((void *) q->pTerm[i].zTerm); - } - free(q->pTerm); -} - -static int tokenize_segment(sqlite3_tokenizer *pTokenizer, - const char *zQuery, int in_phrase, - Query *pQuery){ - sqlite3_tokenizer_module *pModule = pTokenizer->pModule; - sqlite3_tokenizer_cursor *pCursor; - int is_first = 1; - - int rc = pModule->xOpen(pTokenizer, zQuery, -1, &pCursor); - if( rc!=SQLITE_OK ) return rc; - pCursor->pTokenizer = pTokenizer; - - while( 1 ){ - const char *zToken; - int nToken, iStartOffset, iEndOffset, dummy_pos; - - rc = pModule->xNext(pCursor, - &zToken, &nToken, - &iStartOffset, &iEndOffset, - &dummy_pos); - if( rc!=SQLITE_OK ) break; - query_add(pQuery, !in_phrase || is_first, string_dup_n(zToken, nToken)); - is_first = 0; - } - - return pModule->xClose(pCursor); -} - -/* Parse a query string, yielding a Query object. */ -static int parse_query(fulltext_vtab *v, const char *zQuery, Query *pQuery){ - char *zQuery1 = string_dup(zQuery); - int in_phrase = 0; - char *s = zQuery1; - pQuery->nTerms = 0; - pQuery->pTerm = NULL; - - while( *s ){ - char *t = s; - while( *t ){ - if( *t=='"' ){ - *t++ = '\0'; - break; - } - ++t; - } - if( *s ){ - tokenize_segment(v->pTokenizer, s, in_phrase, pQuery); - } - s = t; - in_phrase = !in_phrase; - } - - free(zQuery1); - return SQLITE_OK; -} - -/* Perform a full-text query; return a list of documents in [pResult]. */ -static int fulltext_query(fulltext_vtab *v, const char *zQuery, - DocList **pResult){ - Query q; - int phrase_start = -1; - int i; - sqlite3_stmt *pSelect = NULL; - DocList *d = NULL; - - int rc = parse_query(v, zQuery, &q); - if( rc!=SQLITE_OK ) return rc; - - /* Merge terms. */ - for(i = 0 ; i < q.nTerms ; ++i){ - /* In each merge step, we need to generate positions whenever we're - * processing a phrase which hasn't ended yet. 
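The quote handling in parse_query() above can be illustrated with a minimal standalone sketch (hypothetical code, not the extension's): it cuts the query at each double quote and toggles an in_phrase flag, so the example query ["four score" years "new nation"] splits into alternating phrase and non-phrase segments that are then handed to the tokenizer.

```c
#include <stdio.h>
#include <string.h>

/* Split a query on double quotes and report which segments were quoted,
** mirroring the in_phrase toggling in parse_query() above. */
int main(void){
  char zQuery[] = "\"four score\" years \"new nation\"";
  int in_phrase = 0;
  char *s = zQuery;

  while( *s ){
    char *t = s;
    while( *t && *t!='"' ) t++;      /* find the next quote (or end of input) */
    if( *t ) *t++ = '\0';            /* terminate this segment                */
    if( *s ) printf("segment [%s] in_phrase=%d\n", s, in_phrase);
    s = t;
    in_phrase = !in_phrase;          /* text between quotes is a phrase       */
  }
  return 0;
}
```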
*/ - int need_positions = iiCursorType = idxNum; - switch( idxNum ){ - case QUERY_GENERIC: - zStatement = "select rowid, content from %_content"; - break; - - case QUERY_FULLTEXT: /* full-text search */ - { - const char *zQuery = (const char *)sqlite3_value_text(argv[0]); - DocList *pResult; - assert( argc==1 ); - rc = fulltext_query(v, zQuery, &pResult); - if( rc!=SQLITE_OK ) return rc; - readerInit(&c->result, pResult); - zStatement = "select rowid, content from %_content where rowid = ?"; - break; - } - - default: - assert( 0 ); - } - - rc = sql_prepare(v->db, v->zName, &c->pStmt, zStatement); - if( rc!=SQLITE_OK ) return rc; - - return fulltextNext(pCursor); -} - -static int fulltextEof(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - return c->eof; -} - -static int fulltextColumn(sqlite3_vtab_cursor *pCursor, - sqlite3_context *pContext, int idxCol){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - const char *s; - - assert( idxCol==0 ); - s = (const char *) sqlite3_column_text(c->pStmt, 1); - sqlite3_result_text(pContext, s, -1, SQLITE_TRANSIENT); - - return SQLITE_OK; -} - -static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - - *pRowid = sqlite3_column_int64(c->pStmt, 0); - return SQLITE_OK; -} - -/* Build a hash table containing all terms in zText. */ -static int build_terms(Hash *terms, sqlite3_tokenizer *pTokenizer, - const char *zText, sqlite_int64 iDocid){ - sqlite3_tokenizer_cursor *pCursor; - const char *pToken; - int nTokenBytes; - int iStartOffset, iEndOffset, iPosition; - - int rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); - if( rc!=SQLITE_OK ) return rc; - - pCursor->pTokenizer = pTokenizer; - HashInit(terms, HASH_STRING, 1); - while( SQLITE_OK==pTokenizer->pModule->xNext(pCursor, - &pToken, &nTokenBytes, - &iStartOffset, &iEndOffset, - &iPosition) ){ - DocList *p; - - /* Positions can't be negative; we use -1 as a terminator internally. */ - if( iPosition<0 ) { - rc = SQLITE_ERROR; - goto err; - } - - p = HashFind(terms, pToken, nTokenBytes); - if( p==NULL ){ - p = docListNew(DL_POSITIONS_OFFSETS); - docListAddDocid(p, iDocid); - HashInsert(terms, pToken, nTokenBytes, p); - } - docListAddPosOffset(p, iPosition, iStartOffset, iEndOffset); - } - -err: - /* TODO(shess) Check return? Should this be able to cause errors at - ** this point? Actually, same question about sqlite3_finalize(), - ** though one could argue that failure there means that the data is - ** not durable. *ponder* - */ - pTokenizer->pModule->xClose(pCursor); - return rc; -} -/* Update the %_terms table to map the term [zTerm] to the given rowid. */ -static int index_insert_term(fulltext_vtab *v, const char *zTerm, int nTerm, - sqlite_int64 iDocid, DocList *p){ - sqlite_int64 iFirst; - sqlite_int64 iIndexRow; - DocList doclist; - - int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); - if( rc==SQLITE_DONE ){ - docListInit(&doclist, DL_POSITIONS_OFFSETS, 0, 0); - if( docListUpdate(&doclist, iDocid, p) ){ - rc = term_insert(v, zTerm, nTerm, iDocid, &doclist); - docListDestroy(&doclist); - return rc; - } - return SQLITE_OK; - } - if( rc!=SQLITE_ROW ) return SQLITE_ERROR; - - /* This word is in the index; add this document ID to its blob. */ - - rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); - if( rc!=SQLITE_OK ) return rc; - - if( docListUpdate(&doclist, iDocid, p) ){ - /* If the blob is too big, split it in half. 
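For context on the split mentioned here: when a %_term row's doclist blob grows too large, the second half is moved to a new row keyed by its first docid (the "first" column). The sketch below is a loose, array-based illustration with an artificially small limit; the real code works on encoded blobs, compares byte sizes against CHUNK_MAX, and splits via docListSplit().

```c
#include <stdio.h>

#define CHUNK_MAX 4   /* artificially small limit, for illustration only */

int main(void){
  /* A docid-sorted posting list that has grown past the limit. */
  long long doclist[] = { 3, 7, 19, 24, 31, 42 };
  int n = sizeof(doclist)/sizeof(doclist[0]);

  if( n>CHUNK_MAX ){
    int nFirstHalf = n/2;
    /* The second half becomes a new %_term row; its "first" column is
    ** the first docid it contains, which keeps the rows ordered. */
    printf("row 1 keeps %d docids, first=%lld\n", nFirstHalf, doclist[0]);
    printf("row 2 gets  %d docids, first=%lld\n", n-nFirstHalf,
           doclist[nFirstHalf]);
  }
  return 0;
}
```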
*/ - if( doclist.nData>CHUNK_MAX ){ - DocList half; - if( docListSplit(&doclist, &half) ){ - rc = term_insert(v, zTerm, nTerm, firstDocid(&half), &half); - docListDestroy(&half); - if( rc!=SQLITE_OK ) goto err; - } - } - rc = term_update(v, iIndexRow, &doclist); - } - -err: - docListDestroy(&doclist); - return rc; -} - -/* Insert a row into the full-text index; set *piRowid to be the ID of the - * new row. */ -static int index_insert(fulltext_vtab *v, - sqlite3_value *pRequestRowid, const char *zText, - sqlite_int64 *piRowid){ - Hash terms; /* maps term string -> PosList */ - HashElem *e; - - int rc = content_insert(v, pRequestRowid, zText, -1); - if( rc!=SQLITE_OK ) return rc; - *piRowid = sqlite3_last_insert_rowid(v->db); - - if( !zText ) return SQLITE_OK; /* nothing to index */ - - rc = build_terms(&terms, v->pTokenizer, zText, *piRowid); - if( rc!=SQLITE_OK ) return rc; - - for(e=HashFirst(&terms); e; e=HashNext(e)){ - DocList *p = HashData(e); - rc = index_insert_term(v, HashKey(e), HashKeysize(e), *piRowid, p); - if( rc!=SQLITE_OK ) break; - } - - for(e=HashFirst(&terms); e; e=HashNext(e)){ - DocList *p = HashData(e); - docListDelete(p); - } - HashClear(&terms); - return rc; -} - -static int index_delete_term(fulltext_vtab *v, const char *zTerm, int nTerm, - sqlite_int64 iDocid){ - sqlite_int64 iFirst; - sqlite_int64 iIndexRow; - DocList doclist; - - int rc = term_chunk_select(v, zTerm, nTerm, iDocid, &iFirst); - if( rc!=SQLITE_ROW ) return SQLITE_ERROR; - - rc = term_select(v, zTerm, nTerm, iFirst, &iIndexRow, &doclist); - if( rc!=SQLITE_OK ) return rc; - - if( docListUpdate(&doclist, iDocid, NULL) ){ - if( doclist.nData>0 ){ - rc = term_update(v, iIndexRow, &doclist); - } else { /* empty posting list */ - rc = term_delete(v, iIndexRow); - } - } - docListDestroy(&doclist); - return rc; -} - -/* Delete a row from the full-text index. 
*/ -static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ - char *zText; - Hash terms; - HashElem *e; - - int rc = content_select(v, iRow, &zText); - if( rc!=SQLITE_OK ) return rc; - - rc = build_terms(&terms, v->pTokenizer, zText, iRow); - free(zText); - if( rc!=SQLITE_OK ) return rc; - - for(e=HashFirst(&terms); e; e=HashNext(e)){ - rc = index_delete_term(v, HashKey(e), HashKeysize(e), iRow); - if( rc!=SQLITE_OK ) break; - } - for(e=HashFirst(&terms); e; e=HashNext(e)){ - DocList *p = HashData(e); - docListDelete(p); - } - HashClear(&terms); - - return content_delete(v, iRow); -} - -static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, - sqlite_int64 *pRowid){ - fulltext_vtab *v = (fulltext_vtab *) pVtab; - - if( nArg<2 ){ - return index_delete(v, sqlite3_value_int64(ppArg[0])); - } - - if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ - return SQLITE_ERROR; /* an update; not yet supported */ - } - - assert( nArg==3 ); /* ppArg[1] = rowid, ppArg[2] = content */ - return index_insert(v, ppArg[1], - (const char *)sqlite3_value_text(ppArg[2]), pRowid); -} - -static sqlite3_module fulltextModule = { - 0, - fulltextCreate, - fulltextConnect, - fulltextBestIndex, - fulltextDisconnect, - fulltextDestroy, - fulltextOpen, - fulltextClose, - fulltextFilter, - fulltextNext, - fulltextEof, - fulltextColumn, - fulltextRowid, - fulltextUpdate -}; - -int fulltext_init(sqlite3 *db){ - return sqlite3_create_module(db, "fulltext", &fulltextModule, 0); -} - -#if !SQLITE_CORE -#ifdef _WIN32 -__declspec(dllexport) -#endif -int sqlite3_fulltext_init(sqlite3 *db, char **pzErrMsg, - const sqlite3_api_routines *pApi){ - SQLITE_EXTENSION_INIT2(pApi) - return fulltext_init(db); -} -#endif diff --git a/ext/fts1/fulltext.h b/ext/fts1/fulltext.h deleted file mode 100644 index 477dcab2ad..0000000000 --- a/ext/fts1/fulltext.h +++ /dev/null @@ -1,11 +0,0 @@ -#include "sqlite3.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -int fulltext_init(sqlite3 *db); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ diff --git a/ext/fts1/simple_tokenizer.c b/ext/fts1/simple_tokenizer.c deleted file mode 100644 index 0ddc7055af..0000000000 --- a/ext/fts1/simple_tokenizer.c +++ /dev/null @@ -1,174 +0,0 @@ -/* -** The author disclaims copyright to this source code. -** -************************************************************************* -** Implementation of the "simple" full-text-search tokenizer. -*/ - -#include -#if !defined(__APPLE__) -#include -#else -#include -#endif -#include -#include -#include - -#include "tokenizer.h" - -/* Duplicate a string; the caller must free() the returned string. - * (We don't use strdup() since it's not part of the standard C library and - * may not be available everywhere.) */ -/* TODO(shess) Copied from fulltext.c, consider util.c for such -** things. 
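A hedged sketch of how an application might drive this prototype: fulltext_init() and the "fulltext" module name come from the code above, while the CREATE VIRTUAL TABLE argument list is an assumption for illustration only (the accepted arguments are determined by fulltextConnect(), earlier in this file), and the sources are assumed to be compiled into the application.

```c
#include <stdio.h>
#include "sqlite3.h"
#include "fulltext.h"

static int print_row(void *pArg, int nCol, char **azVal, char **azCol){
  (void)pArg; (void)nCol; (void)azCol;
  printf("hit: %s\n", azVal[0] ? azVal[0] : "(null)");
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  fulltext_init(db);                       /* register the "fulltext" module */

  sqlite3_exec(db,
      "CREATE VIRTUAL TABLE docs USING fulltext(simple);"  /* hypothetical args */
      "INSERT INTO docs(content) VALUES('four score and seven years ago');"
      "INSERT INTO docs(content) VALUES('a new nation, conceived in liberty');",
      0, 0, 0);

  /* A MATCH constraint on the single "content" column is what drives the
  ** QUERY_FULLTEXT plan chosen by fulltextBestIndex() above. */
  sqlite3_exec(db, "SELECT content FROM docs WHERE content MATCH 'nation'",
               print_row, 0, 0);

  sqlite3_close(db);
  return 0;
}
```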
*/ -static char *string_dup(const char *s){ - char *str = malloc(strlen(s) + 1); - strcpy(str, s); - return str; -} - -typedef struct simple_tokenizer { - sqlite3_tokenizer base; - const char *zDelim; /* token delimiters */ -} simple_tokenizer; - -typedef struct simple_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *pInput; /* input we are tokenizing */ - int nBytes; /* size of the input */ - const char *pCurrent; /* current position in pInput */ - int iToken; /* index of next token to be returned */ - char *zToken; /* storage for current token */ - int nTokenBytes; /* actual size of current token */ - int nTokenAllocated; /* space allocated to zToken buffer */ -} simple_tokenizer_cursor; - -static sqlite3_tokenizer_module simpleTokenizerModule;/* forward declaration */ - -static int simpleCreate( - int argc, const char **argv, - sqlite3_tokenizer **ppTokenizer -){ - simple_tokenizer *t; - - t = (simple_tokenizer *) malloc(sizeof(simple_tokenizer)); - /* TODO(shess) Delimiters need to remain the same from run to run, - ** else we need to reindex. One solution would be a meta-table to - ** track such information in the database, then we'd only want this - ** information on the initial create. - */ - if( argc>1 ){ - t->zDelim = string_dup(argv[1]); - } else { - /* Build a string excluding alphanumeric ASCII characters */ - char zDelim[0x80]; /* nul-terminated, so nul not a member */ - int i, j; - for(i=1, j=0; i<0x80; i++){ - if( !isalnum(i) ){ - zDelim[j++] = i; - } - } - zDelim[j++] = '\0'; - assert( j<=sizeof(zDelim) ); - t->zDelim = string_dup(zDelim); - } - - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ - simple_tokenizer *t = (simple_tokenizer *) pTokenizer; - - free((void *) t->zDelim); - free(t); - - return SQLITE_OK; -} - -static int simpleOpen( - sqlite3_tokenizer *pTokenizer, - const char *pInput, int nBytes, - sqlite3_tokenizer_cursor **ppCursor -){ - simple_tokenizer_cursor *c; - - c = (simple_tokenizer_cursor *) malloc(sizeof(simple_tokenizer_cursor)); - c->pInput = pInput; - c->nBytes = nBytes<0 ? (int) strlen(pInput) : nBytes; - c->pCurrent = c->pInput; /* start tokenizing at the beginning */ - c->iToken = 0; - c->zToken = NULL; /* no space allocated, yet. */ - c->nTokenBytes = 0; - c->nTokenAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - - if( NULL!=c->zToken ){ - free(c->zToken); - } - free(c); - - return SQLITE_OK; -} - -static int simpleNext( - sqlite3_tokenizer_cursor *pCursor, - const char **ppToken, int *pnBytes, - int *piStartOffset, int *piEndOffset, int *piPosition -){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; - int ii; - - while( c->pCurrent-c->pInputnBytes ){ - int n = (int) strcspn(c->pCurrent, t->zDelim); - if( n>0 ){ - if( n+1>c->nTokenAllocated ){ - c->zToken = realloc(c->zToken, n+1); - } - for(ii=0; iipCurrent[ii]; - c->zToken[ii] = (unsigned char)ch<0x80 ? tolower((unsigned char)ch):ch; - } - c->zToken[n] = '\0'; - *ppToken = c->zToken; - *pnBytes = n; - *piStartOffset = (int) (c->pCurrent-c->pInput); - *piEndOffset = *piStartOffset+n; - *piPosition = c->iToken++; - c->pCurrent += n + 1; - - return SQLITE_OK; - } - c->pCurrent += n + 1; - /* TODO(shess) could strspn() to skip delimiters en masse. 
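The scanning loop in simpleNext() above reduces to: take a run of non-delimiter bytes as the next token, fold ASCII to lower case, report byte offsets and a position counter, then skip one delimiter byte. A standalone sketch of that behaviour (using isalnum() directly instead of the precomputed delimiter string):

```c
#include <ctype.h>
#include <stdio.h>

int main(void){
  const char *zInput = "Right, wrong, or Indifferent?";
  const char *p = zInput;
  int iToken = 0;

  while( *p ){
    size_t n = 0;
    while( p[n] && isalnum((unsigned char)p[n]) ) n++;   /* token length */
    if( n>0 ){
      char zToken[64];
      size_t i;
      for(i=0; i<n && i<sizeof(zToken)-1; i++){
        unsigned char ch = (unsigned char)p[i];
        zToken[i] = ch<0x80 ? (char)tolower(ch) : (char)ch;  /* ASCII only */
      }
      zToken[i] = '\0';
      printf("token=%-12s start=%2d end=%2d pos=%d\n",
             zToken, (int)(p-zInput), (int)(p-zInput+n), iToken++);
      p += n;
    }else{
      p++;                                   /* skip one delimiter byte */
    }
  }
  return 0;
}
```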
Needs - ** to happen in two places, though, which is annoying. - */ - } - return SQLITE_DONE; -} - -static sqlite3_tokenizer_module simpleTokenizerModule = { - 0, - simpleCreate, - simpleDestroy, - simpleOpen, - simpleClose, - simpleNext, -}; - -void get_simple_tokenizer_module( - sqlite3_tokenizer_module **ppModule -){ - *ppModule = &simpleTokenizerModule; -} diff --git a/ext/fts1/tokenizer.h b/ext/fts1/tokenizer.h deleted file mode 100644 index 1d7bd1f670..0000000000 --- a/ext/fts1/tokenizer.h +++ /dev/null @@ -1,89 +0,0 @@ -/* -** 2006 July 10 -** -** The author disclaims copyright to this source code. -** -************************************************************************* -** Defines the interface to tokenizers used by fulltext-search. There -** are three basic components: -** -** sqlite3_tokenizer_module is a singleton defining the tokenizer -** interface functions. This is essentially the class structure for -** tokenizers. -** -** sqlite3_tokenizer is used to define a particular tokenizer, perhaps -** including customization information defined at creation time. -** -** sqlite3_tokenizer_cursor is generated by a tokenizer to generate -** tokens from a particular input. -*/ -#ifndef _TOKENIZER_H_ -#define _TOKENIZER_H_ - -/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. -** If tokenizers are to be allowed to call sqlite3_*() functions, then -** we will need a way to register the API consistently. -*/ -#include "sqlite3.h" - -/* -** Structures used by the tokenizer interface. -*/ -typedef struct sqlite3_tokenizer sqlite3_tokenizer; -typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; -typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; - -struct sqlite3_tokenizer_module { - int iVersion; /* currently 0 */ - - /* - ** Create and destroy a tokenizer. argc/argv are passed down from - ** the fulltext virtual table creation to allow customization. - */ - int (*xCreate)(int argc, const char **argv, - sqlite3_tokenizer **ppTokenizer); - int (*xDestroy)(sqlite3_tokenizer *pTokenizer); - - /* - ** Tokenize a particular input. Call xOpen() to prepare to - ** tokenize, xNext() repeatedly until it returns SQLITE_DONE, then - ** xClose() to free any internal state. The pInput passed to - ** xOpen() must exist until the cursor is closed. The ppToken - ** result from xNext() is only valid until the next call to xNext() - ** or until xClose() is called. - */ - /* TODO(shess) current implementation requires pInput to be - ** nul-terminated. This should either be fixed, or pInput/nBytes - ** should be converted to zInput. - */ - int (*xOpen)(sqlite3_tokenizer *pTokenizer, - const char *pInput, int nBytes, - sqlite3_tokenizer_cursor **ppCursor); - int (*xClose)(sqlite3_tokenizer_cursor *pCursor); - int (*xNext)(sqlite3_tokenizer_cursor *pCursor, - const char **ppToken, int *pnBytes, - int *piStartOffset, int *piEndOffset, int *piPosition); -}; - -struct sqlite3_tokenizer { - sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ - /* Tokenizer implementations will typically add additional fields */ -}; - -struct sqlite3_tokenizer_cursor { - sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. */ - /* Tokenizer implementations will typically add additional fields */ -}; - -/* -** Get the module for a tokenizer which generates tokens based on a -** set of non-token characters. 
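Putting the xOpen()/xNext()/xClose() contract described above into a runnable driver, as a sketch that assumes tokenizer.h and simple_tokenizer.c from this directory are compiled in:

```c
#include <stdio.h>
#include "tokenizer.h"

/* Pull tokens from any module implementing the interface in tokenizer.h:
** open a cursor, call xNext() until it stops returning SQLITE_OK, close. */
static int print_tokens(sqlite3_tokenizer_module *pModule,
                        sqlite3_tokenizer *pTokenizer,
                        const char *zInput){
  sqlite3_tokenizer_cursor *pCursor;
  const char *zToken;
  int nToken, iStart, iEnd, iPos;
  int rc = pModule->xOpen(pTokenizer, zInput, -1, &pCursor);
  if( rc!=SQLITE_OK ) return rc;
  pCursor->pTokenizer = pTokenizer;          /* caller fills this in */

  while( pModule->xNext(pCursor, &zToken, &nToken,
                        &iStart, &iEnd, &iPos)==SQLITE_OK ){
    printf("%d: %.*s [%d,%d)\n", iPos, nToken, zToken, iStart, iEnd);
  }
  return pModule->xClose(pCursor);
}

int main(void){
  sqlite3_tokenizer_module *pModule;
  sqlite3_tokenizer *pTokenizer;

  get_simple_tokenizer_module(&pModule);
  if( pModule->xCreate(0, 0, &pTokenizer)!=SQLITE_OK ) return 1;
  pTokenizer->pModule = pModule;

  print_tokens(pModule, pTokenizer, "The quick brown fox");
  return pModule->xDestroy(pTokenizer);
}
```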
The default is to break tokens at any -** non-alnum character, though the set of delimiters can also be -** specified by the first argv argument to xCreate(). -*/ -/* TODO(shess) This doesn't belong here. Need some sort of -** registration process. -*/ -void get_simple_tokenizer_module(sqlite3_tokenizer_module **ppModule); - -#endif /* _TOKENIZER_H_ */ diff --git a/ext/fts2/README.tokenizers b/ext/fts2/README.tokenizers deleted file mode 100644 index 98d2021ba1..0000000000 --- a/ext/fts2/README.tokenizers +++ /dev/null @@ -1,133 +0,0 @@ - -1. FTS2 Tokenizers - - When creating a new full-text table, FTS2 allows the user to select - the text tokenizer implementation to be used when indexing text - by specifying a "tokenizer" clause as part of the CREATE VIRTUAL TABLE - statement: - - CREATE VIRTUAL TABLE USING fts2( - [, tokenizer []] - ); - - The built-in tokenizers (valid values to pass as ) are - "simple" and "porter". - - should consist of zero or more white-space separated - arguments to pass to the selected tokenizer implementation. The - interpretation of the arguments, if any, depends on the individual - tokenizer. - -2. Custom Tokenizers - - FTS2 allows users to provide custom tokenizer implementations. The - interface used to create a new tokenizer is defined and described in - the fts2_tokenizer.h source file. - - Registering a new FTS2 tokenizer is similar to registering a new - virtual table module with SQLite. The user passes a pointer to a - structure containing pointers to various callback functions that - make up the implementation of the new tokenizer type. For tokenizers, - the structure (defined in fts2_tokenizer.h) is called - "sqlite3_tokenizer_module". - - FTS2 does not expose a C-function that users call to register new - tokenizer types with a database handle. Instead, the pointer must - be encoded as an SQL blob value and passed to FTS2 through the SQL - engine by evaluating a special scalar function, "fts2_tokenizer()". - The fts2_tokenizer() function may be called with one or two arguments, - as follows: - - SELECT fts2_tokenizer(); - SELECT fts2_tokenizer(, ); - - Where is a string identifying the tokenizer and - is a pointer to an sqlite3_tokenizer_module - structure encoded as an SQL blob. If the second argument is present, - it is registered as tokenizer and a copy of it - returned. If only one argument is passed, a pointer to the tokenizer - implementation currently registered as is returned, - encoded as a blob. Or, if no such tokenizer exists, an SQL exception - (error) is raised. - - SECURITY: If the fts2 extension is used in an environment where potentially - malicious users may execute arbitrary SQL (i.e. gears), they should be - prevented from invoking the fts2_tokenizer() function, possibly using the - authorisation callback. - - See "Sample code" below for an example of calling the fts2_tokenizer() - function from C code. - -3. ICU Library Tokenizers - - If this extension is compiled with the SQLITE_ENABLE_ICU pre-processor - symbol defined, then there exists a built-in tokenizer named "icu" - implemented using the ICU library. The first argument passed to the - xCreate() method (see fts2_tokenizer.h) of this tokenizer may be - an ICU locale identifier. For example "tr_TR" for Turkish as used - in Turkey, or "en_AU" for English as used in Australia. For example: - - "CREATE VIRTUAL TABLE thai_text USING fts2(text, tokenizer icu th_TH)" - - The ICU tokenizer implementation is very simple. 
It splits the input - text according to the ICU rules for finding word boundaries and discards - any tokens that consist entirely of white-space. This may be suitable - for some applications in some locales, but not all. If more complex - processing is required, for example to implement stemming or - discard punctuation, this can be done by creating a tokenizer - implementation that uses the ICU tokenizer as part of its implementation. - - When using the ICU tokenizer this way, it is safe to overwrite the - contents of the strings returned by the xNext() method (see - fts2_tokenizer.h). - -4. Sample code. - - The following two code samples illustrate the way C code should invoke - the fts2_tokenizer() scalar function: - - int registerTokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module *p - ){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts2_tokenizer(?, ?)"; - - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); - sqlite3_step(pStmt); - - return sqlite3_finalize(pStmt); - } - - int queryTokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module **pp - ){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts2_tokenizer(?)"; - - *pp = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ - memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); - } - } - - return sqlite3_finalize(pStmt); - } diff --git a/ext/fts2/README.txt b/ext/fts2/README.txt deleted file mode 100644 index 517a2a0434..0000000000 --- a/ext/fts2/README.txt +++ /dev/null @@ -1,4 +0,0 @@ -This folder contains source code to the second full-text search -extension for SQLite. While the API is the same, this version uses a -substantially different storage schema from fts1, so tables will need -to be rebuilt. diff --git a/ext/fts2/fts2.c b/ext/fts2/fts2.c deleted file mode 100644 index 0405fb7b1e..0000000000 --- a/ext/fts2/fts2.c +++ /dev/null @@ -1,6860 +0,0 @@ -/* fts2 has a design flaw which can lead to database corruption (see -** below). It is recommended not to use it any longer, instead use -** fts3 (or higher). If you believe that your use of fts2 is safe, -** add -DSQLITE_ENABLE_BROKEN_FTS2=1 to your CFLAGS. -*/ -#if (!defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2)) \ - && !defined(SQLITE_ENABLE_BROKEN_FTS2) -#error fts2 has a design flaw and has been deprecated. -#endif -/* The flaw is that fts2 uses the content table's unaliased rowid as -** the unique docid. fts2 embeds the rowid in the index it builds, -** and expects the rowid to not change. The SQLite VACUUM operation -** will renumber such rowids, thereby breaking fts2. If you are using -** fts2 in a system which has disabled VACUUM, then you can continue -** to use it safely. Note that PRAGMA auto_vacuum does NOT disable -** VACUUM, though systems using auto_vacuum are unlikely to invoke -** VACUUM. -** -** Unlike fts1, which is safe across VACUUM if you never delete -** documents, fts2 has a second exposure to this flaw, in the segments -** table. So fts2 should be considered unsafe across VACUUM in all -** cases. -*/ - -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This is an SQLite module implementing full-text search. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS2 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS2 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). -*/ - -/* TODO(shess) Consider exporting this comment to an HTML file or the -** wiki. -*/ -/* The full-text index is stored in a series of b+tree (-like) -** structures called segments which map terms to doclists. The -** structures are like b+trees in layout, but are constructed from the -** bottom up in optimal fashion and are not updatable. Since trees -** are built from the bottom up, things will be described from the -** bottom up. -** -** -**** Varints **** -** The basic unit of encoding is a variable-length integer called a -** varint. We encode variable-length integers in little-endian order -** using seven bits * per byte as follows: -** -** KEY: -** A = 0xxxxxxx 7 bits of data and one flag bit -** B = 1xxxxxxx 7 bits of data and one flag bit -** -** 7 bits - A -** 14 bits - BA -** 21 bits - BBA -** and so on. -** -** This is identical to how sqlite encodes varints (see util.c). -** -** -**** Document lists **** -** A doclist (document list) holds a docid-sorted list of hits for a -** given term. Doclists hold docids, and can optionally associate -** token positions and offsets with docids. -** -** A DL_POSITIONS_OFFSETS doclist is stored like this: -** -** array { -** varint docid; -** array { (position list for column 0) -** varint position; (delta from previous position plus POS_BASE) -** varint startOffset; (delta from previous startOffset) -** varint endOffset; (delta from startOffset) -** } -** array { -** varint POS_COLUMN; (marks start of position list for new column) -** varint column; (index of new column) -** array { -** varint position; (delta from previous position plus POS_BASE) -** varint startOffset;(delta from previous startOffset) -** varint endOffset; (delta from startOffset) -** } -** } -** varint POS_END; (marks end of positions for this document. -** } -** -** Here, array { X } means zero or more occurrences of X, adjacent in -** memory. A "position" is an index of a token in the token stream -** generated by the tokenizer, while an "offset" is a byte offset, -** both based at 0. Note that POS_END and POS_COLUMN occur in the -** same logical place as the position element, and act as sentinals -** ending a position list array. -** -** A DL_POSITIONS doclist omits the startOffset and endOffset -** information. A DL_DOCIDS doclist omits both the position and -** offset information, becoming an array of varint-encoded docids. -** -** On-disk data is stored as type DL_DEFAULT, so we don't serialize -** the type. Due to how deletion is implemented in the segmentation -** system, on-disk doclists MUST store at least positions. -** -** -**** Segment leaf nodes **** -** Segment leaf nodes store terms and doclists, ordered by term. Leaf -** nodes are written using LeafWriter, and read using LeafReader (to -** iterate through a single leaf node's data) and LeavesReader (to -** iterate through a segment's entire leaf layer). 
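A worked example of the varint encoding described in the Varints section above, as a standalone sketch (the extension's own putVarint()/getVarint() appear later in this file): 300 encodes to the two bytes 0xAC 0x02, low seven bits first, with the high bit marking continuation.

```c
#include <assert.h>
#include <stdio.h>

typedef unsigned long long u64;

/* Little-endian, 7-bits-per-byte varint as described above: every byte
** except the last has its high bit set.  Illustrative only. */
static int put_varint(unsigned char *p, u64 v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | 0x80);
    v >>= 7;
  }while( v );
  p[n-1] &= 0x7f;                 /* clear continuation bit on the last byte */
  return n;
}

static int get_varint(const unsigned char *p, u64 *pv){
  u64 v = 0;
  int n = 0, shift = 0;
  while( p[n] & 0x80 ){
    v |= (u64)(p[n++] & 0x7f) << shift;
    shift += 7;
  }
  v |= (u64)p[n++] << shift;
  *pv = v;
  return n;
}

int main(void){
  unsigned char buf[10];
  u64 v;
  int n = put_varint(buf, 300);
  assert( n==2 && buf[0]==0xAC && buf[1]==0x02 );   /* 300 -> AC 02 */
  n = get_varint(buf, &v);
  assert( n==2 && v==300 );
  printf("300 encodes to %02X %02X\n", buf[0], buf[1]);
  return 0;
}
```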
Leaf nodes have -** the format: -** -** varint iHeight; (height from leaf level, always 0) -** varint nTerm; (length of first term) -** char pTerm[nTerm]; (content of first term) -** varint nDoclist; (length of term's associated doclist) -** char pDoclist[nDoclist]; (content of doclist) -** array { -** (further terms are delta-encoded) -** varint nPrefix; (length of prefix shared with previous term) -** varint nSuffix; (length of unshared suffix) -** char pTermSuffix[nSuffix];(unshared suffix of next term) -** varint nDoclist; (length of term's associated doclist) -** char pDoclist[nDoclist]; (content of doclist) -** } -** -** Here, array { X } means zero or more occurrences of X, adjacent in -** memory. -** -** Leaf nodes are broken into blocks which are stored contiguously in -** the %_segments table in sorted order. This means that when the end -** of a node is reached, the next term is in the node with the next -** greater node id. -** -** New data is spilled to a new leaf node when the current node -** exceeds LEAF_MAX bytes (default 2048). New data which itself is -** larger than STANDALONE_MIN (default 1024) is placed in a standalone -** node (a leaf node with a single term and doclist). The goal of -** these settings is to pack together groups of small doclists while -** making it efficient to directly access large doclists. The -** assumption is that large doclists represent terms which are more -** likely to be query targets. -** -** TODO(shess) It may be useful for blocking decisions to be more -** dynamic. For instance, it may make more sense to have a 2.5k leaf -** node rather than splitting into 2k and .5k nodes. My intuition is -** that this might extend through 2x or 4x the pagesize. -** -** -**** Segment interior nodes **** -** Segment interior nodes store blockids for subtree nodes and terms -** to describe what data is stored by the each subtree. Interior -** nodes are written using InteriorWriter, and read using -** InteriorReader. InteriorWriters are created as needed when -** SegmentWriter creates new leaf nodes, or when an interior node -** itself grows too big and must be split. The format of interior -** nodes: -** -** varint iHeight; (height from leaf level, always >0) -** varint iBlockid; (block id of node's leftmost subtree) -** optional { -** varint nTerm; (length of first term) -** char pTerm[nTerm]; (content of first term) -** array { -** (further terms are delta-encoded) -** varint nPrefix; (length of shared prefix with previous term) -** varint nSuffix; (length of unshared suffix) -** char pTermSuffix[nSuffix]; (unshared suffix of next term) -** } -** } -** -** Here, optional { X } means an optional element, while array { X } -** means zero or more occurrences of X, adjacent in memory. -** -** An interior node encodes n terms separating n+1 subtrees. The -** subtree blocks are contiguous, so only the first subtree's blockid -** is encoded. The subtree at iBlockid will contain all terms less -** than the first term encoded (or all terms if no term is encoded). -** Otherwise, for terms greater than or equal to pTerm[i] but less -** than pTerm[i+1], the subtree for that term will be rooted at -** iBlockid+i. Interior nodes only store enough term data to -** distinguish adjacent children (if the rightmost term of the left -** child is "something", and the leftmost term of the right child is -** "wicked", only "w" is stored). -** -** New data is spilled to a new interior node at the same height when -** the current node exceeds INTERIOR_MAX bytes (default 2048). 
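The delta-encoded term format above (nPrefix, nSuffix, unshared suffix) can be illustrated with a short sketch; the terms are chosen to echo the "something"/"wicked" example used just above for interior nodes.

```c
#include <stdio.h>
#include <string.h>

/* Delta-encode a sorted term list the way leaf/interior nodes do above:
** the first term is stored whole, each later term as (nPrefix, nSuffix,
** suffix), where nPrefix is the length shared with the previous term. */
int main(void){
  const char *azTerm[] = { "something", "somewhere", "wicked" };
  int nTerm = 3, i;

  printf("term[0] stored whole: \"%s\"\n", azTerm[0]);
  for(i=1; i<nTerm; i++){
    const char *zPrev = azTerm[i-1], *zTerm = azTerm[i];
    int nPrefix = 0;
    while( zPrev[nPrefix] && zPrev[nPrefix]==zTerm[nPrefix] ) nPrefix++;
    printf("term[%d]: nPrefix=%d nSuffix=%d suffix=\"%s\"\n",
           i, nPrefix, (int)strlen(zTerm)-nPrefix, zTerm+nPrefix);
  }
  return 0;
}
```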
-** INTERIOR_MIN_TERMS (default 7) keeps large terms from monopolizing -** interior nodes and making the tree too skinny. The interior nodes -** at a given height are naturally tracked by interior nodes at -** height+1, and so on. -** -** -**** Segment directory **** -** The segment directory in table %_segdir stores meta-information for -** merging and deleting segments, and also the root node of the -** segment's tree. -** -** The root node is the top node of the segment's tree after encoding -** the entire segment, restricted to ROOT_MAX bytes (default 1024). -** This could be either a leaf node or an interior node. If the top -** node requires more than ROOT_MAX bytes, it is flushed to %_segments -** and a new root interior node is generated (which should always fit -** within ROOT_MAX because it only needs space for 2 varints, the -** height and the blockid of the previous root). -** -** The meta-information in the segment directory is: -** level - segment level (see below) -** idx - index within level -** - (level,idx uniquely identify a segment) -** start_block - first leaf node -** leaves_end_block - last leaf node -** end_block - last block (including interior nodes) -** root - contents of root node -** -** If the root node is a leaf node, then start_block, -** leaves_end_block, and end_block are all 0. -** -** -**** Segment merging **** -** To amortize update costs, segments are groups into levels and -** merged in matches. Each increase in level represents exponentially -** more documents. -** -** New documents (actually, document updates) are tokenized and -** written individually (using LeafWriter) to a level 0 segment, with -** incrementing idx. When idx reaches MERGE_COUNT (default 16), all -** level 0 segments are merged into a single level 1 segment. Level 1 -** is populated like level 0, and eventually MERGE_COUNT level 1 -** segments are merged to a single level 2 segment (representing -** MERGE_COUNT^2 updates), and so on. -** -** A segment merge traverses all segments at a given level in -** parallel, performing a straightforward sorted merge. Since segment -** leaf nodes are written in to the %_segments table in order, this -** merge traverses the underlying sqlite disk structures efficiently. -** After the merge, all segment blocks from the merged level are -** deleted. -** -** MERGE_COUNT controls how often we merge segments. 16 seems to be -** somewhat of a sweet spot for insertion performance. 32 and 64 show -** very similar performance numbers to 16 on insertion, though they're -** a tiny bit slower (perhaps due to more overhead in merge-time -** sorting). 8 is about 20% slower than 16, 4 about 50% slower than -** 16, 2 about 66% slower than 16. -** -** At query time, high MERGE_COUNT increases the number of segments -** which need to be scanned and merged. For instance, with 100k docs -** inserted: -** -** MERGE_COUNT segments -** 16 25 -** 8 12 -** 4 10 -** 2 6 -** -** This appears to have only a moderate impact on queries for very -** frequent terms (which are somewhat dominated by segment merge -** costs), and infrequent and non-existent terms still seem to be fast -** even with many segments. -** -** TODO(shess) That said, it would be nice to have a better query-side -** argument for MERGE_COUNT of 16. Also, it is possible/likely that -** optimizations to things like doclist merging will swing the sweet -** spot around. 
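The MERGE_COUNT discussion above has a compact arithmetic consequence: because every MERGE_COUNT segments at one level collapse into a single segment at the next, the number of live segments after N document updates is the digit sum of N written in base MERGE_COUNT. A sketch that reproduces the 100k-document table above:

```c
#include <stdio.h>

static int countSegments(long long nUpdates, int mergeCount){
  int nSegments = 0;
  while( nUpdates>0 ){
    nSegments += (int)(nUpdates % mergeCount);  /* segments left at this level */
    nUpdates /= mergeCount;
  }
  return nSegments;
}

int main(void){
  int aMerge[] = { 16, 8, 4, 2 };
  int i;
  for(i=0; i<4; i++){
    printf("MERGE_COUNT %2d -> %2d segments after 100000 updates\n",
           aMerge[i], countSegments(100000, aMerge[i]));
  }
  return 0;   /* prints 25, 12, 10 and 6, matching the table above */
}
```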
-** -** -** -**** Handling of deletions and updates **** -** Since we're using a segmented structure, with no docid-oriented -** index into the term index, we clearly cannot simply update the term -** index when a document is deleted or updated. For deletions, we -** write an empty doclist (varint(docid) varint(POS_END)), for updates -** we simply write the new doclist. Segment merges overwrite older -** data for a particular docid with newer data, so deletes or updates -** will eventually overtake the earlier data and knock it out. The -** query logic likewise merges doclists so that newer data knocks out -** older data. -** -** TODO(shess) Provide a VACUUM type operation to clear out all -** deletions and duplications. This would basically be a forced merge -** into a single segment. -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) - -#if defined(SQLITE_ENABLE_FTS2) && !defined(SQLITE_CORE) -# define SQLITE_CORE 1 -#endif - -#include -#include -#include -#include -#include "fts2.h" -#include "fts2_hash.h" -#include "fts2_tokenizer.h" -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT1 - - -/* TODO(shess) MAN, this thing needs some refactoring. At minimum, it -** would be nice to order the file better, perhaps something along the -** lines of: -** -** - utility functions -** - table setup functions -** - table update functions -** - table query functions -** -** Put the query functions last because they're likely to reference -** typedefs or functions from the table update section. -*/ - -#if 0 -# define TRACE(A) printf A; fflush(stdout) -#else -# define TRACE(A) -#endif - -/* It is not safe to call isspace(), tolower(), or isalnum() on -** hi-bit-set characters. This is the same solution used in the -** tokenizer. -*/ -/* TODO(shess) The snippet-generation code should be using the -** tokenizer-generated tokens rather than doing its own local -** tokenization. -*/ -/* TODO(shess) Is __isascii() a portable version of (c&0x80)==0? */ -static int safe_isspace(char c){ - return c==' ' || c=='\t' || c=='\n' || c=='\r' || c=='\v' || c=='\f'; -} -static int safe_tolower(char c){ - return (c>='A' && c<='Z') ? (c - 'A' + 'a') : c; -} -static int safe_isalnum(char c){ - return (c>='0' && c<='9') || (c>='A' && c<='Z') || (c>='a' && c<='z'); -} - -typedef enum DocListType { - DL_DOCIDS, /* docids only */ - DL_POSITIONS, /* docids + positions */ - DL_POSITIONS_OFFSETS /* docids + positions + offsets */ -} DocListType; - -/* -** By default, only positions and not offsets are stored in the doclists. -** To change this so that offsets are stored too, compile with -** -** -DDL_DEFAULT=DL_POSITIONS_OFFSETS -** -** If DL_DEFAULT is set to DL_DOCIDS, your table can only be inserted -** into (no deletes or updates). -*/ -#ifndef DL_DEFAULT -# define DL_DEFAULT DL_POSITIONS -#endif - -enum { - POS_END = 0, /* end of this position list */ - POS_COLUMN, /* followed by new column number */ - POS_BASE -}; - -/* MERGE_COUNT controls how often we merge segments (see comment at -** top of file). -*/ -#define MERGE_COUNT 16 - -/* utility functions */ - -/* CLEAR() and SCRAMBLE() abstract memset() on a pointer to a single -** record to prevent errors of the form: -** -** my_function(SomeType *b){ -** memset(b, '\0', sizeof(b)); // sizeof(b)!=sizeof(*b) -** } -*/ -/* TODO(shess) Obvious candidates for a header file. 
*/ -#define CLEAR(b) memset(b, '\0', sizeof(*(b))) - -#ifndef NDEBUG -# define SCRAMBLE(b) memset(b, 0x55, sizeof(*(b))) -#else -# define SCRAMBLE(b) -#endif - -/* We may need up to VARINT_MAX bytes to store an encoded 64-bit integer. */ -#define VARINT_MAX 10 - -/* Write a 64-bit variable-length integer to memory starting at p[0]. - * The length of data written will be between 1 and VARINT_MAX bytes. - * The number of bytes written is returned. */ -static int putVarint(char *p, sqlite_int64 v){ - unsigned char *q = (unsigned char *) p; - sqlite_uint64 vu = v; - do{ - *q++ = (unsigned char) ((vu & 0x7f) | 0x80); - vu >>= 7; - }while( vu!=0 ); - q[-1] &= 0x7f; /* turn off high bit in final byte */ - assert( q - (unsigned char *)p <= VARINT_MAX ); - return (int) (q - (unsigned char *)p); -} - -/* Read a 64-bit variable-length integer from memory starting at p[0]. - * Return the number of bytes read, or 0 on error. - * The value is stored in *v. */ -static int getVarint(const char *p, sqlite_int64 *v){ - const unsigned char *q = (const unsigned char *) p; - sqlite_uint64 x = 0, y = 1; - while( (*q & 0x80) == 0x80 ){ - x += y * (*q++ & 0x7f); - y <<= 7; - if( q - (unsigned char *)p >= VARINT_MAX ){ /* bad data */ - assert( 0 ); - return 0; - } - } - x += y * (*q++); - *v = (sqlite_int64) x; - return (int) (q - (unsigned char *)p); -} - -static int getVarint32(const char *p, int *pi){ - sqlite_int64 i; - int ret = getVarint(p, &i); - *pi = (int) i; - assert( *pi==i ); - return ret; -} - -/*******************************************************************/ -/* DataBuffer is used to collect data into a buffer in piecemeal -** fashion. It implements the usual distinction between amount of -** data currently stored (nData) and buffer capacity (nCapacity). -** -** dataBufferInit - create a buffer with given initial capacity. -** dataBufferReset - forget buffer's data, retaining capacity. -** dataBufferDestroy - free buffer's data. -** dataBufferSwap - swap contents of two buffers. -** dataBufferExpand - expand capacity without adding data. -** dataBufferAppend - append data. -** dataBufferAppend2 - append two pieces of data at once. -** dataBufferReplace - replace buffer's data. -*/ -typedef struct DataBuffer { - char *pData; /* Pointer to malloc'ed buffer. */ - int nCapacity; /* Size of pData buffer. */ - int nData; /* End of data loaded into pData. */ -} DataBuffer; - -static void dataBufferInit(DataBuffer *pBuffer, int nCapacity){ - assert( nCapacity>=0 ); - pBuffer->nData = 0; - pBuffer->nCapacity = nCapacity; - pBuffer->pData = nCapacity==0 ? NULL : sqlite3_malloc(nCapacity); -} -static void dataBufferReset(DataBuffer *pBuffer){ - pBuffer->nData = 0; -} -static void dataBufferDestroy(DataBuffer *pBuffer){ - if( pBuffer->pData!=NULL ) sqlite3_free(pBuffer->pData); - SCRAMBLE(pBuffer); -} -static void dataBufferSwap(DataBuffer *pBuffer1, DataBuffer *pBuffer2){ - DataBuffer tmp = *pBuffer1; - *pBuffer1 = *pBuffer2; - *pBuffer2 = tmp; -} -static void dataBufferExpand(DataBuffer *pBuffer, int nAddCapacity){ - assert( nAddCapacity>0 ); - /* TODO(shess) Consider expanding more aggressively. Note that the - ** underlying malloc implementation may take care of such things for - ** us already. 
- */ - if( pBuffer->nData+nAddCapacity>pBuffer->nCapacity ){ - pBuffer->nCapacity = pBuffer->nData+nAddCapacity; - pBuffer->pData = sqlite3_realloc(pBuffer->pData, pBuffer->nCapacity); - } -} -static void dataBufferAppend(DataBuffer *pBuffer, - const char *pSource, int nSource){ - assert( nSource>0 && pSource!=NULL ); - dataBufferExpand(pBuffer, nSource); - memcpy(pBuffer->pData+pBuffer->nData, pSource, nSource); - pBuffer->nData += nSource; -} -static void dataBufferAppend2(DataBuffer *pBuffer, - const char *pSource1, int nSource1, - const char *pSource2, int nSource2){ - assert( nSource1>0 && pSource1!=NULL ); - assert( nSource2>0 && pSource2!=NULL ); - dataBufferExpand(pBuffer, nSource1+nSource2); - memcpy(pBuffer->pData+pBuffer->nData, pSource1, nSource1); - memcpy(pBuffer->pData+pBuffer->nData+nSource1, pSource2, nSource2); - pBuffer->nData += nSource1+nSource2; -} -static void dataBufferReplace(DataBuffer *pBuffer, - const char *pSource, int nSource){ - dataBufferReset(pBuffer); - dataBufferAppend(pBuffer, pSource, nSource); -} - -/* StringBuffer is a null-terminated version of DataBuffer. */ -typedef struct StringBuffer { - DataBuffer b; /* Includes null terminator. */ -} StringBuffer; - -static void initStringBuffer(StringBuffer *sb){ - dataBufferInit(&sb->b, 100); - dataBufferReplace(&sb->b, "", 1); -} -static int stringBufferLength(StringBuffer *sb){ - return sb->b.nData-1; -} -static char *stringBufferData(StringBuffer *sb){ - return sb->b.pData; -} -static void stringBufferDestroy(StringBuffer *sb){ - dataBufferDestroy(&sb->b); -} - -static void nappend(StringBuffer *sb, const char *zFrom, int nFrom){ - assert( sb->b.nData>0 ); - if( nFrom>0 ){ - sb->b.nData--; - dataBufferAppend2(&sb->b, zFrom, nFrom, "", 1); - } -} -static void append(StringBuffer *sb, const char *zFrom){ - nappend(sb, zFrom, strlen(zFrom)); -} - -/* Append a list of strings separated by commas. */ -static void appendList(StringBuffer *sb, int nString, char **azString){ - int i; - for(i=0; i0 ) append(sb, ", "); - append(sb, azString[i]); - } -} - -static int endsInWhiteSpace(StringBuffer *p){ - return stringBufferLength(p)>0 && - safe_isspace(stringBufferData(p)[stringBufferLength(p)-1]); -} - -/* If the StringBuffer ends in something other than white space, add a -** single space character to the end. -*/ -static void appendWhiteSpace(StringBuffer *p){ - if( stringBufferLength(p)==0 ) return; - if( !endsInWhiteSpace(p) ) append(p, " "); -} - -/* Remove white space from the end of the StringBuffer */ -static void trimWhiteSpace(StringBuffer *p){ - while( endsInWhiteSpace(p) ){ - p->b.pData[--p->b.nData-1] = '\0'; - } -} - -/*******************************************************************/ -/* DLReader is used to read document elements from a doclist. The -** current docid is cached, so dlrDocid() is fast. DLReader does not -** own the doclist buffer. -** -** dlrAtEnd - true if there's no more data to read. -** dlrDocid - docid of current document. -** dlrDocData - doclist data for current document (including docid). -** dlrDocDataBytes - length of same. -** dlrAllDataBytes - length of all remaining data. -** dlrPosData - position data for current document. -** dlrPosDataLen - length of pos data for current document (incl POS_END). -** dlrStep - step to current document. -** dlrInit - initial for doclist of given type against given data. -** dlrDestroy - clean up. 
-** -** Expected usage is something like: -** -** DLReader reader; -** dlrInit(&reader, pData, nData); -** while( !dlrAtEnd(&reader) ){ -** // calls to dlrDocid() and kin. -** dlrStep(&reader); -** } -** dlrDestroy(&reader); -*/ -typedef struct DLReader { - DocListType iType; - const char *pData; - int nData; - - sqlite_int64 iDocid; - int nElement; -} DLReader; - -static int dlrAtEnd(DLReader *pReader){ - assert( pReader->nData>=0 ); - return pReader->nData==0; -} -static sqlite_int64 dlrDocid(DLReader *pReader){ - assert( !dlrAtEnd(pReader) ); - return pReader->iDocid; -} -static const char *dlrDocData(DLReader *pReader){ - assert( !dlrAtEnd(pReader) ); - return pReader->pData; -} -static int dlrDocDataBytes(DLReader *pReader){ - assert( !dlrAtEnd(pReader) ); - return pReader->nElement; -} -static int dlrAllDataBytes(DLReader *pReader){ - assert( !dlrAtEnd(pReader) ); - return pReader->nData; -} -/* TODO(shess) Consider adding a field to track iDocid varint length -** to make these two functions faster. This might matter (a tiny bit) -** for queries. -*/ -static const char *dlrPosData(DLReader *pReader){ - sqlite_int64 iDummy; - int n = getVarint(pReader->pData, &iDummy); - assert( !dlrAtEnd(pReader) ); - return pReader->pData+n; -} -static int dlrPosDataLen(DLReader *pReader){ - sqlite_int64 iDummy; - int n = getVarint(pReader->pData, &iDummy); - assert( !dlrAtEnd(pReader) ); - return pReader->nElement-n; -} -static void dlrStep(DLReader *pReader){ - assert( !dlrAtEnd(pReader) ); - - /* Skip past current doclist element. */ - assert( pReader->nElement<=pReader->nData ); - pReader->pData += pReader->nElement; - pReader->nData -= pReader->nElement; - - /* If there is more data, read the next doclist element. */ - if( pReader->nData!=0 ){ - sqlite_int64 iDocidDelta; - int iDummy, n = getVarint(pReader->pData, &iDocidDelta); - pReader->iDocid += iDocidDelta; - if( pReader->iType>=DL_POSITIONS ){ - assert( nnData ); - while( 1 ){ - n += getVarint32(pReader->pData+n, &iDummy); - assert( n<=pReader->nData ); - if( iDummy==POS_END ) break; - if( iDummy==POS_COLUMN ){ - n += getVarint32(pReader->pData+n, &iDummy); - assert( nnData ); - }else if( pReader->iType==DL_POSITIONS_OFFSETS ){ - n += getVarint32(pReader->pData+n, &iDummy); - n += getVarint32(pReader->pData+n, &iDummy); - assert( nnData ); - } - } - } - pReader->nElement = n; - assert( pReader->nElement<=pReader->nData ); - } -} -static void dlrInit(DLReader *pReader, DocListType iType, - const char *pData, int nData){ - assert( pData!=NULL && nData!=0 ); - pReader->iType = iType; - pReader->pData = pData; - pReader->nData = nData; - pReader->nElement = 0; - pReader->iDocid = 0; - - /* Load the first element's data. There must be a first element. */ - dlrStep(pReader); -} -static void dlrDestroy(DLReader *pReader){ - SCRAMBLE(pReader); -} - -#ifndef NDEBUG -/* Verify that the doclist can be validly decoded. Also returns the -** last docid found because it is convenient in other assertions for -** DLWriter. 
-*/ -static void docListValidate(DocListType iType, const char *pData, int nData, - sqlite_int64 *pLastDocid){ - sqlite_int64 iPrevDocid = 0; - assert( nData>0 ); - assert( pData!=0 ); - assert( pData+nData>pData ); - while( nData!=0 ){ - sqlite_int64 iDocidDelta; - int n = getVarint(pData, &iDocidDelta); - iPrevDocid += iDocidDelta; - if( iType>DL_DOCIDS ){ - int iDummy; - while( 1 ){ - n += getVarint32(pData+n, &iDummy); - if( iDummy==POS_END ) break; - if( iDummy==POS_COLUMN ){ - n += getVarint32(pData+n, &iDummy); - }else if( iType>DL_POSITIONS ){ - n += getVarint32(pData+n, &iDummy); - n += getVarint32(pData+n, &iDummy); - } - assert( n<=nData ); - } - } - assert( n<=nData ); - pData += n; - nData -= n; - } - if( pLastDocid ) *pLastDocid = iPrevDocid; -} -#define ASSERT_VALID_DOCLIST(i, p, n, o) docListValidate(i, p, n, o) -#else -#define ASSERT_VALID_DOCLIST(i, p, n, o) assert( 1 ) -#endif - -/*******************************************************************/ -/* DLWriter is used to write doclist data to a DataBuffer. DLWriter -** always appends to the buffer and does not own it. -** -** dlwInit - initialize to write a given type doclistto a buffer. -** dlwDestroy - clear the writer's memory. Does not free buffer. -** dlwAppend - append raw doclist data to buffer. -** dlwCopy - copy next doclist from reader to writer. -** dlwAdd - construct doclist element and append to buffer. -** Only apply dlwAdd() to DL_DOCIDS doclists (else use PLWriter). -*/ -typedef struct DLWriter { - DocListType iType; - DataBuffer *b; - sqlite_int64 iPrevDocid; -#ifndef NDEBUG - int has_iPrevDocid; -#endif -} DLWriter; - -static void dlwInit(DLWriter *pWriter, DocListType iType, DataBuffer *b){ - pWriter->b = b; - pWriter->iType = iType; - pWriter->iPrevDocid = 0; -#ifndef NDEBUG - pWriter->has_iPrevDocid = 0; -#endif -} -static void dlwDestroy(DLWriter *pWriter){ - SCRAMBLE(pWriter); -} -/* iFirstDocid is the first docid in the doclist in pData. It is -** needed because pData may point within a larger doclist, in which -** case the first item would be delta-encoded. -** -** iLastDocid is the final docid in the doclist in pData. It is -** needed to create the new iPrevDocid for future delta-encoding. The -** code could decode the passed doclist to recreate iLastDocid, but -** the only current user (docListMerge) already has decoded this -** information. -*/ -/* TODO(shess) This has become just a helper for docListMerge. -** Consider a refactor to make this cleaner. -*/ -static void dlwAppend(DLWriter *pWriter, - const char *pData, int nData, - sqlite_int64 iFirstDocid, sqlite_int64 iLastDocid){ - sqlite_int64 iDocid = 0; - char c[VARINT_MAX]; - int nFirstOld, nFirstNew; /* Old and new varint len of first docid. */ -#ifndef NDEBUG - sqlite_int64 iLastDocidDelta; -#endif - - /* Recode the initial docid as delta from iPrevDocid. */ - nFirstOld = getVarint(pData, &iDocid); - assert( nFirstOldiType==DL_DOCIDS) ); - nFirstNew = putVarint(c, iFirstDocid-pWriter->iPrevDocid); - - /* Verify that the incoming doclist is valid AND that it ends with - ** the expected docid. This is essential because we'll trust this - ** docid in future delta-encoding. - */ - ASSERT_VALID_DOCLIST(pWriter->iType, pData, nData, &iLastDocidDelta); - assert( iLastDocid==iFirstDocid-iDocid+iLastDocidDelta ); - - /* Append recoded initial docid and everything else. Rest of docids - ** should have been delta-encoded from previous initial docid. 
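The delta encoding that dlwAppend() is maintaining here is simply a running difference over ascending docids; a minimal sketch with plain arrays and no varints:

```c
#include <assert.h>
#include <stdio.h>

int main(void){
  long long aDocid[] = { 7, 19, 23, 100 };
  long long aDelta[4], iPrev, docid;
  int i;

  iPrev = 0;
  for(i=0; i<4; i++){           /* encode: store the difference              */
    aDelta[i] = aDocid[i] - iPrev;
    iPrev = aDocid[i];
  }

  docid = 0;
  for(i=0; i<4; i++){           /* decode: a running sum restores the docids */
    docid += aDelta[i];
    assert( docid==aDocid[i] );
    printf("delta %lld -> docid %lld\n", aDelta[i], docid);
  }
  return 0;
}
```

Keeping the stored differences small is what makes the varint encoding pay off.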
- */ - if( nFirstOldb, c, nFirstNew, - pData+nFirstOld, nData-nFirstOld); - }else{ - dataBufferAppend(pWriter->b, c, nFirstNew); - } - pWriter->iPrevDocid = iLastDocid; -} -static void dlwCopy(DLWriter *pWriter, DLReader *pReader){ - dlwAppend(pWriter, dlrDocData(pReader), dlrDocDataBytes(pReader), - dlrDocid(pReader), dlrDocid(pReader)); -} -static void dlwAdd(DLWriter *pWriter, sqlite_int64 iDocid){ - char c[VARINT_MAX]; - int n = putVarint(c, iDocid-pWriter->iPrevDocid); - - /* Docids must ascend. */ - assert( !pWriter->has_iPrevDocid || iDocid>pWriter->iPrevDocid ); - assert( pWriter->iType==DL_DOCIDS ); - - dataBufferAppend(pWriter->b, c, n); - pWriter->iPrevDocid = iDocid; -#ifndef NDEBUG - pWriter->has_iPrevDocid = 1; -#endif -} - -/*******************************************************************/ -/* PLReader is used to read data from a document's position list. As -** the caller steps through the list, data is cached so that varints -** only need to be decoded once. -** -** plrInit, plrDestroy - create/destroy a reader. -** plrColumn, plrPosition, plrStartOffset, plrEndOffset - accessors -** plrAtEnd - at end of stream, only call plrDestroy once true. -** plrStep - step to the next element. -*/ -typedef struct PLReader { - /* These refer to the next position's data. nData will reach 0 when - ** reading the last position, so plrStep() signals EOF by setting - ** pData to NULL. - */ - const char *pData; - int nData; - - DocListType iType; - int iColumn; /* the last column read */ - int iPosition; /* the last position read */ - int iStartOffset; /* the last start offset read */ - int iEndOffset; /* the last end offset read */ -} PLReader; - -static int plrAtEnd(PLReader *pReader){ - return pReader->pData==NULL; -} -static int plrColumn(PLReader *pReader){ - assert( !plrAtEnd(pReader) ); - return pReader->iColumn; -} -static int plrPosition(PLReader *pReader){ - assert( !plrAtEnd(pReader) ); - return pReader->iPosition; -} -static int plrStartOffset(PLReader *pReader){ - assert( !plrAtEnd(pReader) ); - return pReader->iStartOffset; -} -static int plrEndOffset(PLReader *pReader){ - assert( !plrAtEnd(pReader) ); - return pReader->iEndOffset; -} -static void plrStep(PLReader *pReader){ - int i, n; - - assert( !plrAtEnd(pReader) ); - - if( pReader->nData==0 ){ - pReader->pData = NULL; - return; - } - - n = getVarint32(pReader->pData, &i); - if( i==POS_COLUMN ){ - n += getVarint32(pReader->pData+n, &pReader->iColumn); - pReader->iPosition = 0; - pReader->iStartOffset = 0; - n += getVarint32(pReader->pData+n, &i); - } - /* Should never see adjacent column changes. 
*/ - assert( i!=POS_COLUMN ); - - if( i==POS_END ){ - pReader->nData = 0; - pReader->pData = NULL; - return; - } - - pReader->iPosition += i-POS_BASE; - if( pReader->iType==DL_POSITIONS_OFFSETS ){ - n += getVarint32(pReader->pData+n, &i); - pReader->iStartOffset += i; - n += getVarint32(pReader->pData+n, &i); - pReader->iEndOffset = pReader->iStartOffset+i; - } - assert( n<=pReader->nData ); - pReader->pData += n; - pReader->nData -= n; -} - -static void plrInit(PLReader *pReader, DLReader *pDLReader){ - pReader->pData = dlrPosData(pDLReader); - pReader->nData = dlrPosDataLen(pDLReader); - pReader->iType = pDLReader->iType; - pReader->iColumn = 0; - pReader->iPosition = 0; - pReader->iStartOffset = 0; - pReader->iEndOffset = 0; - plrStep(pReader); -} -static void plrDestroy(PLReader *pReader){ - SCRAMBLE(pReader); -} - -/*******************************************************************/ -/* PLWriter is used in constructing a document's position list. As a -** convenience, if iType is DL_DOCIDS, PLWriter becomes a no-op. -** PLWriter writes to the associated DLWriter's buffer. -** -** plwInit - init for writing a document's poslist. -** plwDestroy - clear a writer. -** plwAdd - append position and offset information. -** plwCopy - copy next position's data from reader to writer. -** plwTerminate - add any necessary doclist terminator. -** -** Calling plwAdd() after plwTerminate() may result in a corrupt -** doclist. -*/ -/* TODO(shess) Until we've written the second item, we can cache the -** first item's information. Then we'd have three states: -** -** - initialized with docid, no positions. -** - docid and one position. -** - docid and multiple positions. -** -** Only the last state needs to actually write to dlw->b, which would -** be an improvement in the DLCollector case. -*/ -typedef struct PLWriter { - DLWriter *dlw; - - int iColumn; /* the last column written */ - int iPos; /* the last position written */ - int iOffset; /* the last start offset written */ -} PLWriter; - -/* TODO(shess) In the case where the parent is reading these values -** from a PLReader, we could optimize to a copy if that PLReader has -** the same type as pWriter. -*/ -static void plwAdd(PLWriter *pWriter, int iColumn, int iPos, - int iStartOffset, int iEndOffset){ - /* Worst-case space for POS_COLUMN, iColumn, iPosDelta, - ** iStartOffsetDelta, and iEndOffsetDelta. - */ - char c[5*VARINT_MAX]; - int n = 0; - - /* Ban plwAdd() after plwTerminate(). */ - assert( pWriter->iPos!=-1 ); - - if( pWriter->dlw->iType==DL_DOCIDS ) return; - - if( iColumn!=pWriter->iColumn ){ - n += putVarint(c+n, POS_COLUMN); - n += putVarint(c+n, iColumn); - pWriter->iColumn = iColumn; - pWriter->iPos = 0; - pWriter->iOffset = 0; - } - assert( iPos>=pWriter->iPos ); - n += putVarint(c+n, POS_BASE+(iPos-pWriter->iPos)); - pWriter->iPos = iPos; - if( pWriter->dlw->iType==DL_POSITIONS_OFFSETS ){ - assert( iStartOffset>=pWriter->iOffset ); - n += putVarint(c+n, iStartOffset-pWriter->iOffset); - pWriter->iOffset = iStartOffset; - assert( iEndOffset>=iStartOffset ); - n += putVarint(c+n, iEndOffset-iStartOffset); - } - dataBufferAppend(pWriter->dlw->b, c, n); -} -static void plwCopy(PLWriter *pWriter, PLReader *pReader){ - plwAdd(pWriter, plrColumn(pReader), plrPosition(pReader), - plrStartOffset(pReader), plrEndOffset(pReader)); -} -static void plwInit(PLWriter *pWriter, DLWriter *dlw, sqlite_int64 iDocid){ - char c[VARINT_MAX]; - int n; - - pWriter->dlw = dlw; - - /* Docids must ascend. 
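A sketch of how a DL_POSITIONS position list reads back under the POS_END/POS_COLUMN/POS_BASE scheme used by plwAdd() and plrStep() above, with values chosen so every varint is a single byte:

```c
#include <stdio.h>

enum { POS_END = 0, POS_COLUMN, POS_BASE };   /* as defined earlier in this file */

/* Decode a position list: values >= POS_BASE are position deltas,
** POS_COLUMN switches column and resets the position, POS_END ends the
** list.  The bytes below describe hits at column 0 positions 2 and 5,
** and column 1 position 0. */
int main(void){
  unsigned char aPosList[] = { 4, 5, POS_COLUMN, 1, 2, POS_END };
  int i = 0, iColumn = 0, iPosition = 0;

  for(;;){
    int v = aPosList[i++];            /* every value here fits in one byte */
    if( v==POS_END ) break;
    if( v==POS_COLUMN ){
      iColumn = aPosList[i++];
      iPosition = 0;
      continue;
    }
    iPosition += v - POS_BASE;
    printf("column %d, position %d\n", iColumn, iPosition);
  }
  return 0;
}
```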
*/ - assert( !pWriter->dlw->has_iPrevDocid || iDocid>pWriter->dlw->iPrevDocid ); - n = putVarint(c, iDocid-pWriter->dlw->iPrevDocid); - dataBufferAppend(pWriter->dlw->b, c, n); - pWriter->dlw->iPrevDocid = iDocid; -#ifndef NDEBUG - pWriter->dlw->has_iPrevDocid = 1; -#endif - - pWriter->iColumn = 0; - pWriter->iPos = 0; - pWriter->iOffset = 0; -} -/* TODO(shess) Should plwDestroy() also terminate the doclist? But -** then plwDestroy() would no longer be just a destructor, it would -** also be doing work, which isn't consistent with the overall idiom. -** Another option would be for plwAdd() to always append any necessary -** terminator, so that the output is always correct. But that would -** add incremental work to the common case with the only benefit being -** API elegance. Punt for now. -*/ -static void plwTerminate(PLWriter *pWriter){ - if( pWriter->dlw->iType>DL_DOCIDS ){ - char c[VARINT_MAX]; - int n = putVarint(c, POS_END); - dataBufferAppend(pWriter->dlw->b, c, n); - } -#ifndef NDEBUG - /* Mark as terminated for assert in plwAdd(). */ - pWriter->iPos = -1; -#endif -} -static void plwDestroy(PLWriter *pWriter){ - SCRAMBLE(pWriter); -} - -/*******************************************************************/ -/* DLCollector wraps PLWriter and DLWriter to provide a -** dynamically-allocated doclist area to use during tokenization. -** -** dlcNew - malloc up and initialize a collector. -** dlcDelete - destroy a collector and all contained items. -** dlcAddPos - append position and offset information. -** dlcAddDoclist - add the collected doclist to the given buffer. -** dlcNext - terminate the current document and open another. -*/ -typedef struct DLCollector { - DataBuffer b; - DLWriter dlw; - PLWriter plw; -} DLCollector; - -/* TODO(shess) This could also be done by calling plwTerminate() and -** dataBufferAppend(). I tried that, expecting nominal performance -** differences, but it seemed to pretty reliably be worth 1% to code -** it this way. I suspect it is the incremental malloc overhead (some -** percentage of the plwTerminate() calls will cause a realloc), so -** this might be worth revisiting if the DataBuffer implementation -** changes. -*/ -static void dlcAddDoclist(DLCollector *pCollector, DataBuffer *b){ - if( pCollector->dlw.iType>DL_DOCIDS ){ - char c[VARINT_MAX]; - int n = putVarint(c, POS_END); - dataBufferAppend2(b, pCollector->b.pData, pCollector->b.nData, c, n); - }else{ - dataBufferAppend(b, pCollector->b.pData, pCollector->b.nData); - } -} -static void dlcNext(DLCollector *pCollector, sqlite_int64 iDocid){ - plwTerminate(&pCollector->plw); - plwDestroy(&pCollector->plw); - plwInit(&pCollector->plw, &pCollector->dlw, iDocid); -} -static void dlcAddPos(DLCollector *pCollector, int iColumn, int iPos, - int iStartOffset, int iEndOffset){ - plwAdd(&pCollector->plw, iColumn, iPos, iStartOffset, iEndOffset); -} - -static DLCollector *dlcNew(sqlite_int64 iDocid, DocListType iType){ - DLCollector *pCollector = sqlite3_malloc(sizeof(DLCollector)); - dataBufferInit(&pCollector->b, 0); - dlwInit(&pCollector->dlw, iType, &pCollector->b); - plwInit(&pCollector->plw, &pCollector->dlw, iDocid); - return pCollector; -} -static void dlcDelete(DLCollector *pCollector){ - plwDestroy(&pCollector->plw); - dlwDestroy(&pCollector->dlw); - dataBufferDestroy(&pCollector->b); - SCRAMBLE(pCollector); - sqlite3_free(pCollector); -} - - -/* Copy the doclist data of iType in pData/nData into *out, trimming -** unnecessary data as we go. 
Only columns matching iColumn are -** copied, all columns copied if iColumn is -1. Elements with no -** matching columns are dropped. The output is an iOutType doclist. -*/ -/* NOTE(shess) This code is only valid after all doclists are merged. -** If this is run before merges, then doclist items which represent -** deletion will be trimmed, and will thus not effect a deletion -** during the merge. -*/ -static void docListTrim(DocListType iType, const char *pData, int nData, - int iColumn, DocListType iOutType, DataBuffer *out){ - DLReader dlReader; - DLWriter dlWriter; - - assert( iOutType<=iType ); - - dlrInit(&dlReader, iType, pData, nData); - dlwInit(&dlWriter, iOutType, out); - - while( !dlrAtEnd(&dlReader) ){ - PLReader plReader; - PLWriter plWriter; - int match = 0; - - plrInit(&plReader, &dlReader); - - while( !plrAtEnd(&plReader) ){ - if( iColumn==-1 || plrColumn(&plReader)==iColumn ){ - if( !match ){ - plwInit(&plWriter, &dlWriter, dlrDocid(&dlReader)); - match = 1; - } - plwAdd(&plWriter, plrColumn(&plReader), plrPosition(&plReader), - plrStartOffset(&plReader), plrEndOffset(&plReader)); - } - plrStep(&plReader); - } - if( match ){ - plwTerminate(&plWriter); - plwDestroy(&plWriter); - } - - plrDestroy(&plReader); - dlrStep(&dlReader); - } - dlwDestroy(&dlWriter); - dlrDestroy(&dlReader); -} - -/* Used by docListMerge() to keep doclists in the ascending order by -** docid, then ascending order by age (so the newest comes first). -*/ -typedef struct OrderedDLReader { - DLReader *pReader; - - /* TODO(shess) If we assume that docListMerge pReaders is ordered by - ** age (which we do), then we could use pReader comparisons to break - ** ties. - */ - int idx; -} OrderedDLReader; - -/* Order eof to end, then by docid asc, idx desc. */ -static int orderedDLReaderCmp(OrderedDLReader *r1, OrderedDLReader *r2){ - if( dlrAtEnd(r1->pReader) ){ - if( dlrAtEnd(r2->pReader) ) return 0; /* Both atEnd(). */ - return 1; /* Only r1 atEnd(). */ - } - if( dlrAtEnd(r2->pReader) ) return -1; /* Only r2 atEnd(). */ - - if( dlrDocid(r1->pReader)pReader) ) return -1; - if( dlrDocid(r1->pReader)>dlrDocid(r2->pReader) ) return 1; - - /* Descending on idx. */ - return r2->idx-r1->idx; -} - -/* Bubble p[0] to appropriate place in p[1..n-1]. Assumes that -** p[1..n-1] is already sorted. -*/ -/* TODO(shess) Is this frequent enough to warrant a binary search? -** Before implementing that, instrument the code to check. In most -** current usage, I expect that p[0] will be less than p[1] a very -** high proportion of the time. -*/ -static void orderedDLReaderReorder(OrderedDLReader *p, int n){ - while( n>1 && orderedDLReaderCmp(p, p+1)>0 ){ - OrderedDLReader tmp = p[0]; - p[0] = p[1]; - p[1] = tmp; - n--; - p++; - } -} - -/* Given an array of doclist readers, merge their doclist elements -** into out in sorted order (by docid), dropping elements from older -** readers when there is a duplicate docid. pReaders is assumed to be -** ordered by age, oldest first. -*/ -/* TODO(shess) nReaders must be <= MERGE_COUNT. This should probably -** be fixed. 
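`docListMerge` keeps its readers fully sorted once up front and afterwards only "re-bubbles" the front element after it advances. A toy version of that reorder step over plain integers, relying on the same invariant that `p[1..n-1]` is already sorted, looks like this (illustrative only):

```c
#include <stdio.h>

/* Bubble p[0] rightward until the whole array is sorted again, assuming
** p[1..n-1] is already in ascending order -- the invariant that
** orderedDLReaderReorder relies on. */
static void reorder_front(int *p, int n){
  while( n > 1 && p[0] > p[1] ){
    int tmp = p[0];
    p[0] = p[1];
    p[1] = tmp;
    p++;
    n--;
  }
}

int main(void){
  int a[] = { 42, 5, 9, 17, 50 };   /* tail 5,9,17,50 is already sorted */
  reorder_front(a, 5);
  for(int i = 0; i < 5; i++) printf("%d ", a[i]);  /* prints: 5 9 17 42 50 */
  printf("\n");
  return 0;
}
```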
-*/ -static void docListMerge(DataBuffer *out, - DLReader *pReaders, int nReaders){ - OrderedDLReader readers[MERGE_COUNT]; - DLWriter writer; - int i, n; - const char *pStart = 0; - int nStart = 0; - sqlite_int64 iFirstDocid = 0, iLastDocid = 0; - - assert( nReaders>0 ); - if( nReaders==1 ){ - dataBufferAppend(out, dlrDocData(pReaders), dlrAllDataBytes(pReaders)); - return; - } - - assert( nReaders<=MERGE_COUNT ); - n = 0; - for(i=0; i0 ){ - orderedDLReaderReorder(readers+i, nReaders-i); - } - - dlwInit(&writer, pReaders[0].iType, out); - while( !dlrAtEnd(readers[0].pReader) ){ - sqlite_int64 iDocid = dlrDocid(readers[0].pReader); - - /* If this is a continuation of the current buffer to copy, extend - ** that buffer. memcpy() seems to be more efficient if it has a - ** lots of data to copy. - */ - if( dlrDocData(readers[0].pReader)==pStart+nStart ){ - nStart += dlrDocDataBytes(readers[0].pReader); - }else{ - if( pStart!=0 ){ - dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); - } - pStart = dlrDocData(readers[0].pReader); - nStart = dlrDocDataBytes(readers[0].pReader); - iFirstDocid = iDocid; - } - iLastDocid = iDocid; - dlrStep(readers[0].pReader); - - /* Drop all of the older elements with the same docid. */ - for(i=1; i0 ){ - orderedDLReaderReorder(readers+i, nReaders-i); - } - } - - /* Copy over any remaining elements. */ - if( nStart>0 ) dlwAppend(&writer, pStart, nStart, iFirstDocid, iLastDocid); - dlwDestroy(&writer); -} - -/* Helper function for posListUnion(). Compares the current position -** between left and right, returning as standard C idiom of <0 if -** left0 if left>right, and 0 if left==right. "End" always -** compares greater. -*/ -static int posListCmp(PLReader *pLeft, PLReader *pRight){ - assert( pLeft->iType==pRight->iType ); - if( pLeft->iType==DL_DOCIDS ) return 0; - - if( plrAtEnd(pLeft) ) return plrAtEnd(pRight) ? 0 : 1; - if( plrAtEnd(pRight) ) return -1; - - if( plrColumn(pLeft)plrColumn(pRight) ) return 1; - - if( plrPosition(pLeft)plrPosition(pRight) ) return 1; - if( pLeft->iType==DL_POSITIONS ) return 0; - - if( plrStartOffset(pLeft)plrStartOffset(pRight) ) return 1; - - if( plrEndOffset(pLeft)plrEndOffset(pRight) ) return 1; - - return 0; -} - -/* Write the union of position lists in pLeft and pRight to pOut. -** "Union" in this case meaning "All unique position tuples". Should -** work with any doclist type, though both inputs and the output -** should be the same type. -*/ -static void posListUnion(DLReader *pLeft, DLReader *pRight, DLWriter *pOut){ - PLReader left, right; - PLWriter writer; - - assert( dlrDocid(pLeft)==dlrDocid(pRight) ); - assert( pLeft->iType==pRight->iType ); - assert( pLeft->iType==pOut->iType ); - - plrInit(&left, pLeft); - plrInit(&right, pRight); - plwInit(&writer, pOut, dlrDocid(pLeft)); - - while( !plrAtEnd(&left) || !plrAtEnd(&right) ){ - int c = posListCmp(&left, &right); - if( c<0 ){ - plwCopy(&writer, &left); - plrStep(&left); - }else if( c>0 ){ - plwCopy(&writer, &right); - plrStep(&right); - }else{ - plwCopy(&writer, &left); - plrStep(&left); - plrStep(&right); - } - } - - plwTerminate(&writer); - plwDestroy(&writer); - plrDestroy(&left); - plrDestroy(&right); -} - -/* Write the union of doclists in pLeft and pRight to pOut. For -** docids in common between the inputs, the union of the position -** lists is written. Inputs and outputs are always type DL_DEFAULT. 
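The merge above walks several doclists that are ordered from oldest to newest and, whenever the same docid appears more than once, keeps only the entry from the newest list. The following stand-alone sketch reproduces that "newest wins" behaviour over small in-memory arrays; the structures and names are invented for illustration and do not correspond to the DLReader machinery.

```c
#include <stdio.h>

typedef struct { int docid; char payload; } Entry;   /* payload stands in for doc data */

/* Merge in ascending docid order; on a duplicate docid the entry from the
** newest list (largest index) wins and older duplicates are dropped. */
static int merge_newest_wins(Entry **lists, const int *lens, int nLists, Entry *out){
  int pos[8] = {0};
  int nOut = 0;

  for(;;){
    int best = -1;                       /* list holding the smallest current docid */
    for(int i = 0; i < nLists; i++){
      if( pos[i] >= lens[i] ) continue;
      if( best < 0
       || lists[i][pos[i]].docid < lists[best][pos[best]].docid
       || (lists[i][pos[i]].docid == lists[best][pos[best]].docid && i > best) ){
        best = i;                        /* newer list wins ties */
      }
    }
    if( best < 0 ) break;                /* every list exhausted */

    out[nOut++] = lists[best][pos[best]];
    /* Skip this docid in every list so older duplicates are dropped. */
    for(int i = 0; i < nLists; i++){
      while( pos[i] < lens[i] && lists[i][pos[i]].docid == out[nOut-1].docid ) pos[i]++;
    }
  }
  return nOut;
}

int main(void){
  Entry oldSeg[] = { {1,'o'}, {4,'o'}, {9,'o'} };
  Entry midSeg[] = { {4,'m'}, {7,'m'} };
  Entry newSeg[] = { {7,'n'}, {10,'n'} };
  Entry *lists[] = { oldSeg, midSeg, newSeg };
  int lens[] = { 3, 2, 2 };
  Entry out[8];

  int n = merge_newest_wins(lists, lens, 3, out);
  for(int i = 0; i < n; i++) printf("(%d,%c) ", out[i].docid, out[i].payload);
  printf("\n");   /* expected: (1,o) (4,m) (7,n) (9,o) (10,n) */
  return 0;
}
```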
-*/ -static void docListUnion( - const char *pLeft, int nLeft, - const char *pRight, int nRight, - DataBuffer *pOut /* Write the combined doclist here */ -){ - DLReader left, right; - DLWriter writer; - - if( nLeft==0 ){ - if( nRight!=0) dataBufferAppend(pOut, pRight, nRight); - return; - } - if( nRight==0 ){ - dataBufferAppend(pOut, pLeft, nLeft); - return; - } - - dlrInit(&left, DL_DEFAULT, pLeft, nLeft); - dlrInit(&right, DL_DEFAULT, pRight, nRight); - dlwInit(&writer, DL_DEFAULT, pOut); - - while( !dlrAtEnd(&left) || !dlrAtEnd(&right) ){ - if( dlrAtEnd(&right) ){ - dlwCopy(&writer, &left); - dlrStep(&left); - }else if( dlrAtEnd(&left) ){ - dlwCopy(&writer, &right); - dlrStep(&right); - }else if( dlrDocid(&left)dlrDocid(&right) ){ - dlwCopy(&writer, &right); - dlrStep(&right); - }else{ - posListUnion(&left, &right, &writer); - dlrStep(&left); - dlrStep(&right); - } - } - - dlrDestroy(&left); - dlrDestroy(&right); - dlwDestroy(&writer); -} - -/* pLeft and pRight are DLReaders positioned to the same docid. -** -** If there are no instances in pLeft or pRight where the position -** of pLeft is one less than the position of pRight, then this -** routine adds nothing to pOut. -** -** If there are one or more instances where positions from pLeft -** are exactly one less than positions from pRight, then add a new -** document record to pOut. If pOut wants to hold positions, then -** include the positions from pRight that are one more than a -** position in pLeft. In other words: pRight.iPos==pLeft.iPos+1. -*/ -static void posListPhraseMerge(DLReader *pLeft, DLReader *pRight, - DLWriter *pOut){ - PLReader left, right; - PLWriter writer; - int match = 0; - - assert( dlrDocid(pLeft)==dlrDocid(pRight) ); - assert( pOut->iType!=DL_POSITIONS_OFFSETS ); - - plrInit(&left, pLeft); - plrInit(&right, pRight); - - while( !plrAtEnd(&left) && !plrAtEnd(&right) ){ - if( plrColumn(&left)plrColumn(&right) ){ - plrStep(&right); - }else if( plrPosition(&left)+1plrPosition(&right) ){ - plrStep(&right); - }else{ - if( !match ){ - plwInit(&writer, pOut, dlrDocid(pLeft)); - match = 1; - } - plwAdd(&writer, plrColumn(&right), plrPosition(&right), 0, 0); - plrStep(&left); - plrStep(&right); - } - } - - if( match ){ - plwTerminate(&writer); - plwDestroy(&writer); - } - - plrDestroy(&left); - plrDestroy(&right); -} - -/* We have two doclists with positions: pLeft and pRight. -** Write the phrase intersection of these two doclists into pOut. -** -** A phrase intersection means that two documents only match -** if pLeft.iPos+1==pRight.iPos. -** -** iType controls the type of data written to pOut. If iType is -** DL_POSITIONS, the positions are those from pRight. -*/ -static void docListPhraseMerge( - const char *pLeft, int nLeft, - const char *pRight, int nRight, - DocListType iType, - DataBuffer *pOut /* Write the combined doclist here */ -){ - DLReader left, right; - DLWriter writer; - - if( nLeft==0 || nRight==0 ) return; - - assert( iType!=DL_POSITIONS_OFFSETS ); - - dlrInit(&left, DL_POSITIONS, pLeft, nLeft); - dlrInit(&right, DL_POSITIONS, pRight, nRight); - dlwInit(&writer, iType, pOut); - - while( !dlrAtEnd(&left) && !dlrAtEnd(&right) ){ - if( dlrDocid(&left) one AND (two OR three) - * [one OR two three] ==> (one OR two) AND three - * - * A "-" before a term matches all entries that lack that term. - * The "-" must occur immediately before the term with in intervening - * space. This is how the search engines do it. - * - * A NOT term cannot be the right-hand operand of an OR. 
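The phrase-merge logic above keeps a position from the right-hand term only when the left-hand term occurs exactly one position earlier (`pRight.iPos == pLeft.iPos + 1`). Here is a self-contained toy version of that adjacency test over two sorted position arrays from a single document; names and data are illustrative.

```c
#include <stdio.h>

/* Given the sorted positions of word A and word B inside one document,
** keep the B positions that sit exactly one slot after an A position,
** i.e. the places where the phrase "A B" occurs. */
static int phrase_positions(const int *a, int na, const int *b, int nb, int *out){
  int i = 0, j = 0, n = 0;
  while( i < na && j < nb ){
    if( a[i] + 1 < b[j] ){
      i++;
    }else if( a[i] + 1 > b[j] ){
      j++;
    }else{
      out[n++] = b[j];      /* b[j] == a[i] + 1: phrase match ending at b[j] */
      i++;
      j++;
    }
  }
  return n;
}

int main(void){
  int a[] = { 2, 5, 11 };        /* positions of "full" */
  int b[] = { 3, 8, 12, 20 };    /* positions of "text" */
  int out[4];
  int n = phrase_positions(a, 3, b, 4, out);
  for(int i = 0; i < n; i++) printf("%d ", out[i]);   /* prints: 3 12 */
  printf("\n");
  return 0;
}
```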
If this - * occurs in the query string, the NOT is ignored: - * - * [one OR -two] ==> one OR two - * - */ -typedef struct Query { - fulltext_vtab *pFts; /* The full text index */ - int nTerms; /* Number of terms in the query */ - QueryTerm *pTerms; /* Array of terms. Space obtained from malloc() */ - int nextIsOr; /* Set the isOr flag on the next inserted term */ - int nextColumn; /* Next word parsed must be in this column */ - int dfltColumn; /* The default column */ -} Query; - - -/* -** An instance of the following structure keeps track of generated -** matching-word offset information and snippets. -*/ -typedef struct Snippet { - int nMatch; /* Total number of matches */ - int nAlloc; /* Space allocated for aMatch[] */ - struct snippetMatch { /* One entry for each matching term */ - char snStatus; /* Status flag for use while constructing snippets */ - short int iCol; /* The column that contains the match */ - short int iTerm; /* The index in Query.pTerms[] of the matching term */ - short int nByte; /* Number of bytes in the term */ - int iStart; /* The offset to the first character of the term */ - } *aMatch; /* Points to space obtained from malloc */ - char *zOffset; /* Text rendering of aMatch[] */ - int nOffset; /* strlen(zOffset) */ - char *zSnippet; /* Snippet text */ - int nSnippet; /* strlen(zSnippet) */ -} Snippet; - - -typedef enum QueryType { - QUERY_GENERIC, /* table scan */ - QUERY_ROWID, /* lookup by rowid */ - QUERY_FULLTEXT /* QUERY_FULLTEXT + [i] is a full-text search for column i*/ -} QueryType; - -typedef enum fulltext_statement { - CONTENT_INSERT_STMT, - CONTENT_SELECT_STMT, - CONTENT_UPDATE_STMT, - CONTENT_DELETE_STMT, - CONTENT_EXISTS_STMT, - - BLOCK_INSERT_STMT, - BLOCK_SELECT_STMT, - BLOCK_DELETE_STMT, - BLOCK_DELETE_ALL_STMT, - - SEGDIR_MAX_INDEX_STMT, - SEGDIR_SET_STMT, - SEGDIR_SELECT_LEVEL_STMT, - SEGDIR_SPAN_STMT, - SEGDIR_DELETE_STMT, - SEGDIR_SELECT_SEGMENT_STMT, - SEGDIR_SELECT_ALL_STMT, - SEGDIR_DELETE_ALL_STMT, - SEGDIR_COUNT_STMT, - - MAX_STMT /* Always at end! */ -} fulltext_statement; - -/* These must exactly match the enum above. */ -/* TODO(shess): Is there some risk that a statement will be used in two -** cursors at once, e.g. if a query joins a virtual table to itself? -** If so perhaps we should move some of these to the cursor object. -*/ -static const char *const fulltext_zStatement[MAX_STMT] = { - /* CONTENT_INSERT */ NULL, /* generated in contentInsertStatement() */ - /* CONTENT_SELECT */ "select * from %_content where rowid = ?", - /* CONTENT_UPDATE */ NULL, /* generated in contentUpdateStatement() */ - /* CONTENT_DELETE */ "delete from %_content where rowid = ?", - /* CONTENT_EXISTS */ "select rowid from %_content limit 1", - - /* BLOCK_INSERT */ "insert into %_segments values (?)", - /* BLOCK_SELECT */ "select block from %_segments where rowid = ?", - /* BLOCK_DELETE */ "delete from %_segments where rowid between ? and ?", - /* BLOCK_DELETE_ALL */ "delete from %_segments", - - /* SEGDIR_MAX_INDEX */ "select max(idx) from %_segdir where level = ?", - /* SEGDIR_SET */ "insert into %_segdir values (?, ?, ?, ?, ?, ?)", - /* SEGDIR_SELECT_LEVEL */ - "select start_block, leaves_end_block, root from %_segdir " - " where level = ? order by idx", - /* SEGDIR_SPAN */ - "select min(start_block), max(end_block) from %_segdir " - " where level = ? and start_block <> 0", - /* SEGDIR_DELETE */ "delete from %_segdir where level = ?", - - /* NOTE(shess): The first three results of the following two - ** statements must match. 
- */ - /* SEGDIR_SELECT_SEGMENT */ - "select start_block, leaves_end_block, root from %_segdir " - " where level = ? and idx = ?", - /* SEGDIR_SELECT_ALL */ - "select start_block, leaves_end_block, root from %_segdir " - " order by level desc, idx asc", - /* SEGDIR_DELETE_ALL */ "delete from %_segdir", - /* SEGDIR_COUNT */ "select count(*), ifnull(max(level),0) from %_segdir", -}; - -/* -** A connection to a fulltext index is an instance of the following -** structure. The xCreate and xConnect methods create an instance -** of this structure and xDestroy and xDisconnect free that instance. -** All other methods receive a pointer to the structure as one of their -** arguments. -*/ -struct fulltext_vtab { - sqlite3_vtab base; /* Base class used by SQLite core */ - sqlite3 *db; /* The database connection */ - const char *zDb; /* logical database name */ - const char *zName; /* virtual table name */ - int nColumn; /* number of columns in virtual table */ - char **azColumn; /* column names. malloced */ - char **azContentColumn; /* column names in content table; malloced */ - sqlite3_tokenizer *pTokenizer; /* tokenizer for inserts and queries */ - - /* Precompiled statements which we keep as long as the table is - ** open. - */ - sqlite3_stmt *pFulltextStatements[MAX_STMT]; - - /* Precompiled statements used for segment merges. We run a - ** separate select across the leaf level of each tree being merged. - */ - sqlite3_stmt *pLeafSelectStmts[MERGE_COUNT]; - /* The statement used to prepare pLeafSelectStmts. */ -#define LEAF_SELECT \ - "select block from %_segments where rowid between ? and ? order by rowid" - - /* These buffer pending index updates during transactions. - ** nPendingData estimates the memory size of the pending data. It - ** doesn't include the hash-bucket overhead, nor any malloc - ** overhead. When nPendingData exceeds kPendingThreshold, the - ** buffer is flushed even before the transaction closes. - ** pendingTerms stores the data, and is only valid when nPendingData - ** is >=0 (nPendingData<0 means pendingTerms has not been - ** initialized). iPrevDocid is the last docid written, used to make - ** certain we're inserting in sorted order. - */ - int nPendingData; -#define kPendingThreshold (1*1024*1024) - sqlite_int64 iPrevDocid; - fts2Hash pendingTerms; -}; - -/* -** When the core wants to do a query, it create a cursor using a -** call to xOpen. This structure is an instance of a cursor. It -** is destroyed by xClose. -*/ -typedef struct fulltext_cursor { - sqlite3_vtab_cursor base; /* Base class used by SQLite core */ - QueryType iCursorType; /* Copy of sqlite3_index_info.idxNum */ - sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */ - int eof; /* True if at End Of Results */ - Query q; /* Parsed query string */ - Snippet snippet; /* Cached snippet for the current row */ - int iColumn; /* Column being searched */ - DataBuffer result; /* Doclist results from fulltextQuery */ - DLReader reader; /* Result reader if result not empty */ -} fulltext_cursor; - -static struct fulltext_vtab *cursor_vtab(fulltext_cursor *c){ - return (fulltext_vtab *) c->base.pVtab; -} - -static const sqlite3_module fts2Module; /* forward declaration */ - -/* Return a dynamically generated statement of the form - * insert into %_content (rowid, ...) values (?, ...) 
- */ -static const char *contentInsertStatement(fulltext_vtab *v){ - StringBuffer sb; - int i; - - initStringBuffer(&sb); - append(&sb, "insert into %_content (rowid, "); - appendList(&sb, v->nColumn, v->azContentColumn); - append(&sb, ") values (?"); - for(i=0; inColumn; ++i) - append(&sb, ", ?"); - append(&sb, ")"); - return stringBufferData(&sb); -} - -/* Return a dynamically generated statement of the form - * update %_content set [col_0] = ?, [col_1] = ?, ... - * where rowid = ? - */ -static const char *contentUpdateStatement(fulltext_vtab *v){ - StringBuffer sb; - int i; - - initStringBuffer(&sb); - append(&sb, "update %_content set "); - for(i=0; inColumn; ++i) { - if( i>0 ){ - append(&sb, ", "); - } - append(&sb, v->azContentColumn[i]); - append(&sb, " = ?"); - } - append(&sb, " where rowid = ?"); - return stringBufferData(&sb); -} - -/* Puts a freshly-prepared statement determined by iStmt in *ppStmt. -** If the indicated statement has never been prepared, it is prepared -** and cached, otherwise the cached version is reset. -*/ -static int sql_get_statement(fulltext_vtab *v, fulltext_statement iStmt, - sqlite3_stmt **ppStmt){ - assert( iStmtpFulltextStatements[iStmt]==NULL ){ - const char *zStmt; - int rc; - switch( iStmt ){ - case CONTENT_INSERT_STMT: - zStmt = contentInsertStatement(v); break; - case CONTENT_UPDATE_STMT: - zStmt = contentUpdateStatement(v); break; - default: - zStmt = fulltext_zStatement[iStmt]; - } - rc = sql_prepare(v->db, v->zDb, v->zName, &v->pFulltextStatements[iStmt], - zStmt); - if( zStmt != fulltext_zStatement[iStmt]) sqlite3_free((void *) zStmt); - if( rc!=SQLITE_OK ) return rc; - } else { - int rc = sqlite3_reset(v->pFulltextStatements[iStmt]); - if( rc!=SQLITE_OK ) return rc; - } - - *ppStmt = v->pFulltextStatements[iStmt]; - return SQLITE_OK; -} - -/* Like sqlite3_step(), but convert SQLITE_DONE to SQLITE_OK and -** SQLITE_ROW to SQLITE_ERROR. Useful for statements like UPDATE, -** where we expect no results. -*/ -static int sql_single_step(sqlite3_stmt *s){ - int rc = sqlite3_step(s); - return (rc==SQLITE_DONE) ? SQLITE_OK : rc; -} - -/* Like sql_get_statement(), but for special replicated LEAF_SELECT -** statements. idx -1 is a special case for an uncached version of -** the statement (used in the optimize implementation). -*/ -/* TODO(shess) Write version for generic statements and then share -** that between the cached-statement functions. -*/ -static int sql_get_leaf_statement(fulltext_vtab *v, int idx, - sqlite3_stmt **ppStmt){ - assert( idx>=-1 && idxdb, v->zDb, v->zName, ppStmt, LEAF_SELECT); - }else if( v->pLeafSelectStmts[idx]==NULL ){ - int rc = sql_prepare(v->db, v->zDb, v->zName, &v->pLeafSelectStmts[idx], - LEAF_SELECT); - if( rc!=SQLITE_OK ) return rc; - }else{ - int rc = sqlite3_reset(v->pLeafSelectStmts[idx]); - if( rc!=SQLITE_OK ) return rc; - } - - *ppStmt = v->pLeafSelectStmts[idx]; - return SQLITE_OK; -} - -/* insert into %_content (rowid, ...) values ([rowid], [pValues]) */ -static int content_insert(fulltext_vtab *v, sqlite3_value *rowid, - sqlite3_value **pValues){ - sqlite3_stmt *s; - int i; - int rc = sql_get_statement(v, CONTENT_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_value(s, 1, rowid); - if( rc!=SQLITE_OK ) return rc; - - for(i=0; inColumn; ++i){ - rc = sqlite3_bind_value(s, 2+i, pValues[i]); - if( rc!=SQLITE_OK ) return rc; - } - - return sql_single_step(s); -} - -/* update %_content set col0 = pValues[0], col1 = pValues[1], ... 
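`sql_get_statement` above prepares each statement at most once and, on later uses, simply resets the cached handle. A minimal stand-alone illustration of that prepare-once / reset-on-reuse pattern with the public SQLite API follows; the table and SQL are invented for the example and are not the `%_content` statements themselves.

```c
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(x INTEGER)", 0, 0, 0);

  /* Prepare the INSERT once... */
  sqlite3_prepare_v2(db, "INSERT INTO t VALUES (?)", -1, &pStmt, 0);

  /* ...then rebind and reset for every use, instead of re-preparing. */
  for(int i = 0; i < 3; i++){
    sqlite3_bind_int(pStmt, 1, i);
    int rc = sqlite3_step(pStmt);          /* SQLITE_DONE expected */
    if( rc != SQLITE_DONE ) fprintf(stderr, "step rc=%d\n", rc);
    sqlite3_reset(pStmt);                  /* ready for the next bind */
  }

  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}
```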
- * where rowid = [iRowid] */ -static int content_update(fulltext_vtab *v, sqlite3_value **pValues, - sqlite_int64 iRowid){ - sqlite3_stmt *s; - int i; - int rc = sql_get_statement(v, CONTENT_UPDATE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - for(i=0; inColumn; ++i){ - rc = sqlite3_bind_value(s, 1+i, pValues[i]); - if( rc!=SQLITE_OK ) return rc; - } - - rc = sqlite3_bind_int64(s, 1+v->nColumn, iRowid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -static void freeStringArray(int nString, const char **pString){ - int i; - - for (i=0 ; i < nString ; ++i) { - if( pString[i]!=NULL ) sqlite3_free((void *) pString[i]); - } - sqlite3_free((void *) pString); -} - -/* select * from %_content where rowid = [iRow] - * The caller must delete the returned array and all strings in it. - * null fields will be NULL in the returned array. - * - * TODO: Perhaps we should return pointer/length strings here for consistency - * with other code which uses pointer/length. */ -static int content_select(fulltext_vtab *v, sqlite_int64 iRow, - const char ***pValues){ - sqlite3_stmt *s; - const char **values; - int i; - int rc; - - *pValues = NULL; - - rc = sql_get_statement(v, CONTENT_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc!=SQLITE_ROW ) return rc; - - values = (const char **) sqlite3_malloc(v->nColumn * sizeof(const char *)); - for(i=0; inColumn; ++i){ - if( sqlite3_column_type(s, i)==SQLITE_NULL ){ - values[i] = NULL; - }else{ - values[i] = string_dup((char*)sqlite3_column_text(s, i)); - } - } - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ){ - *pValues = values; - return SQLITE_OK; - } - - freeStringArray(v->nColumn, values); - return rc; -} - -/* delete from %_content where rowid = [iRow ] */ -static int content_delete(fulltext_vtab *v, sqlite_int64 iRow){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iRow); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -/* Returns SQLITE_ROW if any rows exist in %_content, SQLITE_DONE if -** no rows exist, and any error in case of failure. -*/ -static int content_exists(fulltext_vtab *v){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, CONTENT_EXISTS_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc!=SQLITE_ROW ) return rc; - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. 
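Several of the helpers above read exactly one row and then call `sqlite3_step()` once more so the statement reaches `SQLITE_DONE` and releases its locks. A small stand-alone example of that pattern, with an invented table, is shown below.

```c
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *s = 0;

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE doc(body TEXT);"
                   "INSERT INTO doc(rowid, body) VALUES (7, 'hello');", 0, 0, 0);

  sqlite3_prepare_v2(db, "SELECT body FROM doc WHERE rowid = ?", -1, &s, 0);
  sqlite3_bind_int64(s, 1, 7);

  if( sqlite3_step(s) == SQLITE_ROW ){
    printf("body = %s\n", (const char*)sqlite3_column_text(s, 0));
    /* One more step: SQLITE_DONE confirms the single expected row was the
    ** only one, and the statement is no longer holding the table. */
    if( sqlite3_step(s) != SQLITE_DONE ) fprintf(stderr, "unexpected extra row\n");
  }

  sqlite3_finalize(s);
  sqlite3_close(db);
  return 0;
}
```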
*/ - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ) return SQLITE_ROW; - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - return rc; -} - -/* insert into %_segments values ([pData]) -** returns assigned rowid in *piBlockid -*/ -static int block_insert(fulltext_vtab *v, const char *pData, int nData, - sqlite_int64 *piBlockid){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, BLOCK_INSERT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 1, pData, nData, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - if( rc!=SQLITE_DONE ) return rc; - - *piBlockid = sqlite3_last_insert_rowid(v->db); - return SQLITE_OK; -} - -/* delete from %_segments -** where rowid between [iStartBlockid] and [iEndBlockid] -** -** Deletes the range of blocks, inclusive, used to delete the blocks -** which form a segment. -*/ -static int block_delete(fulltext_vtab *v, - sqlite_int64 iStartBlockid, sqlite_int64 iEndBlockid){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, BLOCK_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iStartBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, iEndBlockid); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -/* Returns SQLITE_ROW with *pidx set to the maximum segment idx found -** at iLevel. Returns SQLITE_DONE if there are no segments at -** iLevel. Otherwise returns an error. -*/ -static int segdir_max_index(fulltext_vtab *v, int iLevel, int *pidx){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_MAX_INDEX_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 1, iLevel); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - /* Should always get at least one row due to how max() works. */ - if( rc==SQLITE_DONE ) return SQLITE_DONE; - if( rc!=SQLITE_ROW ) return rc; - - /* NULL means that there were no inputs to max(). */ - if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ - rc = sqlite3_step(s); - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - return rc; - } - - *pidx = sqlite3_column_int(s, 0); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - if( rc!=SQLITE_DONE ) return rc; - return SQLITE_ROW; -} - -/* insert into %_segdir values ( -** [iLevel], [idx], -** [iStartBlockid], [iLeavesEndBlockid], [iEndBlockid], -** [pRootData] -** ) -*/ -static int segdir_set(fulltext_vtab *v, int iLevel, int idx, - sqlite_int64 iStartBlockid, - sqlite_int64 iLeavesEndBlockid, - sqlite_int64 iEndBlockid, - const char *pRootData, int nRootData){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_SET_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 1, iLevel); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 2, idx); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 3, iStartBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 4, iLeavesEndBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 5, iEndBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_blob(s, 6, pRootData, nRootData, SQLITE_STATIC); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -/* Queries %_segdir for the block span of the segments in level -** iLevel. 
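`block_insert` above binds the segment data as a blob, steps the INSERT, and reads the assigned rowid back with `sqlite3_last_insert_rowid()`. The same sequence in a self-contained form (table name invented for the example):

```c
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  sqlite3_stmt *s = 0;
  const char data[] = "segment leaf bytes";

  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE segments(block BLOB)", 0, 0, 0);

  sqlite3_prepare_v2(db, "INSERT INTO segments VALUES (?)", -1, &s, 0);
  /* SQLITE_STATIC: the buffer outlives the statement, so SQLite makes no copy. */
  sqlite3_bind_blob(s, 1, data, (int)sizeof(data), SQLITE_STATIC);

  if( sqlite3_step(s) == SQLITE_DONE ){
    sqlite3_int64 blockid = sqlite3_last_insert_rowid(db);
    printf("new blockid = %lld\n", (long long)blockid);
  }

  sqlite3_finalize(s);
  sqlite3_close(db);
  return 0;
}
```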
Returns SQLITE_DONE if there are no blocks for iLevel, -** SQLITE_ROW if there are blocks, else an error. -*/ -static int segdir_span(fulltext_vtab *v, int iLevel, - sqlite_int64 *piStartBlockid, - sqlite_int64 *piEndBlockid){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_SPAN_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 1, iLevel); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ) return SQLITE_DONE; /* Should never happen */ - if( rc!=SQLITE_ROW ) return rc; - - /* This happens if all segments at this level are entirely inline. */ - if( SQLITE_NULL==sqlite3_column_type(s, 0) ){ - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - int rc2 = sqlite3_step(s); - if( rc2==SQLITE_ROW ) return SQLITE_ERROR; - return rc2; - } - - *piStartBlockid = sqlite3_column_int64(s, 0); - *piEndBlockid = sqlite3_column_int64(s, 1); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. */ - rc = sqlite3_step(s); - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - if( rc!=SQLITE_DONE ) return rc; - return SQLITE_ROW; -} - -/* Delete the segment blocks and segment directory records for all -** segments at iLevel. -*/ -static int segdir_delete(fulltext_vtab *v, int iLevel){ - sqlite3_stmt *s; - sqlite_int64 iStartBlockid, iEndBlockid; - int rc = segdir_span(v, iLevel, &iStartBlockid, &iEndBlockid); - if( rc!=SQLITE_ROW && rc!=SQLITE_DONE ) return rc; - - if( rc==SQLITE_ROW ){ - rc = block_delete(v, iStartBlockid, iEndBlockid); - if( rc!=SQLITE_OK ) return rc; - } - - /* Delete the segment directory itself. */ - rc = sql_get_statement(v, SEGDIR_DELETE_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iLevel); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -/* Delete entire fts index, SQLITE_OK on success, relevant error on -** failure. -*/ -static int segdir_delete_all(fulltext_vtab *v){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_DELETE_ALL_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_single_step(s); - if( rc!=SQLITE_OK ) return rc; - - rc = sql_get_statement(v, BLOCK_DELETE_ALL_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - return sql_single_step(s); -} - -/* Returns SQLITE_OK with *pnSegments set to the number of entries in -** %_segdir and *piMaxLevel set to the highest level which has a -** segment. Otherwise returns the SQLite error which caused failure. -*/ -static int segdir_count(fulltext_vtab *v, int *pnSegments, int *piMaxLevel){ - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_COUNT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - /* TODO(shess): This case should not be possible? Should stronger - ** measures be taken if it happens? - */ - if( rc==SQLITE_DONE ){ - *pnSegments = 0; - *piMaxLevel = 0; - return SQLITE_OK; - } - if( rc!=SQLITE_ROW ) return rc; - - *pnSegments = sqlite3_column_int(s, 0); - *piMaxLevel = sqlite3_column_int(s, 1); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain locked. 
*/ - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ) return SQLITE_OK; - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - return rc; -} - -/* TODO(shess) clearPendingTerms() is far down the file because -** writeZeroSegment() is far down the file because LeafWriter is far -** down the file. Consider refactoring the code to move the non-vtab -** code above the vtab code so that we don't need this forward -** reference. -*/ -static int clearPendingTerms(fulltext_vtab *v); - -/* -** Free the memory used to contain a fulltext_vtab structure. -*/ -static void fulltext_vtab_destroy(fulltext_vtab *v){ - int iStmt, i; - - TRACE(("FTS2 Destroy %p\n", v)); - for( iStmt=0; iStmtpFulltextStatements[iStmt]!=NULL ){ - sqlite3_finalize(v->pFulltextStatements[iStmt]); - v->pFulltextStatements[iStmt] = NULL; - } - } - - for( i=0; ipLeafSelectStmts[i]!=NULL ){ - sqlite3_finalize(v->pLeafSelectStmts[i]); - v->pLeafSelectStmts[i] = NULL; - } - } - - if( v->pTokenizer!=NULL ){ - v->pTokenizer->pModule->xDestroy(v->pTokenizer); - v->pTokenizer = NULL; - } - - clearPendingTerms(v); - - sqlite3_free(v->azColumn); - for(i = 0; i < v->nColumn; ++i) { - sqlite3_free(v->azContentColumn[i]); - } - sqlite3_free(v->azContentColumn); - sqlite3_free(v); -} - -/* -** Token types for parsing the arguments to xConnect or xCreate. -*/ -#define TOKEN_EOF 0 /* End of file */ -#define TOKEN_SPACE 1 /* Any kind of whitespace */ -#define TOKEN_ID 2 /* An identifier */ -#define TOKEN_STRING 3 /* A string literal */ -#define TOKEN_PUNCT 4 /* A single punctuation character */ - -/* -** If X is a character that can be used in an identifier then -** IdChar(X) will be true. Otherwise it is false. -** -** For ASCII, any character with the high-order bit set is -** allowed in an identifier. For 7-bit characters, -** sqlite3IsIdChar[X] must be 1. -** -** Ticket #1066. the SQL standard does not allow '$' in the -** middle of identfiers. But many SQL implementations do. -** SQLite will allow '$' in identifiers for compatibility. -** But the feature is undocumented. -*/ -static const char isIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 2x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ -}; -#define IdChar(C) (((c=C)&0x80)!=0 || (c>0x1f && isIdChar[c-0x20])) - - -/* -** Return the length of the token that begins at z[0]. -** Store the token type in *tokenType before returning. -*/ -static int getToken(const char *z, int *tokenType){ - int i, c; - switch( *z ){ - case 0: { - *tokenType = TOKEN_EOF; - return 0; - } - case ' ': case '\t': case '\n': case '\f': case '\r': { - for(i=1; safe_isspace(z[i]); i++){} - *tokenType = TOKEN_SPACE; - return i; - } - case '`': - case '\'': - case '"': { - int delim = z[0]; - for(i=1; (c=z[i])!=0; i++){ - if( c==delim ){ - if( z[i+1]==delim ){ - i++; - }else{ - break; - } - } - } - *tokenType = TOKEN_STRING; - return i + (c!=0); - } - case '[': { - for(i=1, c=z[0]; c!=']' && (c=z[i])!=0; i++){} - *tokenType = TOKEN_ID; - return i; - } - default: { - if( !IdChar(*z) ){ - break; - } - for(i=1; IdChar(z[i]); i++){} - *tokenType = TOKEN_ID; - return i; - } - } - *tokenType = TOKEN_PUNCT; - return 1; -} - -/* -** A token extracted from a string is an instance of the following -** structure. 
-*/ -typedef struct Token { - const char *z; /* Pointer to token text. Not '\000' terminated */ - short int n; /* Length of the token text in bytes. */ -} Token; - -/* -** Given a input string (which is really one of the argv[] parameters -** passed into xConnect or xCreate) split the string up into tokens. -** Return an array of pointers to '\000' terminated strings, one string -** for each non-whitespace token. -** -** The returned array is terminated by a single NULL pointer. -** -** Space to hold the returned array is obtained from a single -** malloc and should be freed by passing the return value to free(). -** The individual strings within the token list are all a part of -** the single memory allocation and will all be freed at once. -*/ -static char **tokenizeString(const char *z, int *pnToken){ - int nToken = 0; - Token *aToken = sqlite3_malloc( strlen(z) * sizeof(aToken[0]) ); - int n = 1; - int e, i; - int totalSize = 0; - char **azToken; - char *zCopy; - while( n>0 ){ - n = getToken(z, &e); - if( e!=TOKEN_SPACE ){ - aToken[nToken].z = z; - aToken[nToken].n = n; - nToken++; - totalSize += n+1; - } - z += n; - } - azToken = (char**)sqlite3_malloc( nToken*sizeof(char*) + totalSize ); - zCopy = (char*)&azToken[nToken]; - nToken--; - for(i=0; i=0 ){ - azIn[j] = azIn[i]; - } - j++; - } - } - azIn[j] = 0; - } -} - - -/* -** Find the first alphanumeric token in the string zIn. Null-terminate -** this token. Remove any quotation marks. And return a pointer to -** the result. -*/ -static char *firstToken(char *zIn, char **pzTail){ - int n, ttype; - while(1){ - n = getToken(zIn, &ttype); - if( ttype==TOKEN_SPACE ){ - zIn += n; - }else if( ttype==TOKEN_EOF ){ - *pzTail = zIn; - return 0; - }else{ - zIn[n] = 0; - *pzTail = &zIn[1]; - dequoteString(zIn); - return zIn; - } - } - /*NOTREACHED*/ -} - -/* Return true if... -** -** * s begins with the string t, ignoring case -** * s is longer than t -** * The first character of s beyond t is not a alphanumeric -** -** Ignore leading space in *s. -** -** To put it another way, return true if the first token of -** s[] is t[]. -*/ -static int startsWith(const char *s, const char *t){ - while( safe_isspace(*s) ){ s++; } - while( *t ){ - if( safe_tolower(*s++)!=safe_tolower(*t++) ) return 0; - } - return *s!='_' && !safe_isalnum(*s); -} - -/* -** An instance of this structure defines the "spec" of a -** full text index. This structure is populated by parseSpec -** and use by fulltextConnect and fulltextCreate. -*/ -typedef struct TableSpec { - const char *zDb; /* Logical database name */ - const char *zName; /* Name of the full-text index */ - int nColumn; /* Number of columns to be indexed */ - char **azColumn; /* Original names of columns to be indexed */ - char **azContentColumn; /* Column names for %_content */ - char **azTokenizer; /* Name of tokenizer and its arguments */ -} TableSpec; - -/* -** Reclaim all of the memory used by a TableSpec -*/ -static void clearTableSpec(TableSpec *p) { - sqlite3_free(p->azColumn); - sqlite3_free(p->azContentColumn); - sqlite3_free(p->azTokenizer); -} - -/* Parse a CREATE VIRTUAL TABLE statement, which looks like this: - * - * CREATE VIRTUAL TABLE email - * USING fts2(subject, body, tokenize mytokenizer(myarg)) - * - * We return parsed information in a TableSpec structure. 
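For the example statement in the comment above, `parseSpec` receives the usual SQLite virtual-table argument layout: module name, database name, table name, then the user-supplied arguments. The values below are a rough, illustrative sketch of what that argv looks like, not output captured from the code.

```c
#include <stdio.h>

int main(void){
  const char *argv[] = {
    "fts2",                        /* argv[0]: module name             */
    "main",                        /* argv[1]: database name           */
    "email",                       /* argv[2]: virtual table name      */
    "subject",                     /* argv[3..]: indexed columns ...   */
    "body",
    "tokenize mytokenizer(myarg)"  /* ... optionally a tokenizer spec  */
  };
  int argc = (int)(sizeof(argv)/sizeof(argv[0]));

  for(int i = 0; i < argc; i++) printf("argv[%d] = %s\n", i, argv[i]);
  return 0;
}
```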
- * - */ -static int parseSpec(TableSpec *pSpec, int argc, const char *const*argv, - char**pzErr){ - int i, n; - char *z, *zDummy; - char **azArg; - const char *zTokenizer = 0; /* argv[] entry describing the tokenizer */ - - assert( argc>=3 ); - /* Current interface: - ** argv[0] - module name - ** argv[1] - database name - ** argv[2] - table name - ** argv[3..] - columns, optionally followed by tokenizer specification - ** and snippet delimiters specification. - */ - - /* Make a copy of the complete argv[][] array in a single allocation. - ** The argv[][] array is read-only and transient. We can write to the - ** copy in order to modify things and the copy is persistent. - */ - CLEAR(pSpec); - for(i=n=0; izDb = azArg[1]; - pSpec->zName = azArg[2]; - pSpec->nColumn = 0; - pSpec->azColumn = azArg; - zTokenizer = "tokenize simple"; - for(i=3; inColumn] = firstToken(azArg[i], &zDummy); - pSpec->nColumn++; - } - } - if( pSpec->nColumn==0 ){ - azArg[0] = "content"; - pSpec->nColumn = 1; - } - - /* - ** Construct the list of content column names. - ** - ** Each content column name will be of the form cNNAAAA - ** where NN is the column number and AAAA is the sanitized - ** column name. "sanitized" means that special characters are - ** converted to "_". The cNN prefix guarantees that all column - ** names are unique. - ** - ** The AAAA suffix is not strictly necessary. It is included - ** for the convenience of people who might examine the generated - ** %_content table and wonder what the columns are used for. - */ - pSpec->azContentColumn = sqlite3_malloc( pSpec->nColumn * sizeof(char *) ); - if( pSpec->azContentColumn==0 ){ - clearTableSpec(pSpec); - return SQLITE_NOMEM; - } - for(i=0; inColumn; i++){ - char *p; - pSpec->azContentColumn[i] = sqlite3_mprintf("c%d%s", i, azArg[i]); - for (p = pSpec->azContentColumn[i]; *p ; ++p) { - if( !safe_isalnum(*p) ) *p = '_'; - } - } - - /* - ** Parse the tokenizer specification string. - */ - pSpec->azTokenizer = tokenizeString(zTokenizer, &n); - tokenListToIdList(pSpec->azTokenizer); - - return SQLITE_OK; -} - -/* -** Generate a CREATE TABLE statement that describes the schema of -** the virtual table. Return a pointer to this schema string. -** -** Space is obtained from sqlite3_mprintf() and should be freed -** using sqlite3_free(). 
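The `%_content` column names described above are formed from a `cNN` prefix (for uniqueness) plus the user's column name with anything non-alphanumeric flattened to `_`. A stand-alone sketch of that sanitization, using `snprintf` in place of `sqlite3_mprintf` and plain `isalnum` in place of the file's `safe_isalnum`:

```c
#include <stdio.h>
#include <ctype.h>

int main(void){
  const char *azCol[] = { "subject", "body text", "e-mail" };

  for(int i = 0; i < 3; i++){
    char zName[64];
    snprintf(zName, sizeof(zName), "c%d%s", i, azCol[i]);
    for(char *p = zName; *p; p++){
      if( !isalnum((unsigned char)*p) ) *p = '_';
    }
    printf("%s -> %s\n", azCol[i], zName);   /* c0subject, c1body_text, c2e_mail */
  }
  return 0;
}
```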
-*/ -static char *fulltextSchema( - int nColumn, /* Number of columns */ - const char *const* azColumn, /* List of columns */ - const char *zTableName /* Name of the table */ -){ - int i; - char *zSchema, *zNext; - const char *zSep = "("; - zSchema = sqlite3_mprintf("CREATE TABLE x"); - for(i=0; ibase */ - v->db = db; - v->zDb = spec->zDb; /* Freed when azColumn is freed */ - v->zName = spec->zName; /* Freed when azColumn is freed */ - v->nColumn = spec->nColumn; - v->azContentColumn = spec->azContentColumn; - spec->azContentColumn = 0; - v->azColumn = spec->azColumn; - spec->azColumn = 0; - - if( spec->azTokenizer==0 ){ - return SQLITE_NOMEM; - } - - zTok = spec->azTokenizer[0]; - if( !zTok ){ - zTok = "simple"; - } - nTok = strlen(zTok)+1; - - m = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zTok, nTok); - if( !m ){ - *pzErr = sqlite3_mprintf("unknown tokenizer: %s", spec->azTokenizer[0]); - rc = SQLITE_ERROR; - goto err; - } - - for(n=0; spec->azTokenizer[n]; n++){} - if( n ){ - rc = m->xCreate(n-1, (const char*const*)&spec->azTokenizer[1], - &v->pTokenizer); - }else{ - rc = m->xCreate(0, 0, &v->pTokenizer); - } - if( rc!=SQLITE_OK ) goto err; - v->pTokenizer->pModule = m; - - /* TODO: verify the existence of backing tables foo_content, foo_term */ - - schema = fulltextSchema(v->nColumn, (const char*const*)v->azColumn, - spec->zName); - rc = sqlite3_declare_vtab(db, schema); - sqlite3_free(schema); - if( rc!=SQLITE_OK ) goto err; - - memset(v->pFulltextStatements, 0, sizeof(v->pFulltextStatements)); - - /* Indicate that the buffer is not live. */ - v->nPendingData = -1; - - *ppVTab = &v->base; - TRACE(("FTS2 Connect %p\n", v)); - - return rc; - -err: - fulltext_vtab_destroy(v); - return rc; -} - -static int fulltextConnect( - sqlite3 *db, - void *pAux, - int argc, const char *const*argv, - sqlite3_vtab **ppVTab, - char **pzErr -){ - TableSpec spec; - int rc = parseSpec(&spec, argc, argv, pzErr); - if( rc!=SQLITE_OK ) return rc; - - rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); - clearTableSpec(&spec); - return rc; -} - -/* The %_content table holds the text of each document, with -** the rowid used as the docid. -*/ -/* TODO(shess) This comment needs elaboration to match the updated -** code. Work it into the top-of-file comment at that time. -*/ -static int fulltextCreate(sqlite3 *db, void *pAux, - int argc, const char * const *argv, - sqlite3_vtab **ppVTab, char **pzErr){ - int rc; - TableSpec spec; - StringBuffer schema; - TRACE(("FTS2 Create\n")); - - rc = parseSpec(&spec, argc, argv, pzErr); - if( rc!=SQLITE_OK ) return rc; - - initStringBuffer(&schema); - append(&schema, "CREATE TABLE %_content("); - appendList(&schema, spec.nColumn, spec.azContentColumn); - append(&schema, ")"); - rc = sql_exec(db, spec.zDb, spec.zName, stringBufferData(&schema)); - stringBufferDestroy(&schema); - if( rc!=SQLITE_OK ) goto out; - - rc = sql_exec(db, spec.zDb, spec.zName, - "create table %_segments(block blob);"); - if( rc!=SQLITE_OK ) goto out; - - rc = sql_exec(db, spec.zDb, spec.zName, - "create table %_segdir(" - " level integer," - " idx integer," - " start_block integer," - " leaves_end_block integer," - " end_block integer," - " root blob," - " primary key(level, idx)" - ");"); - if( rc!=SQLITE_OK ) goto out; - - rc = constructVtab(db, (fts2Hash *)pAux, &spec, ppVTab, pzErr); - -out: - clearTableSpec(&spec); - return rc; -} - -/* Decide how to handle an SQL query. 
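`fulltextCreate` above provisions three backing stores: `%_content` for the document text, `%_segments` for index blocks, and `%_segdir` for the segment directory. Written out for a concrete virtual table, and assuming the `%_` prefix ends up referring to tables named after that virtual table (here `email_...`), the DDL looks roughly like the sketch below.

```c
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;

  sqlite3_open(":memory:", &db);
  int rc = sqlite3_exec(db,
    "CREATE TABLE email_content(c0subject, c1body);"   /* cNN column naming */
    "CREATE TABLE email_segments(block BLOB);"
    "CREATE TABLE email_segdir("
    "  level INTEGER,"
    "  idx INTEGER,"
    "  start_block INTEGER,"
    "  leaves_end_block INTEGER,"
    "  end_block INTEGER,"
    "  root BLOB,"
    "  PRIMARY KEY(level, idx)"
    ");", 0, 0, &zErr);

  printf("rc=%d %s\n", rc, zErr ? zErr : "ok");
  sqlite3_free(zErr);
  sqlite3_close(db);
  return 0;
}
```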
*/ -static int fulltextBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ - int i; - TRACE(("FTS2 BestIndex\n")); - - for(i=0; inConstraint; ++i){ - const struct sqlite3_index_constraint *pConstraint; - pConstraint = &pInfo->aConstraint[i]; - if( pConstraint->usable ) { - if( pConstraint->iColumn==-1 && - pConstraint->op==SQLITE_INDEX_CONSTRAINT_EQ ){ - pInfo->idxNum = QUERY_ROWID; /* lookup by rowid */ - TRACE(("FTS2 QUERY_ROWID\n")); - } else if( pConstraint->iColumn>=0 && - pConstraint->op==SQLITE_INDEX_CONSTRAINT_MATCH ){ - /* full-text search */ - pInfo->idxNum = QUERY_FULLTEXT + pConstraint->iColumn; - TRACE(("FTS2 QUERY_FULLTEXT %d\n", pConstraint->iColumn)); - } else continue; - - pInfo->aConstraintUsage[i].argvIndex = 1; - pInfo->aConstraintUsage[i].omit = 1; - - /* An arbitrary value for now. - * TODO: Perhaps rowid matches should be considered cheaper than - * full-text searches. */ - pInfo->estimatedCost = 1.0; - - return SQLITE_OK; - } - } - pInfo->idxNum = QUERY_GENERIC; - return SQLITE_OK; -} - -static int fulltextDisconnect(sqlite3_vtab *pVTab){ - TRACE(("FTS2 Disconnect %p\n", pVTab)); - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextDestroy(sqlite3_vtab *pVTab){ - fulltext_vtab *v = (fulltext_vtab *)pVTab; - int rc; - - TRACE(("FTS2 Destroy %p\n", pVTab)); - rc = sql_exec(v->db, v->zDb, v->zName, - "drop table if exists %_content;" - "drop table if exists %_segments;" - "drop table if exists %_segdir;" - ); - if( rc!=SQLITE_OK ) return rc; - - fulltext_vtab_destroy((fulltext_vtab *)pVTab); - return SQLITE_OK; -} - -static int fulltextOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){ - fulltext_cursor *c; - - c = (fulltext_cursor *) sqlite3_malloc(sizeof(fulltext_cursor)); - if( c ){ - memset(c, 0, sizeof(fulltext_cursor)); - /* sqlite will initialize c->base */ - *ppCursor = &c->base; - TRACE(("FTS2 Open %p: %p\n", pVTab, c)); - return SQLITE_OK; - }else{ - return SQLITE_NOMEM; - } -} - - -/* Free all of the dynamically allocated memory held by *q -*/ -static void queryClear(Query *q){ - int i; - for(i = 0; i < q->nTerms; ++i){ - sqlite3_free(q->pTerms[i].pTerm); - } - sqlite3_free(q->pTerms); - CLEAR(q); -} - -/* Free all of the dynamically allocated memory held by the -** Snippet -*/ -static void snippetClear(Snippet *p){ - sqlite3_free(p->aMatch); - sqlite3_free(p->zOffset); - sqlite3_free(p->zSnippet); - CLEAR(p); -} -/* -** Append a single entry to the p->aMatch[] log. -*/ -static void snippetAppendMatch( - Snippet *p, /* Append the entry to this snippet */ - int iCol, int iTerm, /* The column and query term */ - int iStart, int nByte /* Offset and size of the match */ -){ - int i; - struct snippetMatch *pMatch; - if( p->nMatch+1>=p->nAlloc ){ - p->nAlloc = p->nAlloc*2 + 10; - p->aMatch = sqlite3_realloc(p->aMatch, p->nAlloc*sizeof(p->aMatch[0]) ); - if( p->aMatch==0 ){ - p->nMatch = 0; - p->nAlloc = 0; - return; - } - } - i = p->nMatch++; - pMatch = &p->aMatch[i]; - pMatch->iCol = iCol; - pMatch->iTerm = iTerm; - pMatch->iStart = iStart; - pMatch->nByte = nByte; -} - -/* -** Sizing information for the circular buffer used in snippetOffsetsOfColumn() -*/ -#define FTS2_ROTOR_SZ (32) -#define FTS2_ROTOR_MASK (FTS2_ROTOR_SZ-1) - -/* -** Add entries to pSnippet->aMatch[] for every match that occurs against -** document zDoc[0..nDoc-1] which is stored in column iColumn. 
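The `idxNum` chosen by `fulltextBestIndex` encodes the query plan: `QUERY_GENERIC` for a scan, `QUERY_ROWID` for a rowid lookup, and `QUERY_FULLTEXT + i` for a MATCH against column `i`. A toy decoder for that scheme, using the same enum ordering as the `QueryType` declaration earlier in the file (values assumed to start at zero), is sketched below.

```c
#include <stdio.h>

enum { QUERY_GENERIC = 0, QUERY_ROWID = 1, QUERY_FULLTEXT = 2 };

static void describe(int idxNum){
  if( idxNum == QUERY_GENERIC ){
    printf("full table scan\n");
  }else if( idxNum == QUERY_ROWID ){
    printf("lookup by rowid\n");
  }else{
    /* MATCH against column (idxNum - QUERY_FULLTEXT) of the table. */
    printf("full-text search on column %d\n", idxNum - QUERY_FULLTEXT);
  }
}

int main(void){
  describe(QUERY_GENERIC);
  describe(QUERY_ROWID);
  describe(QUERY_FULLTEXT + 1);   /* a MATCH constraint on the second column */
  return 0;
}
```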
-*/ -static void snippetOffsetsOfColumn( - Query *pQuery, - Snippet *pSnippet, - int iColumn, - const char *zDoc, - int nDoc -){ - const sqlite3_tokenizer_module *pTModule; /* The tokenizer module */ - sqlite3_tokenizer *pTokenizer; /* The specific tokenizer */ - sqlite3_tokenizer_cursor *pTCursor; /* Tokenizer cursor */ - fulltext_vtab *pVtab; /* The full text index */ - int nColumn; /* Number of columns in the index */ - const QueryTerm *aTerm; /* Query string terms */ - int nTerm; /* Number of query string terms */ - int i, j; /* Loop counters */ - int rc; /* Return code */ - unsigned int match, prevMatch; /* Phrase search bitmasks */ - const char *zToken; /* Next token from the tokenizer */ - int nToken; /* Size of zToken */ - int iBegin, iEnd, iPos; /* Offsets of beginning and end */ - - /* The following variables keep a circular buffer of the last - ** few tokens */ - unsigned int iRotor = 0; /* Index of current token */ - int iRotorBegin[FTS2_ROTOR_SZ]; /* Beginning offset of token */ - int iRotorLen[FTS2_ROTOR_SZ]; /* Length of token */ - - pVtab = pQuery->pFts; - nColumn = pVtab->nColumn; - pTokenizer = pVtab->pTokenizer; - pTModule = pTokenizer->pModule; - rc = pTModule->xOpen(pTokenizer, zDoc, nDoc, &pTCursor); - if( rc ) return; - pTCursor->pTokenizer = pTokenizer; - aTerm = pQuery->pTerms; - nTerm = pQuery->nTerms; - if( nTerm>=FTS2_ROTOR_SZ ){ - nTerm = FTS2_ROTOR_SZ - 1; - } - prevMatch = 0; - while(1){ - rc = pTModule->xNext(pTCursor, &zToken, &nToken, &iBegin, &iEnd, &iPos); - if( rc ) break; - iRotorBegin[iRotor&FTS2_ROTOR_MASK] = iBegin; - iRotorLen[iRotor&FTS2_ROTOR_MASK] = iEnd-iBegin; - match = 0; - for(i=0; i=0 && iColnToken ) continue; - if( !aTerm[i].isPrefix && aTerm[i].nTerm1 && (prevMatch & (1<=0; j--){ - int k = (iRotor-j) & FTS2_ROTOR_MASK; - snippetAppendMatch(pSnippet, iColumn, i-j, - iRotorBegin[k], iRotorLen[k]); - } - } - } - prevMatch = match<<1; - iRotor++; - } - pTModule->xClose(pTCursor); -} - - -/* -** Compute all offsets for the current row of the query. -** If the offsets have already been computed, this routine is a no-op. -*/ -static void snippetAllOffsets(fulltext_cursor *p){ - int nColumn; - int iColumn, i; - int iFirst, iLast; - fulltext_vtab *pFts; - - if( p->snippet.nMatch ) return; - if( p->q.nTerms==0 ) return; - pFts = p->q.pFts; - nColumn = pFts->nColumn; - iColumn = (p->iCursorType - QUERY_FULLTEXT); - if( iColumn<0 || iColumn>=nColumn ){ - iFirst = 0; - iLast = nColumn-1; - }else{ - iFirst = iColumn; - iLast = iColumn; - } - for(i=iFirst; i<=iLast; i++){ - const char *zDoc; - int nDoc; - zDoc = (const char*)sqlite3_column_text(p->pStmt, i+1); - nDoc = sqlite3_column_bytes(p->pStmt, i+1); - snippetOffsetsOfColumn(&p->q, &p->snippet, i, zDoc, nDoc); - } -} - -/* -** Convert the information in the aMatch[] array of the snippet -** into the string zOffset[0..nOffset-1]. -*/ -static void snippetOffsetText(Snippet *p){ - int i; - int cnt = 0; - StringBuffer sb; - char zBuf[200]; - if( p->zOffset ) return; - initStringBuffer(&sb); - for(i=0; inMatch; i++){ - struct snippetMatch *pMatch = &p->aMatch[i]; - zBuf[0] = ' '; - sqlite3_snprintf(sizeof(zBuf)-1, &zBuf[cnt>0], "%d %d %d %d", - pMatch->iCol, pMatch->iTerm, pMatch->iStart, pMatch->nByte); - append(&sb, zBuf); - cnt++; - } - p->zOffset = stringBufferData(&sb); - p->nOffset = stringBufferLength(&sb); -} - -/* -** zDoc[0..nDoc-1] is phrase of text. aMatch[0..nMatch-1] are a set -** of matching words some of which might be in zDoc. zDoc is column -** number iCol. 
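The `zOffset` text built by `snippetOffsetText` above is a flat list of four-integer groups: column, query-term index, byte offset, and match length. A small parser for that format is shown below; the sample string is made up for the example.

```c
#include <stdio.h>

int main(void){
  const char *zOffsets = "0 0 6 5 1 1 24 3";   /* two matches, four ints each */
  int iCol, iTerm, iStart, nByte, n;

  while( sscanf(zOffsets, "%d %d %d %d%n",
                &iCol, &iTerm, &iStart, &nByte, &n) == 4 ){
    printf("term %d matched column %d at byte %d (%d bytes)\n",
           iTerm, iCol, iStart, nByte);
    zOffsets += n;      /* advance past the group just consumed */
  }
  return 0;
}
```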
-** -** iBreak is suggested spot in zDoc where we could begin or end an -** excerpt. Return a value similar to iBreak but possibly adjusted -** to be a little left or right so that the break point is better. -*/ -static int wordBoundary( - int iBreak, /* The suggested break point */ - const char *zDoc, /* Document text */ - int nDoc, /* Number of bytes in zDoc[] */ - struct snippetMatch *aMatch, /* Matching words */ - int nMatch, /* Number of entries in aMatch[] */ - int iCol /* The column number for zDoc[] */ -){ - int i; - if( iBreak<=10 ){ - return 0; - } - if( iBreak>=nDoc-10 ){ - return nDoc; - } - for(i=0; i0 && aMatch[i-1].iStart+aMatch[i-1].nByte>=iBreak ){ - return aMatch[i-1].iStart; - } - } - for(i=1; i<=10; i++){ - if( safe_isspace(zDoc[iBreak-i]) ){ - return iBreak - i + 1; - } - if( safe_isspace(zDoc[iBreak+i]) ){ - return iBreak + i + 1; - } - } - return iBreak; -} - - - -/* -** Allowed values for Snippet.aMatch[].snStatus -*/ -#define SNIPPET_IGNORE 0 /* It is ok to omit this match from the snippet */ -#define SNIPPET_DESIRED 1 /* We want to include this match in the snippet */ - -/* -** Generate the text of a snippet. -*/ -static void snippetText( - fulltext_cursor *pCursor, /* The cursor we need the snippet for */ - const char *zStartMark, /* Markup to appear before each match */ - const char *zEndMark, /* Markup to appear after each match */ - const char *zEllipsis /* Ellipsis mark */ -){ - int i, j; - struct snippetMatch *aMatch; - int nMatch; - int nDesired; - StringBuffer sb; - int tailCol; - int tailOffset; - int iCol; - int nDoc; - const char *zDoc; - int iStart, iEnd; - int tailEllipsis = 0; - int iMatch; - - - sqlite3_free(pCursor->snippet.zSnippet); - pCursor->snippet.zSnippet = 0; - aMatch = pCursor->snippet.aMatch; - nMatch = pCursor->snippet.nMatch; - initStringBuffer(&sb); - - for(i=0; iq.nTerms; i++){ - for(j=0; j0; i++){ - if( aMatch[i].snStatus!=SNIPPET_DESIRED ) continue; - nDesired--; - iCol = aMatch[i].iCol; - zDoc = (const char*)sqlite3_column_text(pCursor->pStmt, iCol+1); - nDoc = sqlite3_column_bytes(pCursor->pStmt, iCol+1); - iStart = aMatch[i].iStart - 40; - iStart = wordBoundary(iStart, zDoc, nDoc, aMatch, nMatch, iCol); - if( iStart<=10 ){ - iStart = 0; - } - if( iCol==tailCol && iStart<=tailOffset+20 ){ - iStart = tailOffset; - } - if( (iCol!=tailCol && tailCol>=0) || iStart!=tailOffset ){ - trimWhiteSpace(&sb); - appendWhiteSpace(&sb); - append(&sb, zEllipsis); - appendWhiteSpace(&sb); - } - iEnd = aMatch[i].iStart + aMatch[i].nByte + 40; - iEnd = wordBoundary(iEnd, zDoc, nDoc, aMatch, nMatch, iCol); - if( iEnd>=nDoc-10 ){ - iEnd = nDoc; - tailEllipsis = 0; - }else{ - tailEllipsis = 1; - } - while( iMatchsnippet.zSnippet = stringBufferData(&sb); - pCursor->snippet.nSnippet = stringBufferLength(&sb); -} - - -/* -** Close the cursor. For additional information see the documentation -** on the xClose method of the virtual table interface. 
-*/ -static int fulltextClose(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - TRACE(("FTS2 Close %p\n", c)); - sqlite3_finalize(c->pStmt); - queryClear(&c->q); - snippetClear(&c->snippet); - if( c->result.nData!=0 ) dlrDestroy(&c->reader); - dataBufferDestroy(&c->result); - sqlite3_free(c); - return SQLITE_OK; -} - -static int fulltextNext(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - int rc; - - TRACE(("FTS2 Next %p\n", pCursor)); - snippetClear(&c->snippet); - if( c->iCursorType < QUERY_FULLTEXT ){ - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - switch( rc ){ - case SQLITE_ROW: - c->eof = 0; - return SQLITE_OK; - case SQLITE_DONE: - c->eof = 1; - return SQLITE_OK; - default: - c->eof = 1; - return rc; - } - } else { /* full-text query */ - rc = sqlite3_reset(c->pStmt); - if( rc!=SQLITE_OK ) return rc; - - if( c->result.nData==0 || dlrAtEnd(&c->reader) ){ - c->eof = 1; - return SQLITE_OK; - } - rc = sqlite3_bind_int64(c->pStmt, 1, dlrDocid(&c->reader)); - dlrStep(&c->reader); - if( rc!=SQLITE_OK ) return rc; - /* TODO(shess) Handle SQLITE_SCHEMA AND SQLITE_BUSY. */ - rc = sqlite3_step(c->pStmt); - if( rc==SQLITE_ROW ){ /* the case we expect */ - c->eof = 0; - return SQLITE_OK; - } - /* an error occurred; abort */ - return rc==SQLITE_DONE ? SQLITE_ERROR : rc; - } -} - - -/* TODO(shess) If we pushed LeafReader to the top of the file, or to -** another file, term_select() could be pushed above -** docListOfTerm(). -*/ -static int termSelect(fulltext_vtab *v, int iColumn, - const char *pTerm, int nTerm, int isPrefix, - DocListType iType, DataBuffer *out); - -/* Return a DocList corresponding to the query term *pTerm. If *pTerm -** is the first term of a phrase query, go ahead and evaluate the phrase -** query and return the doclist for the entire phrase query. -** -** The resulting DL_DOCIDS doclist is stored in pResult, which is -** overwritten. -*/ -static int docListOfTerm( - fulltext_vtab *v, /* The full text index */ - int iColumn, /* column to restrict to. No restriction if >=nColumn */ - QueryTerm *pQTerm, /* Term we are looking for, or 1st term of a phrase */ - DataBuffer *pResult /* Write the result here */ -){ - DataBuffer left, right, new; - int i, rc; - - /* No phrase search if no position info. */ - assert( pQTerm->nPhrase==0 || DL_DEFAULT!=DL_DOCIDS ); - - /* This code should never be called with buffered updates. */ - assert( v->nPendingData<0 ); - - dataBufferInit(&left, 0); - rc = termSelect(v, iColumn, pQTerm->pTerm, pQTerm->nTerm, pQTerm->isPrefix, - 0nPhrase ? DL_POSITIONS : DL_DOCIDS, &left); - if( rc ) return rc; - for(i=1; i<=pQTerm->nPhrase && left.nData>0; i++){ - dataBufferInit(&right, 0); - rc = termSelect(v, iColumn, pQTerm[i].pTerm, pQTerm[i].nTerm, - pQTerm[i].isPrefix, DL_POSITIONS, &right); - if( rc ){ - dataBufferDestroy(&left); - return rc; - } - dataBufferInit(&new, 0); - docListPhraseMerge(left.pData, left.nData, right.pData, right.nData, - inPhrase ? DL_POSITIONS : DL_DOCIDS, &new); - dataBufferDestroy(&left); - dataBufferDestroy(&right); - left = new; - } - *pResult = left; - return SQLITE_OK; -} - -/* Add a new term pTerm[0..nTerm-1] to the query *q. 
-*/ -static void queryAdd(Query *q, const char *pTerm, int nTerm){ - QueryTerm *t; - ++q->nTerms; - q->pTerms = sqlite3_realloc(q->pTerms, q->nTerms * sizeof(q->pTerms[0])); - if( q->pTerms==0 ){ - q->nTerms = 0; - return; - } - t = &q->pTerms[q->nTerms - 1]; - CLEAR(t); - t->pTerm = sqlite3_malloc(nTerm+1); - memcpy(t->pTerm, pTerm, nTerm); - t->pTerm[nTerm] = 0; - t->nTerm = nTerm; - t->isOr = q->nextIsOr; - t->isPrefix = 0; - q->nextIsOr = 0; - t->iColumn = q->nextColumn; - q->nextColumn = q->dfltColumn; -} - -/* -** Check to see if the string zToken[0...nToken-1] matches any -** column name in the virtual table. If it does, -** return the zero-indexed column number. If not, return -1. -*/ -static int checkColumnSpecifier( - fulltext_vtab *pVtab, /* The virtual table */ - const char *zToken, /* Text of the token */ - int nToken /* Number of characters in the token */ -){ - int i; - for(i=0; inColumn; i++){ - if( memcmp(pVtab->azColumn[i], zToken, nToken)==0 - && pVtab->azColumn[i][nToken]==0 ){ - return i; - } - } - return -1; -} - -/* -** Parse the text at pSegment[0..nSegment-1]. Add additional terms -** to the query being assemblied in pQuery. -** -** inPhrase is true if pSegment[0..nSegement-1] is contained within -** double-quotes. If inPhrase is true, then the first term -** is marked with the number of terms in the phrase less one and -** OR and "-" syntax is ignored. If inPhrase is false, then every -** term found is marked with nPhrase=0 and OR and "-" syntax is significant. -*/ -static int tokenizeSegment( - sqlite3_tokenizer *pTokenizer, /* The tokenizer to use */ - const char *pSegment, int nSegment, /* Query expression being parsed */ - int inPhrase, /* True if within "..." */ - Query *pQuery /* Append results here */ -){ - const sqlite3_tokenizer_module *pModule = pTokenizer->pModule; - sqlite3_tokenizer_cursor *pCursor; - int firstIndex = pQuery->nTerms; - int iCol; - int nTerm = 1; - - int rc = pModule->xOpen(pTokenizer, pSegment, nSegment, &pCursor); - if( rc!=SQLITE_OK ) return rc; - pCursor->pTokenizer = pTokenizer; - - while( 1 ){ - const char *pToken; - int nToken, iBegin, iEnd, iPos; - - rc = pModule->xNext(pCursor, - &pToken, &nToken, - &iBegin, &iEnd, &iPos); - if( rc!=SQLITE_OK ) break; - if( !inPhrase && - pSegment[iEnd]==':' && - (iCol = checkColumnSpecifier(pQuery->pFts, pToken, nToken))>=0 ){ - pQuery->nextColumn = iCol; - continue; - } - if( !inPhrase && pQuery->nTerms>0 && nToken==2 - && pSegment[iBegin]=='O' && pSegment[iBegin+1]=='R' ){ - pQuery->nextIsOr = 1; - continue; - } - queryAdd(pQuery, pToken, nToken); - if( !inPhrase && iBegin>0 && pSegment[iBegin-1]=='-' ){ - pQuery->pTerms[pQuery->nTerms-1].isNot = 1; - } - if( iEndpTerms[pQuery->nTerms-1].isPrefix = 1; - } - pQuery->pTerms[pQuery->nTerms-1].iPhrase = nTerm; - if( inPhrase ){ - nTerm++; - } - } - - if( inPhrase && pQuery->nTerms>firstIndex ){ - pQuery->pTerms[firstIndex].nPhrase = pQuery->nTerms - firstIndex - 1; - } - - return pModule->xClose(pCursor); -} - -/* Parse a query string, yielding a Query object pQuery. -** -** The calling function will need to queryClear() to clean up -** the dynamically allocated memory held by pQuery. -*/ -static int parseQuery( - fulltext_vtab *v, /* The fulltext index */ - const char *zInput, /* Input text of the query string */ - int nInput, /* Size of the input text */ - int dfltColumn, /* Default column of the index to match against */ - Query *pQuery /* Write the parse results here. 
*/ -){ - int iInput, inPhrase = 0; - - if( zInput==0 ) nInput = 0; - if( nInput<0 ) nInput = strlen(zInput); - pQuery->nTerms = 0; - pQuery->pTerms = NULL; - pQuery->nextIsOr = 0; - pQuery->nextColumn = dfltColumn; - pQuery->dfltColumn = dfltColumn; - pQuery->pFts = v; - - for(iInput=0; iInputiInput ){ - tokenizeSegment(v->pTokenizer, zInput+iInput, i-iInput, inPhrase, - pQuery); - } - iInput = i; - if( i=nColumn -** they are allowed to match against any column. -*/ -static int fulltextQuery( - fulltext_vtab *v, /* The full text index */ - int iColumn, /* Match against this column by default */ - const char *zInput, /* The query string */ - int nInput, /* Number of bytes in zInput[] */ - DataBuffer *pResult, /* Write the result doclist here */ - Query *pQuery /* Put parsed query string here */ -){ - int i, iNext, rc; - DataBuffer left, right, or, new; - int nNot = 0; - QueryTerm *aTerm; - - /* TODO(shess) Instead of flushing pendingTerms, we could query for - ** the relevant term and merge the doclist into what we receive from - ** the database. Wait and see if this is a common issue, first. - ** - ** A good reason not to flush is to not generate update-related - ** error codes from here. - */ - - /* Flush any buffered updates before executing the query. */ - rc = flushPendingTerms(v); - if( rc!=SQLITE_OK ) return rc; - - /* TODO(shess) I think that the queryClear() calls below are not - ** necessary, because fulltextClose() already clears the query. - */ - rc = parseQuery(v, zInput, nInput, iColumn, pQuery); - if( rc!=SQLITE_OK ) return rc; - - /* Empty or NULL queries return no results. */ - if( pQuery->nTerms==0 ){ - dataBufferInit(pResult, 0); - return SQLITE_OK; - } - - /* Merge AND terms. */ - /* TODO(shess) I think we can early-exit if( i>nNot && left.nData==0 ). */ - aTerm = pQuery->pTerms; - for(i = 0; inTerms; i=iNext){ - if( aTerm[i].isNot ){ - /* Handle all NOT terms in a separate pass */ - nNot++; - iNext = i + aTerm[i].nPhrase+1; - continue; - } - iNext = i + aTerm[i].nPhrase + 1; - rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); - if( rc ){ - if( i!=nNot ) dataBufferDestroy(&left); - queryClear(pQuery); - return rc; - } - while( iNextnTerms && aTerm[iNext].isOr ){ - rc = docListOfTerm(v, aTerm[iNext].iColumn, &aTerm[iNext], &or); - iNext += aTerm[iNext].nPhrase + 1; - if( rc ){ - if( i!=nNot ) dataBufferDestroy(&left); - dataBufferDestroy(&right); - queryClear(pQuery); - return rc; - } - dataBufferInit(&new, 0); - docListOrMerge(right.pData, right.nData, or.pData, or.nData, &new); - dataBufferDestroy(&right); - dataBufferDestroy(&or); - right = new; - } - if( i==nNot ){ /* first term processed. */ - left = right; - }else{ - dataBufferInit(&new, 0); - docListAndMerge(left.pData, left.nData, right.pData, right.nData, &new); - dataBufferDestroy(&right); - dataBufferDestroy(&left); - left = new; - } - } - - if( nNot==pQuery->nTerms ){ - /* We do not yet know how to handle a query of only NOT terms */ - return SQLITE_ERROR; - } - - /* Do the EXCEPT terms */ - for(i=0; inTerms; i += aTerm[i].nPhrase + 1){ - if( !aTerm[i].isNot ) continue; - rc = docListOfTerm(v, aTerm[i].iColumn, &aTerm[i], &right); - if( rc ){ - queryClear(pQuery); - dataBufferDestroy(&left); - return rc; - } - dataBufferInit(&new, 0); - docListExceptMerge(left.pData, left.nData, right.pData, right.nData, &new); - dataBufferDestroy(&right); - dataBufferDestroy(&left); - left = new; - } - - *pResult = left; - return rc; -} - -/* -** This is the xFilter interface for the virtual table. 
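fulltextQuery() above combines the per-term doclists with AND, OR, and EXCEPT merges over docid-ordered lists. As a rough illustration of just the AND step, here is a sketch over plain arrays of docids; the names are hypothetical, and the real docListAndMerge() operates on varint-encoded doclists that may also carry position data.

```c
#include <stdio.h>

typedef long long docid_t;

/* Intersect two ascending docid lists into `out`; returns the number
** of docids written. Illustrative only. */
static int docidIntersect(const docid_t *a, int na,
                          const docid_t *b, int nb,
                          docid_t *out){
  int i = 0, j = 0, n = 0;
  while( i<na && j<nb ){
    if( a[i]<b[j] ){
      i++;
    }else if( b[j]<a[i] ){
      j++;
    }else{
      out[n++] = a[i];
      i++;
      j++;
    }
  }
  return n;
}

int main(void){
  docid_t left[]  = {1, 4, 7, 9, 12};
  docid_t right[] = {4, 5, 9, 12, 20};
  docid_t both[5];
  int i, n = docidIntersect(left, 5, right, 5, both);
  for(i=0; i<n; i++) printf("%lld\n", both[i]);
  return 0;
}
```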
See -** the virtual table xFilter method documentation for additional -** information. -** -** If idxNum==QUERY_GENERIC then do a full table scan against -** the %_content table. -** -** If idxNum==QUERY_ROWID then do a rowid lookup for a single entry -** in the %_content table. -** -** If idxNum>=QUERY_FULLTEXT then use the full text index. The -** column on the left-hand side of the MATCH operator is column -** number idxNum-QUERY_FULLTEXT, 0 indexed. argv[0] is the right-hand -** side of the MATCH operator. -*/ -/* TODO(shess) Upgrade the cursor initialization and destruction to -** account for fulltextFilter() being called multiple times on the -** same cursor. The current solution is very fragile. Apply fix to -** fts2 as appropriate. -*/ -static int fulltextFilter( - sqlite3_vtab_cursor *pCursor, /* The cursor used for this query */ - int idxNum, const char *idxStr, /* Which indexing scheme to use */ - int argc, sqlite3_value **argv /* Arguments for the indexing scheme */ -){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - fulltext_vtab *v = cursor_vtab(c); - int rc; - - TRACE(("FTS2 Filter %p\n",pCursor)); - - /* If the cursor has a statement that was not prepared according to - ** idxNum, clear it. I believe all calls to fulltextFilter with a - ** given cursor will have the same idxNum , but in this case it's - ** easy to be safe. - */ - if( c->pStmt && c->iCursorType!=idxNum ){ - sqlite3_finalize(c->pStmt); - c->pStmt = NULL; - } - - /* Get a fresh statement appropriate to idxNum. */ - /* TODO(shess): Add a prepared-statement cache in the vt structure. - ** The cache must handle multiple open cursors. Easier to cache the - ** statement variants at the vt to reduce malloc/realloc/free here. - ** Or we could have a StringBuffer variant which allowed stack - ** construction for small values. - */ - if( !c->pStmt ){ - char *zSql = sqlite3_mprintf("select rowid, * from %%_content %s", - idxNum==QUERY_GENERIC ? "" : "where rowid=?"); - rc = sql_prepare(v->db, v->zDb, v->zName, &c->pStmt, zSql); - sqlite3_free(zSql); - if( rc!=SQLITE_OK ) return rc; - c->iCursorType = idxNum; - }else{ - sqlite3_reset(c->pStmt); - assert( c->iCursorType==idxNum ); - } - - switch( idxNum ){ - case QUERY_GENERIC: - break; - - case QUERY_ROWID: - rc = sqlite3_bind_int64(c->pStmt, 1, sqlite3_value_int64(argv[0])); - if( rc!=SQLITE_OK ) return rc; - break; - - default: /* full-text search */ - { - const char *zQuery = (const char *)sqlite3_value_text(argv[0]); - assert( idxNum<=QUERY_FULLTEXT+v->nColumn); - assert( argc==1 ); - queryClear(&c->q); - if( c->result.nData!=0 ){ - /* This case happens if the same cursor is used repeatedly. */ - dlrDestroy(&c->reader); - dataBufferReset(&c->result); - }else{ - dataBufferInit(&c->result, 0); - } - rc = fulltextQuery(v, idxNum-QUERY_FULLTEXT, zQuery, -1, &c->result, &c->q); - if( rc!=SQLITE_OK ) return rc; - if( c->result.nData!=0 ){ - dlrInit(&c->reader, DL_DOCIDS, c->result.pData, c->result.nData); - } - break; - } - } - - return fulltextNext(pCursor); -} - -/* This is the xEof method of the virtual table. The SQLite core -** calls this routine to find out if it has reached the end of -** a query's results set. -*/ -static int fulltextEof(sqlite3_vtab_cursor *pCursor){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - return c->eof; -} - -/* This is the xColumn method of the virtual table. The SQLite -** core calls this method during a query when it needs the value -** of a column from the virtual table. 
This method needs to use -** one of the sqlite3_result_*() routines to store the requested -** value back in the pContext. -*/ -static int fulltextColumn(sqlite3_vtab_cursor *pCursor, - sqlite3_context *pContext, int idxCol){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - fulltext_vtab *v = cursor_vtab(c); - - if( idxColnColumn ){ - sqlite3_value *pVal = sqlite3_column_value(c->pStmt, idxCol+1); - sqlite3_result_value(pContext, pVal); - }else if( idxCol==v->nColumn ){ - /* The extra column whose name is the same as the table. - ** Return a blob which is a pointer to the cursor - */ - sqlite3_result_blob(pContext, &c, sizeof(c), SQLITE_TRANSIENT); - } - return SQLITE_OK; -} - -/* This is the xRowid method. The SQLite core calls this routine to -** retrive the rowid for the current row of the result set. The -** rowid should be written to *pRowid. -*/ -static int fulltextRowid(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ - fulltext_cursor *c = (fulltext_cursor *) pCursor; - - *pRowid = sqlite3_column_int64(c->pStmt, 0); - return SQLITE_OK; -} - -/* Add all terms in [zText] to pendingTerms table. If [iColumn] > 0, -** we also store positions and offsets in the hash table using that -** column number. -*/ -static int buildTerms(fulltext_vtab *v, sqlite_int64 iDocid, - const char *zText, int iColumn){ - sqlite3_tokenizer *pTokenizer = v->pTokenizer; - sqlite3_tokenizer_cursor *pCursor; - const char *pToken; - int nTokenBytes; - int iStartOffset, iEndOffset, iPosition; - int rc; - - rc = pTokenizer->pModule->xOpen(pTokenizer, zText, -1, &pCursor); - if( rc!=SQLITE_OK ) return rc; - - pCursor->pTokenizer = pTokenizer; - while( SQLITE_OK==(rc=pTokenizer->pModule->xNext(pCursor, - &pToken, &nTokenBytes, - &iStartOffset, &iEndOffset, - &iPosition)) ){ - DLCollector *p; - int nData; /* Size of doclist before our update. */ - - /* Positions can't be negative; we use -1 as a terminator - * internally. Token can't be NULL or empty. */ - if( iPosition<0 || pToken == NULL || nTokenBytes == 0 ){ - rc = SQLITE_ERROR; - break; - } - - p = fts2HashFind(&v->pendingTerms, pToken, nTokenBytes); - if( p==NULL ){ - nData = 0; - p = dlcNew(iDocid, DL_DEFAULT); - fts2HashInsert(&v->pendingTerms, pToken, nTokenBytes, p); - - /* Overhead for our hash table entry, the key, and the value. */ - v->nPendingData += sizeof(struct fts2HashElem)+sizeof(*p)+nTokenBytes; - }else{ - nData = p->b.nData; - if( p->dlw.iPrevDocid!=iDocid ) dlcNext(p, iDocid); - } - if( iColumn>=0 ){ - dlcAddPos(p, iColumn, iPosition, iStartOffset, iEndOffset); - } - - /* Accumulate data added by dlcNew or dlcNext, and dlcAddPos. */ - v->nPendingData += p->b.nData-nData; - } - - /* TODO(shess) Check return? Should this be able to cause errors at - ** this point? Actually, same question about sqlite3_finalize(), - ** though one could argue that failure there means that the data is - ** not durable. *ponder* - */ - pTokenizer->pModule->xClose(pCursor); - if( SQLITE_DONE == rc ) return SQLITE_OK; - return rc; -} - -/* Add doclists for all terms in [pValues] to pendingTerms table. */ -static int insertTerms(fulltext_vtab *v, sqlite_int64 iRowid, - sqlite3_value **pValues){ - int i; - for(i = 0; i < v->nColumn ; ++i){ - char *zText = (char*)sqlite3_value_text(pValues[i]); - int rc = buildTerms(v, iRowid, zText, i); - if( rc!=SQLITE_OK ) return rc; - } - return SQLITE_OK; -} - -/* Add empty doclists for all terms in the given row's content to -** pendingTerms. 
-*/ -static int deleteTerms(fulltext_vtab *v, sqlite_int64 iRowid){ - const char **pValues; - int i, rc; - - /* TODO(shess) Should we allow such tables at all? */ - if( DL_DEFAULT==DL_DOCIDS ) return SQLITE_ERROR; - - rc = content_select(v, iRowid, &pValues); - if( rc!=SQLITE_OK ) return rc; - - for(i = 0 ; i < v->nColumn; ++i) { - rc = buildTerms(v, iRowid, pValues[i], -1); - if( rc!=SQLITE_OK ) break; - } - - freeStringArray(v->nColumn, pValues); - return SQLITE_OK; -} - -/* TODO(shess) Refactor the code to remove this forward decl. */ -static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid); - -/* Insert a row into the %_content table; set *piRowid to be the ID of the -** new row. Add doclists for terms to pendingTerms. -*/ -static int index_insert(fulltext_vtab *v, sqlite3_value *pRequestRowid, - sqlite3_value **pValues, sqlite_int64 *piRowid){ - int rc; - - rc = content_insert(v, pRequestRowid, pValues); /* execute an SQL INSERT */ - if( rc!=SQLITE_OK ) return rc; - - *piRowid = sqlite3_last_insert_rowid(v->db); - rc = initPendingTerms(v, *piRowid); - if( rc!=SQLITE_OK ) return rc; - - return insertTerms(v, *piRowid, pValues); -} - -/* Delete a row from the %_content table; add empty doclists for terms -** to pendingTerms. -*/ -static int index_delete(fulltext_vtab *v, sqlite_int64 iRow){ - int rc = initPendingTerms(v, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = deleteTerms(v, iRow); - if( rc!=SQLITE_OK ) return rc; - - return content_delete(v, iRow); /* execute an SQL DELETE */ -} - -/* Update a row in the %_content table; add delete doclists to -** pendingTerms for old terms not in the new data, add insert doclists -** to pendingTerms for terms in the new data. -*/ -static int index_update(fulltext_vtab *v, sqlite_int64 iRow, - sqlite3_value **pValues){ - int rc = initPendingTerms(v, iRow); - if( rc!=SQLITE_OK ) return rc; - - /* Generate an empty doclist for each term that previously appeared in this - * row. */ - rc = deleteTerms(v, iRow); - if( rc!=SQLITE_OK ) return rc; - - rc = content_update(v, pValues, iRow); /* execute an SQL UPDATE */ - if( rc!=SQLITE_OK ) return rc; - - /* Now add positions for terms which appear in the updated row. */ - return insertTerms(v, iRow, pValues); -} - -/*******************************************************************/ -/* InteriorWriter is used to collect terms and block references into -** interior nodes in %_segments. See commentary at top of file for -** format. -*/ - -/* How large interior nodes can grow. */ -#define INTERIOR_MAX 2048 - -/* Minimum number of terms per interior node (except the root). This -** prevents large terms from making the tree too skinny - must be >0 -** so that the tree always makes progress. Note that the min tree -** fanout will be INTERIOR_MIN_TERMS+1. -*/ -#define INTERIOR_MIN_TERMS 7 -#if INTERIOR_MIN_TERMS<1 -# error INTERIOR_MIN_TERMS must be greater than 0. -#endif - -/* ROOT_MAX controls how much data is stored inline in the segment -** directory. -*/ -/* TODO(shess) Push ROOT_MAX down to whoever is writing things. It's -** only here so that interiorWriterRootInfo() and leafWriterRootInfo() -** can both see it, but if the caller passed it in, we wouldn't even -** need a define. 
-*/ -#define ROOT_MAX 1024 -#if ROOT_MAXterm, 0); - dataBufferReplace(&block->term, pTerm, nTerm); - - n = putVarint(c, iHeight); - n += putVarint(c+n, iChildBlock); - dataBufferInit(&block->data, INTERIOR_MAX); - dataBufferReplace(&block->data, c, n); - } - return block; -} - -#ifndef NDEBUG -/* Verify that the data is readable as an interior node. */ -static void interiorBlockValidate(InteriorBlock *pBlock){ - const char *pData = pBlock->data.pData; - int nData = pBlock->data.nData; - int n, iDummy; - sqlite_int64 iBlockid; - - assert( nData>0 ); - assert( pData!=0 ); - assert( pData+nData>pData ); - - /* Must lead with height of node as a varint(n), n>0 */ - n = getVarint32(pData, &iDummy); - assert( n>0 ); - assert( iDummy>0 ); - assert( n0 ); - assert( n<=nData ); - pData += n; - nData -= n; - - /* Zero or more terms of positive length */ - if( nData!=0 ){ - /* First term is not delta-encoded. */ - n = getVarint32(pData, &iDummy); - assert( n>0 ); - assert( iDummy>0 ); - assert( n+iDummy>0); - assert( n+iDummy<=nData ); - pData += n+iDummy; - nData -= n+iDummy; - - /* Following terms delta-encoded. */ - while( nData!=0 ){ - /* Length of shared prefix. */ - n = getVarint32(pData, &iDummy); - assert( n>0 ); - assert( iDummy>=0 ); - assert( n0 ); - assert( iDummy>0 ); - assert( n+iDummy>0); - assert( n+iDummy<=nData ); - pData += n+iDummy; - nData -= n+iDummy; - } - } -} -#define ASSERT_VALID_INTERIOR_BLOCK(x) interiorBlockValidate(x) -#else -#define ASSERT_VALID_INTERIOR_BLOCK(x) assert( 1 ) -#endif - -typedef struct InteriorWriter { - int iHeight; /* from 0 at leaves. */ - InteriorBlock *first, *last; - struct InteriorWriter *parentWriter; - - DataBuffer term; /* Last term written to block "last". */ - sqlite_int64 iOpeningChildBlock; /* First child block in block "last". */ -#ifndef NDEBUG - sqlite_int64 iLastChildBlock; /* for consistency checks. */ -#endif -} InteriorWriter; - -/* Initialize an interior node where pTerm[nTerm] marks the leftmost -** term in the tree. iChildBlock is the leftmost child block at the -** next level down the tree. -*/ -static void interiorWriterInit(int iHeight, const char *pTerm, int nTerm, - sqlite_int64 iChildBlock, - InteriorWriter *pWriter){ - InteriorBlock *block; - assert( iHeight>0 ); - CLEAR(pWriter); - - pWriter->iHeight = iHeight; - pWriter->iOpeningChildBlock = iChildBlock; -#ifndef NDEBUG - pWriter->iLastChildBlock = iChildBlock; -#endif - block = interiorBlockNew(iHeight, iChildBlock, pTerm, nTerm); - pWriter->last = pWriter->first = block; - ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); - dataBufferInit(&pWriter->term, 0); -} - -/* Append the child node rooted at iChildBlock to the interior node, -** with pTerm[nTerm] as the leftmost term in iChildBlock's subtree. -*/ -static void interiorWriterAppend(InteriorWriter *pWriter, - const char *pTerm, int nTerm, - sqlite_int64 iChildBlock){ - char c[VARINT_MAX+VARINT_MAX]; - int n, nPrefix = 0; - - ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); - - /* The first term written into an interior node is actually - ** associated with the second child added (the first child was added - ** in interiorWriterInit, or in the if clause at the bottom of this - ** function). That term gets encoded straight up, with nPrefix left - ** at 0. 
- */ - if( pWriter->term.nData==0 ){ - n = putVarint(c, nTerm); - }else{ - while( nPrefixterm.nData && - pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ - nPrefix++; - } - - n = putVarint(c, nPrefix); - n += putVarint(c+n, nTerm-nPrefix); - } - -#ifndef NDEBUG - pWriter->iLastChildBlock++; -#endif - assert( pWriter->iLastChildBlock==iChildBlock ); - - /* Overflow to a new block if the new term makes the current block - ** too big, and the current block already has enough terms. - */ - if( pWriter->last->data.nData+n+nTerm-nPrefix>INTERIOR_MAX && - iChildBlock-pWriter->iOpeningChildBlock>INTERIOR_MIN_TERMS ){ - pWriter->last->next = interiorBlockNew(pWriter->iHeight, iChildBlock, - pTerm, nTerm); - pWriter->last = pWriter->last->next; - pWriter->iOpeningChildBlock = iChildBlock; - dataBufferReset(&pWriter->term); - }else{ - dataBufferAppend2(&pWriter->last->data, c, n, - pTerm+nPrefix, nTerm-nPrefix); - dataBufferReplace(&pWriter->term, pTerm, nTerm); - } - ASSERT_VALID_INTERIOR_BLOCK(pWriter->last); -} - -/* Free the space used by pWriter, including the linked-list of -** InteriorBlocks, and parentWriter, if present. -*/ -static int interiorWriterDestroy(InteriorWriter *pWriter){ - InteriorBlock *block = pWriter->first; - - while( block!=NULL ){ - InteriorBlock *b = block; - block = block->next; - dataBufferDestroy(&b->term); - dataBufferDestroy(&b->data); - sqlite3_free(b); - } - if( pWriter->parentWriter!=NULL ){ - interiorWriterDestroy(pWriter->parentWriter); - sqlite3_free(pWriter->parentWriter); - } - dataBufferDestroy(&pWriter->term); - SCRAMBLE(pWriter); - return SQLITE_OK; -} - -/* If pWriter can fit entirely in ROOT_MAX, return it as the root info -** directly, leaving *piEndBlockid unchanged. Otherwise, flush -** pWriter to %_segments, building a new layer of interior nodes, and -** recursively ask for their root into. -*/ -static int interiorWriterRootInfo(fulltext_vtab *v, InteriorWriter *pWriter, - char **ppRootInfo, int *pnRootInfo, - sqlite_int64 *piEndBlockid){ - InteriorBlock *block = pWriter->first; - sqlite_int64 iBlockid = 0; - int rc; - - /* If we can fit the segment inline */ - if( block==pWriter->last && block->data.nDatadata.pData; - *pnRootInfo = block->data.nData; - return SQLITE_OK; - } - - /* Flush the first block to %_segments, and create a new level of - ** interior node. - */ - ASSERT_VALID_INTERIOR_BLOCK(block); - rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); - if( rc!=SQLITE_OK ) return rc; - *piEndBlockid = iBlockid; - - pWriter->parentWriter = sqlite3_malloc(sizeof(*pWriter->parentWriter)); - interiorWriterInit(pWriter->iHeight+1, - block->term.pData, block->term.nData, - iBlockid, pWriter->parentWriter); - - /* Flush additional blocks and append to the higher interior - ** node. - */ - for(block=block->next; block!=NULL; block=block->next){ - ASSERT_VALID_INTERIOR_BLOCK(block); - rc = block_insert(v, block->data.pData, block->data.nData, &iBlockid); - if( rc!=SQLITE_OK ) return rc; - *piEndBlockid = iBlockid; - - interiorWriterAppend(pWriter->parentWriter, - block->term.pData, block->term.nData, iBlockid); - } - - /* Parent node gets the chance to be the root. */ - return interiorWriterRootInfo(v, pWriter->parentWriter, - ppRootInfo, pnRootInfo, piEndBlockid); -} - -/****************************************************************/ -/* InteriorReader is used to read off the data from an interior node -** (see comment at top of file for the format). 
-*/ -typedef struct InteriorReader { - const char *pData; - int nData; - - DataBuffer term; /* previous term, for decoding term delta. */ - - sqlite_int64 iBlockid; -} InteriorReader; - -static void interiorReaderDestroy(InteriorReader *pReader){ - dataBufferDestroy(&pReader->term); - SCRAMBLE(pReader); -} - -/* TODO(shess) The assertions are great, but what if we're in NDEBUG -** and the blob is empty or otherwise contains suspect data? -*/ -static void interiorReaderInit(const char *pData, int nData, - InteriorReader *pReader){ - int n, nTerm; - - /* Require at least the leading flag byte */ - assert( nData>0 ); - assert( pData[0]!='\0' ); - - CLEAR(pReader); - - /* Decode the base blockid, and set the cursor to the first term. */ - n = getVarint(pData+1, &pReader->iBlockid); - assert( 1+n<=nData ); - pReader->pData = pData+1+n; - pReader->nData = nData-(1+n); - - /* A single-child interior node (such as when a leaf node was too - ** large for the segment directory) won't have any terms. - ** Otherwise, decode the first term. - */ - if( pReader->nData==0 ){ - dataBufferInit(&pReader->term, 0); - }else{ - n = getVarint32(pReader->pData, &nTerm); - dataBufferInit(&pReader->term, nTerm); - dataBufferReplace(&pReader->term, pReader->pData+n, nTerm); - assert( n+nTerm<=pReader->nData ); - pReader->pData += n+nTerm; - pReader->nData -= n+nTerm; - } -} - -static int interiorReaderAtEnd(InteriorReader *pReader){ - return pReader->term.nData==0; -} - -static sqlite_int64 interiorReaderCurrentBlockid(InteriorReader *pReader){ - return pReader->iBlockid; -} - -static int interiorReaderTermBytes(InteriorReader *pReader){ - assert( !interiorReaderAtEnd(pReader) ); - return pReader->term.nData; -} -static const char *interiorReaderTerm(InteriorReader *pReader){ - assert( !interiorReaderAtEnd(pReader) ); - return pReader->term.pData; -} - -/* Step forward to the next term in the node. */ -static void interiorReaderStep(InteriorReader *pReader){ - assert( !interiorReaderAtEnd(pReader) ); - - /* If the last term has been read, signal eof, else construct the - ** next term. - */ - if( pReader->nData==0 ){ - dataBufferReset(&pReader->term); - }else{ - int n, nPrefix, nSuffix; - - n = getVarint32(pReader->pData, &nPrefix); - n += getVarint32(pReader->pData+n, &nSuffix); - - /* Truncate the current term and append suffix data. */ - pReader->term.nData = nPrefix; - dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); - - assert( n+nSuffix<=pReader->nData ); - pReader->pData += n+nSuffix; - pReader->nData -= n+nSuffix; - } - pReader->iBlockid++; -} - -/* Compare the current term to pTerm[nTerm], returning strcmp-style -** results. If isPrefix, equality means equal through nTerm bytes. -*/ -static int interiorReaderTermCmp(InteriorReader *pReader, - const char *pTerm, int nTerm, int isPrefix){ - const char *pReaderTerm = interiorReaderTerm(pReader); - int nReaderTerm = interiorReaderTermBytes(pReader); - int c, n = nReaderTerm0 ) return -1; - if( nTerm>0 ) return 1; - return 0; - } - - c = memcmp(pReaderTerm, pTerm, n); - if( c!=0 ) return c; - if( isPrefix && n==nTerm ) return 0; - return nReaderTerm - nTerm; -} - -/****************************************************************/ -/* LeafWriter is used to collect terms and associated doclist data -** into leaf blocks in %_segments (see top of file for format info). -** Expected usage is: -** -** LeafWriter writer; -** leafWriterInit(0, 0, &writer); -** while( sorted_terms_left_to_process ){ -** // data is doclist data for that term. 
-** rc = leafWriterStep(v, &writer, pTerm, nTerm, pData, nData); -** if( rc!=SQLITE_OK ) goto err; -** } -** rc = leafWriterFinalize(v, &writer); -**err: -** leafWriterDestroy(&writer); -** return rc; -** -** leafWriterStep() may write a collected leaf out to %_segments. -** leafWriterFinalize() finishes writing any buffered data and stores -** a root node in %_segdir. leafWriterDestroy() frees all buffers and -** InteriorWriters allocated as part of writing this segment. -** -** TODO(shess) Document leafWriterStepMerge(). -*/ - -/* Put terms with data this big in their own block. */ -#define STANDALONE_MIN 1024 - -/* Keep leaf blocks below this size. */ -#define LEAF_MAX 2048 - -typedef struct LeafWriter { - int iLevel; - int idx; - sqlite_int64 iStartBlockid; /* needed to create the root info */ - sqlite_int64 iEndBlockid; /* when we're done writing. */ - - DataBuffer term; /* previous encoded term */ - DataBuffer data; /* encoding buffer */ - - /* bytes of first term in the current node which distinguishes that - ** term from the last term of the previous node. - */ - int nTermDistinct; - - InteriorWriter parentWriter; /* if we overflow */ - int has_parent; -} LeafWriter; - -static void leafWriterInit(int iLevel, int idx, LeafWriter *pWriter){ - CLEAR(pWriter); - pWriter->iLevel = iLevel; - pWriter->idx = idx; - - dataBufferInit(&pWriter->term, 32); - - /* Start out with a reasonably sized block, though it can grow. */ - dataBufferInit(&pWriter->data, LEAF_MAX); -} - -#ifndef NDEBUG -/* Verify that the data is readable as a leaf node. */ -static void leafNodeValidate(const char *pData, int nData){ - int n, iDummy; - - if( nData==0 ) return; - assert( nData>0 ); - assert( pData!=0 ); - assert( pData+nData>pData ); - - /* Must lead with a varint(0) */ - n = getVarint32(pData, &iDummy); - assert( iDummy==0 ); - assert( n>0 ); - assert( n0 ); - assert( iDummy>0 ); - assert( n+iDummy>0 ); - assert( n+iDummy0 ); - assert( iDummy>0 ); - assert( n+iDummy>0 ); - assert( n+iDummy<=nData ); - ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); - pData += n+iDummy; - nData -= n+iDummy; - - /* Verify that trailing terms and doclists also are readable. */ - while( nData!=0 ){ - n = getVarint32(pData, &iDummy); - assert( n>0 ); - assert( iDummy>=0 ); - assert( n0 ); - assert( iDummy>0 ); - assert( n+iDummy>0 ); - assert( n+iDummy0 ); - assert( iDummy>0 ); - assert( n+iDummy>0 ); - assert( n+iDummy<=nData ); - ASSERT_VALID_DOCLIST(DL_DEFAULT, pData+n, iDummy, NULL); - pData += n+iDummy; - nData -= n+iDummy; - } -} -#define ASSERT_VALID_LEAF_NODE(p, n) leafNodeValidate(p, n) -#else -#define ASSERT_VALID_LEAF_NODE(p, n) assert( 1 ) -#endif - -/* Flush the current leaf node to %_segments, and adding the resulting -** blockid and the starting term to the interior node which will -** contain it. -*/ -static int leafWriterInternalFlush(fulltext_vtab *v, LeafWriter *pWriter, - int iData, int nData){ - sqlite_int64 iBlockid = 0; - const char *pStartingTerm; - int nStartingTerm, rc, n; - - /* Must have the leading varint(0) flag, plus at least some - ** valid-looking data. - */ - assert( nData>2 ); - assert( iData>=0 ); - assert( iData+nData<=pWriter->data.nData ); - ASSERT_VALID_LEAF_NODE(pWriter->data.pData+iData, nData); - - rc = block_insert(v, pWriter->data.pData+iData, nData, &iBlockid); - if( rc!=SQLITE_OK ) return rc; - assert( iBlockid!=0 ); - - /* Reconstruct the first term in the leaf for purposes of building - ** the interior node. 
- */ - n = getVarint32(pWriter->data.pData+iData+1, &nStartingTerm); - pStartingTerm = pWriter->data.pData+iData+1+n; - assert( pWriter->data.nData>iData+1+n+nStartingTerm ); - assert( pWriter->nTermDistinct>0 ); - assert( pWriter->nTermDistinct<=nStartingTerm ); - nStartingTerm = pWriter->nTermDistinct; - - if( pWriter->has_parent ){ - interiorWriterAppend(&pWriter->parentWriter, - pStartingTerm, nStartingTerm, iBlockid); - }else{ - interiorWriterInit(1, pStartingTerm, nStartingTerm, iBlockid, - &pWriter->parentWriter); - pWriter->has_parent = 1; - } - - /* Track the span of this segment's leaf nodes. */ - if( pWriter->iEndBlockid==0 ){ - pWriter->iEndBlockid = pWriter->iStartBlockid = iBlockid; - }else{ - pWriter->iEndBlockid++; - assert( iBlockid==pWriter->iEndBlockid ); - } - - return SQLITE_OK; -} -static int leafWriterFlush(fulltext_vtab *v, LeafWriter *pWriter){ - int rc = leafWriterInternalFlush(v, pWriter, 0, pWriter->data.nData); - if( rc!=SQLITE_OK ) return rc; - - /* Re-initialize the output buffer. */ - dataBufferReset(&pWriter->data); - - return SQLITE_OK; -} - -/* Fetch the root info for the segment. If the entire leaf fits -** within ROOT_MAX, then it will be returned directly, otherwise it -** will be flushed and the root info will be returned from the -** interior node. *piEndBlockid is set to the blockid of the last -** interior or leaf node written to disk (0 if none are written at -** all). -*/ -static int leafWriterRootInfo(fulltext_vtab *v, LeafWriter *pWriter, - char **ppRootInfo, int *pnRootInfo, - sqlite_int64 *piEndBlockid){ - /* we can fit the segment entirely inline */ - if( !pWriter->has_parent && pWriter->data.nDatadata.pData; - *pnRootInfo = pWriter->data.nData; - *piEndBlockid = 0; - return SQLITE_OK; - } - - /* Flush remaining leaf data. */ - if( pWriter->data.nData>0 ){ - int rc = leafWriterFlush(v, pWriter); - if( rc!=SQLITE_OK ) return rc; - } - - /* We must have flushed a leaf at some point. */ - assert( pWriter->has_parent ); - - /* Tenatively set the end leaf blockid as the end blockid. If the - ** interior node can be returned inline, this will be the final - ** blockid, otherwise it will be overwritten by - ** interiorWriterRootInfo(). - */ - *piEndBlockid = pWriter->iEndBlockid; - - return interiorWriterRootInfo(v, &pWriter->parentWriter, - ppRootInfo, pnRootInfo, piEndBlockid); -} - -/* Collect the rootInfo data and store it into the segment directory. -** This has the effect of flushing the segment's leaf data to -** %_segments, and also flushing any interior nodes to %_segments. -*/ -static int leafWriterFinalize(fulltext_vtab *v, LeafWriter *pWriter){ - sqlite_int64 iEndBlockid; - char *pRootInfo; - int rc, nRootInfo; - - rc = leafWriterRootInfo(v, pWriter, &pRootInfo, &nRootInfo, &iEndBlockid); - if( rc!=SQLITE_OK ) return rc; - - /* Don't bother storing an entirely empty segment. */ - if( iEndBlockid==0 && nRootInfo==0 ) return SQLITE_OK; - - return segdir_set(v, pWriter->iLevel, pWriter->idx, - pWriter->iStartBlockid, pWriter->iEndBlockid, - iEndBlockid, pRootInfo, nRootInfo); -} - -static void leafWriterDestroy(LeafWriter *pWriter){ - if( pWriter->has_parent ) interiorWriterDestroy(&pWriter->parentWriter); - dataBufferDestroy(&pWriter->term); - dataBufferDestroy(&pWriter->data); -} - -/* Encode a term into the leafWriter, delta-encoding as appropriate. -** Returns the length of the new term which distinguishes it from the -** previous term, which can be used to set nTermDistinct when a node -** boundary is crossed. 
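The comment above describes how leafWriterEncodeTerm() (just below) stores each term as a shared-prefix length, a suffix length, and the suffix bytes, all varint-encoded. A compact sketch of that delta-encoding follows; it uses a simplified 7-bit varint and hypothetical names, not the actual fts2 putVarint() or buffer types.

```c
#include <stdio.h>
#include <string.h>

/* Minimal 7-bit continuation varint (low-order group first). The real
** fts2 putVarint() is equivalent in spirit but not byte-for-byte. */
static int putVarintSketch(unsigned char *p, unsigned long long v){
  int n = 0;
  do{
    p[n++] = (unsigned char)((v & 0x7f) | (v>0x7f ? 0x80 : 0));
    v >>= 7;
  }while( v );
  return n;
}

/* Encode zTerm relative to the previously written term, as
** varint(nPrefix) varint(nSuffix) suffix-bytes. Illustrative only. */
static int encodeDeltaTerm(unsigned char *out,
                           const char *zPrev, int nPrev,
                           const char *zTerm, int nTerm){
  int nPrefix = 0, n;
  while( nPrefix<nPrev && nPrefix<nTerm && zPrev[nPrefix]==zTerm[nPrefix] ){
    nPrefix++;
  }
  n  = putVarintSketch(out, (unsigned long long)nPrefix);
  n += putVarintSketch(out+n, (unsigned long long)(nTerm-nPrefix));
  memcpy(out+n, zTerm+nPrefix, (size_t)(nTerm-nPrefix));
  return n + nTerm - nPrefix;
}

int main(void){
  unsigned char buf[64];
  int n = encodeDeltaTerm(buf, "search", 6, "searching", 9);
  printf("%d bytes: prefix=%d suffix=%d \"%.*s\"\n",
         n, buf[0], buf[1], (int)buf[1], (const char*)(buf+2));
  return 0;
}
```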
-*/ -static int leafWriterEncodeTerm(LeafWriter *pWriter, - const char *pTerm, int nTerm){ - char c[VARINT_MAX+VARINT_MAX]; - int n, nPrefix = 0; - - assert( nTerm>0 ); - while( nPrefixterm.nData && - pTerm[nPrefix]==pWriter->term.pData[nPrefix] ){ - nPrefix++; - /* Failing this implies that the terms weren't in order. */ - assert( nPrefixdata.nData==0 ){ - /* Encode the node header and leading term as: - ** varint(0) - ** varint(nTerm) - ** char pTerm[nTerm] - */ - n = putVarint(c, '\0'); - n += putVarint(c+n, nTerm); - dataBufferAppend2(&pWriter->data, c, n, pTerm, nTerm); - }else{ - /* Delta-encode the term as: - ** varint(nPrefix) - ** varint(nSuffix) - ** char pTermSuffix[nSuffix] - */ - n = putVarint(c, nPrefix); - n += putVarint(c+n, nTerm-nPrefix); - dataBufferAppend2(&pWriter->data, c, n, pTerm+nPrefix, nTerm-nPrefix); - } - dataBufferReplace(&pWriter->term, pTerm, nTerm); - - return nPrefix+1; -} - -/* Used to avoid a memmove when a large amount of doclist data is in -** the buffer. This constructs a node and term header before -** iDoclistData and flushes the resulting complete node using -** leafWriterInternalFlush(). -*/ -static int leafWriterInlineFlush(fulltext_vtab *v, LeafWriter *pWriter, - const char *pTerm, int nTerm, - int iDoclistData){ - char c[VARINT_MAX+VARINT_MAX]; - int iData, n = putVarint(c, 0); - n += putVarint(c+n, nTerm); - - /* There should always be room for the header. Even if pTerm shared - ** a substantial prefix with the previous term, the entire prefix - ** could be constructed from earlier data in the doclist, so there - ** should be room. - */ - assert( iDoclistData>=n+nTerm ); - - iData = iDoclistData-(n+nTerm); - memcpy(pWriter->data.pData+iData, c, n); - memcpy(pWriter->data.pData+iData+n, pTerm, nTerm); - - return leafWriterInternalFlush(v, pWriter, iData, pWriter->data.nData-iData); -} - -/* Push pTerm[nTerm] along with the doclist data to the leaf layer of -** %_segments. -*/ -static int leafWriterStepMerge(fulltext_vtab *v, LeafWriter *pWriter, - const char *pTerm, int nTerm, - DLReader *pReaders, int nReaders){ - char c[VARINT_MAX+VARINT_MAX]; - int iTermData = pWriter->data.nData, iDoclistData; - int i, nData, n, nActualData, nActual, rc, nTermDistinct; - - ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); - nTermDistinct = leafWriterEncodeTerm(pWriter, pTerm, nTerm); - - /* Remember nTermDistinct if opening a new node. */ - if( iTermData==0 ) pWriter->nTermDistinct = nTermDistinct; - - iDoclistData = pWriter->data.nData; - - /* Estimate the length of the merged doclist so we can leave space - ** to encode it. - */ - for(i=0, nData=0; idata, c, n); - - docListMerge(&pWriter->data, pReaders, nReaders); - ASSERT_VALID_DOCLIST(DL_DEFAULT, - pWriter->data.pData+iDoclistData+n, - pWriter->data.nData-iDoclistData-n, NULL); - - /* The actual amount of doclist data at this point could be smaller - ** than the length we encoded. Additionally, the space required to - ** encode this length could be smaller. For small doclists, this is - ** not a big deal, we can just use memmove() to adjust things. - */ - nActualData = pWriter->data.nData-(iDoclistData+n); - nActual = putVarint(c, nActualData); - assert( nActualData<=nData ); - assert( nActual<=n ); - - /* If the new doclist is big enough for force a standalone leaf - ** node, we can immediately flush it inline without doing the - ** memmove(). 
- */ - /* TODO(shess) This test matches leafWriterStep(), which does this - ** test before it knows the cost to varint-encode the term and - ** doclist lengths. At some point, change to - ** pWriter->data.nData-iTermData>STANDALONE_MIN. - */ - if( nTerm+nActualData>STANDALONE_MIN ){ - /* Push leaf node from before this term. */ - if( iTermData>0 ){ - rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); - if( rc!=SQLITE_OK ) return rc; - - pWriter->nTermDistinct = nTermDistinct; - } - - /* Fix the encoded doclist length. */ - iDoclistData += n - nActual; - memcpy(pWriter->data.pData+iDoclistData, c, nActual); - - /* Push the standalone leaf node. */ - rc = leafWriterInlineFlush(v, pWriter, pTerm, nTerm, iDoclistData); - if( rc!=SQLITE_OK ) return rc; - - /* Leave the node empty. */ - dataBufferReset(&pWriter->data); - - return rc; - } - - /* At this point, we know that the doclist was small, so do the - ** memmove if indicated. - */ - if( nActualdata.pData+iDoclistData+nActual, - pWriter->data.pData+iDoclistData+n, - pWriter->data.nData-(iDoclistData+n)); - pWriter->data.nData -= n-nActual; - } - - /* Replace written length with actual length. */ - memcpy(pWriter->data.pData+iDoclistData, c, nActual); - - /* If the node is too large, break things up. */ - /* TODO(shess) This test matches leafWriterStep(), which does this - ** test before it knows the cost to varint-encode the term and - ** doclist lengths. At some point, change to - ** pWriter->data.nData>LEAF_MAX. - */ - if( iTermData+nTerm+nActualData>LEAF_MAX ){ - /* Flush out the leading data as a node */ - rc = leafWriterInternalFlush(v, pWriter, 0, iTermData); - if( rc!=SQLITE_OK ) return rc; - - pWriter->nTermDistinct = nTermDistinct; - - /* Rebuild header using the current term */ - n = putVarint(pWriter->data.pData, 0); - n += putVarint(pWriter->data.pData+n, nTerm); - memcpy(pWriter->data.pData+n, pTerm, nTerm); - n += nTerm; - - /* There should always be room, because the previous encoding - ** included all data necessary to construct the term. - */ - assert( ndata.nData-iDoclistDatadata.pData+n, - pWriter->data.pData+iDoclistData, - pWriter->data.nData-iDoclistData); - pWriter->data.nData -= iDoclistData-n; - } - ASSERT_VALID_LEAF_NODE(pWriter->data.pData, pWriter->data.nData); - - return SQLITE_OK; -} - -/* Push pTerm[nTerm] along with the doclist data to the leaf layer of -** %_segments. -*/ -/* TODO(shess) Revise writeZeroSegment() so that doclists are -** constructed directly in pWriter->data. -*/ -static int leafWriterStep(fulltext_vtab *v, LeafWriter *pWriter, - const char *pTerm, int nTerm, - const char *pData, int nData){ - int rc; - DLReader reader; - - dlrInit(&reader, DL_DEFAULT, pData, nData); - rc = leafWriterStepMerge(v, pWriter, pTerm, nTerm, &reader, 1); - dlrDestroy(&reader); - - return rc; -} - - -/****************************************************************/ -/* LeafReader is used to iterate over an individual leaf node. */ -typedef struct LeafReader { - DataBuffer term; /* copy of current term. */ - - const char *pData; /* data for current term. */ - int nData; -} LeafReader; - -static void leafReaderDestroy(LeafReader *pReader){ - dataBufferDestroy(&pReader->term); - SCRAMBLE(pReader); -} - -static int leafReaderAtEnd(LeafReader *pReader){ - return pReader->nData<=0; -} - -/* Access the current term. 
*/ -static int leafReaderTermBytes(LeafReader *pReader){ - return pReader->term.nData; -} -static const char *leafReaderTerm(LeafReader *pReader){ - assert( pReader->term.nData>0 ); - return pReader->term.pData; -} - -/* Access the doclist data for the current term. */ -static int leafReaderDataBytes(LeafReader *pReader){ - int nData; - assert( pReader->term.nData>0 ); - getVarint32(pReader->pData, &nData); - return nData; -} -static const char *leafReaderData(LeafReader *pReader){ - int n, nData; - assert( pReader->term.nData>0 ); - n = getVarint32(pReader->pData, &nData); - return pReader->pData+n; -} - -static void leafReaderInit(const char *pData, int nData, - LeafReader *pReader){ - int nTerm, n; - - assert( nData>0 ); - assert( pData[0]=='\0' ); - - CLEAR(pReader); - - /* Read the first term, skipping the header byte. */ - n = getVarint32(pData+1, &nTerm); - dataBufferInit(&pReader->term, nTerm); - dataBufferReplace(&pReader->term, pData+1+n, nTerm); - - /* Position after the first term. */ - assert( 1+n+nTermpData = pData+1+n+nTerm; - pReader->nData = nData-1-n-nTerm; -} - -/* Step the reader forward to the next term. */ -static void leafReaderStep(LeafReader *pReader){ - int n, nData, nPrefix, nSuffix; - assert( !leafReaderAtEnd(pReader) ); - - /* Skip previous entry's data block. */ - n = getVarint32(pReader->pData, &nData); - assert( n+nData<=pReader->nData ); - pReader->pData += n+nData; - pReader->nData -= n+nData; - - if( !leafReaderAtEnd(pReader) ){ - /* Construct the new term using a prefix from the old term plus a - ** suffix from the leaf data. - */ - n = getVarint32(pReader->pData, &nPrefix); - n += getVarint32(pReader->pData+n, &nSuffix); - assert( n+nSuffixnData ); - pReader->term.nData = nPrefix; - dataBufferAppend(&pReader->term, pReader->pData+n, nSuffix); - - pReader->pData += n+nSuffix; - pReader->nData -= n+nSuffix; - } -} - -/* strcmp-style comparison of pReader's current term against pTerm. -** If isPrefix, equality means equal through nTerm bytes. -*/ -static int leafReaderTermCmp(LeafReader *pReader, - const char *pTerm, int nTerm, int isPrefix){ - int c, n = pReader->term.nDataterm.nData : nTerm; - if( n==0 ){ - if( pReader->term.nData>0 ) return -1; - if(nTerm>0 ) return 1; - return 0; - } - - c = memcmp(pReader->term.pData, pTerm, n); - if( c!=0 ) return c; - if( isPrefix && n==nTerm ) return 0; - return pReader->term.nData - nTerm; -} - - -/****************************************************************/ -/* LeavesReader wraps LeafReader to allow iterating over the entire -** leaf layer of the tree. -*/ -typedef struct LeavesReader { - int idx; /* Index within the segment. */ - - sqlite3_stmt *pStmt; /* Statement we're streaming leaves from. */ - int eof; /* we've seen SQLITE_DONE from pStmt. */ - - LeafReader leafReader; /* reader for the current leaf. */ - DataBuffer rootData; /* root data for inline. */ -} LeavesReader; - -/* Access the current term. */ -static int leavesReaderTermBytes(LeavesReader *pReader){ - assert( !pReader->eof ); - return leafReaderTermBytes(&pReader->leafReader); -} -static const char *leavesReaderTerm(LeavesReader *pReader){ - assert( !pReader->eof ); - return leafReaderTerm(&pReader->leafReader); -} - -/* Access the doclist data for the current term. 
*/ -static int leavesReaderDataBytes(LeavesReader *pReader){ - assert( !pReader->eof ); - return leafReaderDataBytes(&pReader->leafReader); -} -static const char *leavesReaderData(LeavesReader *pReader){ - assert( !pReader->eof ); - return leafReaderData(&pReader->leafReader); -} - -static int leavesReaderAtEnd(LeavesReader *pReader){ - return pReader->eof; -} - -/* loadSegmentLeaves() may not read all the way to SQLITE_DONE, thus -** leaving the statement handle open, which locks the table. -*/ -/* TODO(shess) This "solution" is not satisfactory. Really, there -** should be check-in function for all statement handles which -** arranges to call sqlite3_reset(). This most likely will require -** modification to control flow all over the place, though, so for now -** just punt. -** -** Note the current system assumes that segment merges will run to -** completion, which is why this particular probably hasn't arisen in -** this case. Probably a brittle assumption. -*/ -static int leavesReaderReset(LeavesReader *pReader){ - return sqlite3_reset(pReader->pStmt); -} - -static void leavesReaderDestroy(LeavesReader *pReader){ - /* If idx is -1, that means we're using a non-cached statement - ** handle in the optimize() case, so we need to release it. - */ - if( pReader->pStmt!=NULL && pReader->idx==-1 ){ - sqlite3_finalize(pReader->pStmt); - } - leafReaderDestroy(&pReader->leafReader); - dataBufferDestroy(&pReader->rootData); - SCRAMBLE(pReader); -} - -/* Initialize pReader with the given root data (if iStartBlockid==0 -** the leaf data was entirely contained in the root), or from the -** stream of blocks between iStartBlockid and iEndBlockid, inclusive. -*/ -static int leavesReaderInit(fulltext_vtab *v, - int idx, - sqlite_int64 iStartBlockid, - sqlite_int64 iEndBlockid, - const char *pRootData, int nRootData, - LeavesReader *pReader){ - CLEAR(pReader); - pReader->idx = idx; - - dataBufferInit(&pReader->rootData, 0); - if( iStartBlockid==0 ){ - /* Entire leaf level fit in root data. */ - dataBufferReplace(&pReader->rootData, pRootData, nRootData); - leafReaderInit(pReader->rootData.pData, pReader->rootData.nData, - &pReader->leafReader); - }else{ - sqlite3_stmt *s; - int rc = sql_get_leaf_statement(v, idx, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iStartBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 2, iEndBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ){ - pReader->eof = 1; - return SQLITE_OK; - } - if( rc!=SQLITE_ROW ) return rc; - - pReader->pStmt = s; - leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), - sqlite3_column_bytes(pReader->pStmt, 0), - &pReader->leafReader); - } - return SQLITE_OK; -} - -/* Step the current leaf forward to the next term. If we reach the -** end of the current leaf, step forward to the next leaf block. -*/ -static int leavesReaderStep(fulltext_vtab *v, LeavesReader *pReader){ - assert( !leavesReaderAtEnd(pReader) ); - leafReaderStep(&pReader->leafReader); - - if( leafReaderAtEnd(&pReader->leafReader) ){ - int rc; - if( pReader->rootData.pData ){ - pReader->eof = 1; - return SQLITE_OK; - } - rc = sqlite3_step(pReader->pStmt); - if( rc!=SQLITE_ROW ){ - pReader->eof = 1; - return rc==SQLITE_DONE ? SQLITE_OK : rc; - } - leafReaderDestroy(&pReader->leafReader); - leafReaderInit(sqlite3_column_blob(pReader->pStmt, 0), - sqlite3_column_bytes(pReader->pStmt, 0), - &pReader->leafReader); - } - return SQLITE_OK; -} - -/* Order LeavesReaders by their term, ignoring idx. 
Readers at eof -** always sort to the end. -*/ -static int leavesReaderTermCmp(LeavesReader *lr1, LeavesReader *lr2){ - if( leavesReaderAtEnd(lr1) ){ - if( leavesReaderAtEnd(lr2) ) return 0; - return 1; - } - if( leavesReaderAtEnd(lr2) ) return -1; - - return leafReaderTermCmp(&lr1->leafReader, - leavesReaderTerm(lr2), leavesReaderTermBytes(lr2), - 0); -} - -/* Similar to leavesReaderTermCmp(), with additional ordering by idx -** so that older segments sort before newer segments. -*/ -static int leavesReaderCmp(LeavesReader *lr1, LeavesReader *lr2){ - int c = leavesReaderTermCmp(lr1, lr2); - if( c!=0 ) return c; - return lr1->idx-lr2->idx; -} - -/* Assume that pLr[1]..pLr[nLr] are sorted. Bubble pLr[0] into its -** sorted position. -*/ -static void leavesReaderReorder(LeavesReader *pLr, int nLr){ - while( nLr>1 && leavesReaderCmp(pLr, pLr+1)>0 ){ - LeavesReader tmp = pLr[0]; - pLr[0] = pLr[1]; - pLr[1] = tmp; - nLr--; - pLr++; - } -} - -/* Initializes pReaders with the segments from level iLevel, returning -** the number of segments in *piReaders. Leaves pReaders in sorted -** order. -*/ -static int leavesReadersInit(fulltext_vtab *v, int iLevel, - LeavesReader *pReaders, int *piReaders){ - sqlite3_stmt *s; - int i, rc = sql_get_statement(v, SEGDIR_SELECT_LEVEL_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int(s, 1, iLevel); - if( rc!=SQLITE_OK ) return rc; - - i = 0; - while( (rc = sqlite3_step(s))==SQLITE_ROW ){ - sqlite_int64 iStart = sqlite3_column_int64(s, 0); - sqlite_int64 iEnd = sqlite3_column_int64(s, 1); - const char *pRootData = sqlite3_column_blob(s, 2); - int nRootData = sqlite3_column_bytes(s, 2); - - assert( i0 ){ - leavesReaderDestroy(&pReaders[i]); - } - return rc; - } - - *piReaders = i; - - /* Leave our results sorted by term, then age. */ - while( i-- ){ - leavesReaderReorder(pReaders+i, *piReaders-i); - } - return SQLITE_OK; -} - -/* Merge doclists from pReaders[nReaders] into a single doclist, which -** is written to pWriter. Assumes pReaders is ordered oldest to -** newest. -*/ -/* TODO(shess) Consider putting this inline in segmentMerge(). */ -static int leavesReadersMerge(fulltext_vtab *v, - LeavesReader *pReaders, int nReaders, - LeafWriter *pWriter){ - DLReader dlReaders[MERGE_COUNT]; - const char *pTerm = leavesReaderTerm(pReaders); - int i, nTerm = leavesReaderTermBytes(pReaders); - - assert( nReaders<=MERGE_COUNT ); - - for(i=0; i0 ){ - rc = leavesReaderStep(v, lrs+i); - if( rc!=SQLITE_OK ) goto err; - - /* Reorder by term, then by age. */ - leavesReaderReorder(lrs+i, MERGE_COUNT-i); - } - } - - for(i=0; i0 ); - - for(rc=SQLITE_OK; rc==SQLITE_OK && !leavesReaderAtEnd(pReader); - rc=leavesReaderStep(v, pReader)){ - /* TODO(shess) Really want leavesReaderTermCmp(), but that name is - ** already taken to compare the terms of two LeavesReaders. Think - ** on a better name. [Meanwhile, break encapsulation rather than - ** use a confusing name.] - */ - int c = leafReaderTermCmp(&pReader->leafReader, pTerm, nTerm, isPrefix); - if( c>0 ) break; /* Past any possible matches. */ - if( c==0 ){ - const char *pData = leavesReaderData(pReader); - int iBuffer, nData = leavesReaderDataBytes(pReader); - - /* Find the first empty buffer. */ - for(iBuffer=0; iBuffer0 ){ - assert(pBuffers!=NULL); - memcpy(p, pBuffers, nBuffers*sizeof(*pBuffers)); - sqlite3_free(pBuffers); - } - pBuffers = p; - } - dataBufferInit(&(pBuffers[nBuffers]), 0); - nBuffers++; - } - - /* At this point, must have an empty at iBuffer. 
*/ - assert(iBufferpData, p->nData); - - /* dataBufferReset() could allow a large doclist to blow up - ** our memory requirements. - */ - if( p->nCapacity<1024 ){ - dataBufferReset(p); - }else{ - dataBufferDestroy(p); - dataBufferInit(p, 0); - } - } - } - } - } - - /* Union all the doclists together into *out. */ - /* TODO(shess) What if *out is big? Sigh. */ - if( rc==SQLITE_OK && nBuffers>0 ){ - int iBuffer; - for(iBuffer=0; iBuffer0 ){ - if( out->nData==0 ){ - dataBufferSwap(out, &(pBuffers[iBuffer])); - }else{ - docListAccumulateUnion(out, pBuffers[iBuffer].pData, - pBuffers[iBuffer].nData); - } - } - } - } - - while( nBuffers-- ){ - dataBufferDestroy(&(pBuffers[nBuffers])); - } - if( pBuffers!=NULL ) sqlite3_free(pBuffers); - - return rc; -} - -/* Call loadSegmentLeavesInt() with pData/nData as input. */ -static int loadSegmentLeaf(fulltext_vtab *v, const char *pData, int nData, - const char *pTerm, int nTerm, int isPrefix, - DataBuffer *out){ - LeavesReader reader; - int rc; - - assert( nData>1 ); - assert( *pData=='\0' ); - rc = leavesReaderInit(v, 0, 0, 0, pData, nData, &reader); - if( rc!=SQLITE_OK ) return rc; - - rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); - leavesReaderReset(&reader); - leavesReaderDestroy(&reader); - return rc; -} - -/* Call loadSegmentLeavesInt() with the leaf nodes from iStartLeaf to -** iEndLeaf (inclusive) as input, and merge the resulting doclist into -** out. -*/ -static int loadSegmentLeaves(fulltext_vtab *v, - sqlite_int64 iStartLeaf, sqlite_int64 iEndLeaf, - const char *pTerm, int nTerm, int isPrefix, - DataBuffer *out){ - int rc; - LeavesReader reader; - - assert( iStartLeaf<=iEndLeaf ); - rc = leavesReaderInit(v, 0, iStartLeaf, iEndLeaf, NULL, 0, &reader); - if( rc!=SQLITE_OK ) return rc; - - rc = loadSegmentLeavesInt(v, &reader, pTerm, nTerm, isPrefix, out); - leavesReaderReset(&reader); - leavesReaderDestroy(&reader); - return rc; -} - -/* Taking pData/nData as an interior node, find the sequence of child -** nodes which could include pTerm/nTerm/isPrefix. Note that the -** interior node terms logically come between the blocks, so there is -** one more blockid than there are terms (that block contains terms >= -** the last interior-node term). -*/ -/* TODO(shess) The calling code may already know that the end child is -** not worth calculating, because the end may be in a later sibling -** node. Consider whether breaking symmetry is worthwhile. I suspect -** it is not worthwhile. -*/ -static void getChildrenContaining(const char *pData, int nData, - const char *pTerm, int nTerm, int isPrefix, - sqlite_int64 *piStartChild, - sqlite_int64 *piEndChild){ - InteriorReader reader; - - assert( nData>1 ); - assert( *pData!='\0' ); - interiorReaderInit(pData, nData, &reader); - - /* Scan for the first child which could contain pTerm/nTerm. */ - while( !interiorReaderAtEnd(&reader) ){ - if( interiorReaderTermCmp(&reader, pTerm, nTerm, 0)>0 ) break; - interiorReaderStep(&reader); - } - *piStartChild = interiorReaderCurrentBlockid(&reader); - - /* Keep scanning to find a term greater than our term, using prefix - ** comparison if indicated. If isPrefix is false, this will be the - ** same blockid as the starting block. - */ - while( !interiorReaderAtEnd(&reader) ){ - if( interiorReaderTermCmp(&reader, pTerm, nTerm, isPrefix)>0 ) break; - interiorReaderStep(&reader); - } - *piEndChild = interiorReaderCurrentBlockid(&reader); - - interiorReaderDestroy(&reader); - - /* Children must ascend, and if !prefix, both must be the same. 
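getChildrenContaining() above walks an interior node's terms to pick the range of child blocks that could hold a term; because the terms logically sit between children, there is one more child than there are terms, and prefix queries can span several children. A simplified sketch over plain string arrays follows; the names are hypothetical and the real node is varint- and prefix-encoded.

```c
#include <stdio.h>
#include <string.h>

/* Given an interior node's sorted divider terms and the blockid of its
** first child, compute the first and last child block that could
** contain zTerm. With isPrefix set, every child bounded by terms that
** share the prefix is included. Illustrative only. */
static void childrenContaining(const char **azTerm, int nTerm,
                               long long iFirstChild,
                               const char *zTerm, int isPrefix,
                               long long *piStart, long long *piEnd){
  int i = 0;
  size_t n = strlen(zTerm);
  /* First child whose subtree could contain zTerm (terms equal to a
  ** divider live in the child to its right). */
  while( i<nTerm && strcmp(azTerm[i], zTerm)<=0 ) i++;
  *piStart = iFirstChild + i;
  /* For prefix queries, keep going while dividers still match the
  ** prefix; exact queries end up with start==end. */
  while( i<nTerm && isPrefix && strncmp(azTerm[i], zTerm, n)==0 ) i++;
  *piEnd = iFirstChild + i;
}

int main(void){
  const char *azTerm[] = {"apple", "search", "searching", "zebra"};
  long long iStart, iEnd;
  childrenContaining(azTerm, 4, 100, "search", 1, &iStart, &iEnd);
  printf("children %lld..%lld\n", iStart, iEnd);
  return 0;
}
```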
*/ - assert( *piEndChild>=*piStartChild ); - assert( isPrefix || *piStartChild==*piEndChild ); -} - -/* Read block at iBlockid and pass it with other params to -** getChildrenContaining(). -*/ -static int loadAndGetChildrenContaining( - fulltext_vtab *v, - sqlite_int64 iBlockid, - const char *pTerm, int nTerm, int isPrefix, - sqlite_int64 *piStartChild, sqlite_int64 *piEndChild -){ - sqlite3_stmt *s = NULL; - int rc; - - assert( iBlockid!=0 ); - assert( pTerm!=NULL ); - assert( nTerm!=0 ); /* TODO(shess) Why not allow this? */ - assert( piStartChild!=NULL ); - assert( piEndChild!=NULL ); - - rc = sql_get_statement(v, BLOCK_SELECT_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_bind_int64(s, 1, iBlockid); - if( rc!=SQLITE_OK ) return rc; - - rc = sqlite3_step(s); - if( rc==SQLITE_DONE ) return SQLITE_ERROR; - if( rc!=SQLITE_ROW ) return rc; - - getChildrenContaining(sqlite3_column_blob(s, 0), sqlite3_column_bytes(s, 0), - pTerm, nTerm, isPrefix, piStartChild, piEndChild); - - /* We expect only one row. We must execute another sqlite3_step() - * to complete the iteration; otherwise the table will remain - * locked. */ - rc = sqlite3_step(s); - if( rc==SQLITE_ROW ) return SQLITE_ERROR; - if( rc!=SQLITE_DONE ) return rc; - - return SQLITE_OK; -} - -/* Traverse the tree represented by pData[nData] looking for -** pTerm[nTerm], placing its doclist into *out. This is internal to -** loadSegment() to make error-handling cleaner. -*/ -static int loadSegmentInt(fulltext_vtab *v, const char *pData, int nData, - sqlite_int64 iLeavesEnd, - const char *pTerm, int nTerm, int isPrefix, - DataBuffer *out){ - /* Special case where root is a leaf. */ - if( *pData=='\0' ){ - return loadSegmentLeaf(v, pData, nData, pTerm, nTerm, isPrefix, out); - }else{ - int rc; - sqlite_int64 iStartChild, iEndChild; - - /* Process pData as an interior node, then loop down the tree - ** until we find the set of leaf nodes to scan for the term. - */ - getChildrenContaining(pData, nData, pTerm, nTerm, isPrefix, - &iStartChild, &iEndChild); - while( iStartChild>iLeavesEnd ){ - sqlite_int64 iNextStart, iNextEnd; - rc = loadAndGetChildrenContaining(v, iStartChild, pTerm, nTerm, isPrefix, - &iNextStart, &iNextEnd); - if( rc!=SQLITE_OK ) return rc; - - /* If we've branched, follow the end branch, too. */ - if( iStartChild!=iEndChild ){ - sqlite_int64 iDummy; - rc = loadAndGetChildrenContaining(v, iEndChild, pTerm, nTerm, isPrefix, - &iDummy, &iNextEnd); - if( rc!=SQLITE_OK ) return rc; - } - - assert( iNextStart<=iNextEnd ); - iStartChild = iNextStart; - iEndChild = iNextEnd; - } - assert( iStartChild<=iLeavesEnd ); - assert( iEndChild<=iLeavesEnd ); - - /* Scan through the leaf segments for doclists. */ - return loadSegmentLeaves(v, iStartChild, iEndChild, - pTerm, nTerm, isPrefix, out); - } -} - -/* Call loadSegmentInt() to collect the doclist for pTerm/nTerm, then -** merge its doclist over *out (any duplicate doclists read from the -** segment rooted at pData will overwrite those in *out). -*/ -/* TODO(shess) Consider changing this to determine the depth of the -** leaves using either the first characters of interior nodes (when -** ==1, we're one level above the leaves), or the first character of -** the root (which will describe the height of the tree directly). -** Either feels somewhat tricky to me. -*/ -/* TODO(shess) The current merge is likely to be slow for large -** doclists (though it should process from newest/smallest to -** oldest/largest, so it may not be that bad). 
It might be useful to -** modify things to allow for N-way merging. This could either be -** within a segment, with pairwise merges across segments, or across -** all segments at once. -*/ -static int loadSegment(fulltext_vtab *v, const char *pData, int nData, - sqlite_int64 iLeavesEnd, - const char *pTerm, int nTerm, int isPrefix, - DataBuffer *out){ - DataBuffer result; - int rc; - - assert( nData>1 ); - - /* This code should never be called with buffered updates. */ - assert( v->nPendingData<0 ); - - dataBufferInit(&result, 0); - rc = loadSegmentInt(v, pData, nData, iLeavesEnd, - pTerm, nTerm, isPrefix, &result); - if( rc==SQLITE_OK && result.nData>0 ){ - if( out->nData==0 ){ - DataBuffer tmp = *out; - *out = result; - result = tmp; - }else{ - DataBuffer merged; - DLReader readers[2]; - - dlrInit(&readers[0], DL_DEFAULT, out->pData, out->nData); - dlrInit(&readers[1], DL_DEFAULT, result.pData, result.nData); - dataBufferInit(&merged, out->nData+result.nData); - docListMerge(&merged, readers, 2); - dataBufferDestroy(out); - *out = merged; - dlrDestroy(&readers[0]); - dlrDestroy(&readers[1]); - } - } - dataBufferDestroy(&result); - return rc; -} - -/* Scan the database and merge together the posting lists for the term -** into *out. -*/ -static int termSelect(fulltext_vtab *v, int iColumn, - const char *pTerm, int nTerm, int isPrefix, - DocListType iType, DataBuffer *out){ - DataBuffer doclist; - sqlite3_stmt *s; - int rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); - if( rc!=SQLITE_OK ) return rc; - - /* This code should never be called with buffered updates. */ - assert( v->nPendingData<0 ); - - dataBufferInit(&doclist, 0); - - /* Traverse the segments from oldest to newest so that newer doclist - ** elements for given docids overwrite older elements. - */ - while( (rc = sqlite3_step(s))==SQLITE_ROW ){ - const char *pData = sqlite3_column_blob(s, 2); - const int nData = sqlite3_column_bytes(s, 2); - const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); - rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, isPrefix, - &doclist); - if( rc!=SQLITE_OK ) goto err; - } - if( rc==SQLITE_DONE ){ - if( doclist.nData!=0 ){ - /* TODO(shess) The old term_select_all() code applied the column - ** restrict as we merged segments, leading to smaller buffers. - ** This is probably worthwhile to bring back, once the new storage - ** system is checked in. - */ - if( iColumn==v->nColumn) iColumn = -1; - docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, - iColumn, iType, out); - } - rc = SQLITE_OK; - } - - err: - dataBufferDestroy(&doclist); - return rc; -} - -/****************************************************************/ -/* Used to hold hashtable data for sorting. */ -typedef struct TermData { - const char *pTerm; - int nTerm; - DLCollector *pCollector; -} TermData; - -/* Orders TermData elements in strcmp fashion ( <0 for less-than, 0 -** for equal, >0 for greater-than). -*/ -static int termDataCmp(const void *av, const void *bv){ - const TermData *a = (const TermData *)av; - const TermData *b = (const TermData *)bv; - int n = a->nTermnTerm ? a->nTerm : b->nTerm; - int c = memcmp(a->pTerm, b->pTerm, n); - if( c!=0 ) return c; - return a->nTerm-b->nTerm; -} - -/* Order pTerms data by term, then write a new level 0 segment using -** LeafWriter. -*/ -static int writeZeroSegment(fulltext_vtab *v, fts2Hash *pTerms){ - fts2HashElem *e; - int idx, rc, i, n; - TermData *pData; - LeafWriter writer; - DataBuffer dl; - - /* Determine the next index at level 0, merging as necessary. 
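Another illustrative aside, not part of this patch: `termSelect()` above walks segments from oldest to newest so that newer doclist entries for a given docid overwrite older ones. The sketch below reduces that "newest segment wins" merge to sorted arrays of (docid, payload) pairs; `mergeNewestWins` and the `Entry` struct are invented names, and real doclists are delta-encoded varints rather than structs.

```c
/* Illustrative sketch only (not part of the patch): merge two doclists that
** are sorted by docid; on a collision the entry from the newer list replaces
** the one from the older list. */
#include <stdio.h>

typedef struct { long long docid; int payload; } Entry;

static int mergeNewestWins(const Entry *aOld, int nOld,
                           const Entry *aNew, int nNew,
                           Entry *aOut){
  int i=0, j=0, n=0;
  while( i<nOld || j<nNew ){
    if( j>=nNew || (i<nOld && aOld[i].docid<aNew[j].docid) ){
      aOut[n++] = aOld[i++];                              /* older only    */
    }else{
      if( i<nOld && aOld[i].docid==aNew[j].docid ) i++;   /* drop old copy */
      aOut[n++] = aNew[j++];                              /* newer wins    */
    }
  }
  return n;
}

int main(void){
  Entry aOld[] = { {1,10}, {3,30}, {7,70} };
  Entry aNew[] = { {3,99}, {9,90} };
  Entry aOut[8];
  int n = mergeNewestWins(aOld, 3, aNew, 2, aOut), i;
  for(i=0; i<n; i++) printf("[%lld %d] ", aOut[i].docid, aOut[i].payload);
  printf("\n");   /* prints: [1 10] [3 99] [7 70] [9 90] */
  return 0;
}
```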
*/ - rc = segdirNextIndex(v, 0, &idx); - if( rc!=SQLITE_OK ) return rc; - - n = fts2HashCount(pTerms); - pData = sqlite3_malloc(n*sizeof(TermData)); - - for(i = 0, e = fts2HashFirst(pTerms); e; i++, e = fts2HashNext(e)){ - assert( i1 ) qsort(pData, n, sizeof(*pData), termDataCmp); - - /* TODO(shess) Refactor so that we can write directly to the segment - ** DataBuffer, as happens for segment merges. - */ - leafWriterInit(0, idx, &writer); - dataBufferInit(&dl, 0); - for(i=0; inPendingData>=0 ){ - fts2HashElem *e; - for(e=fts2HashFirst(&v->pendingTerms); e; e=fts2HashNext(e)){ - dlcDelete(fts2HashData(e)); - } - fts2HashClear(&v->pendingTerms); - v->nPendingData = -1; - } - return SQLITE_OK; -} - -/* If pendingTerms has data, flush it to a level-zero segment, and -** free it. -*/ -static int flushPendingTerms(fulltext_vtab *v){ - if( v->nPendingData>=0 ){ - int rc = writeZeroSegment(v, &v->pendingTerms); - if( rc==SQLITE_OK ) clearPendingTerms(v); - return rc; - } - return SQLITE_OK; -} - -/* If pendingTerms is "too big", or docid is out of order, flush it. -** Regardless, be certain that pendingTerms is initialized for use. -*/ -static int initPendingTerms(fulltext_vtab *v, sqlite_int64 iDocid){ - /* TODO(shess) Explore whether partially flushing the buffer on - ** forced-flush would provide better performance. I suspect that if - ** we ordered the doclists by size and flushed the largest until the - ** buffer was half empty, that would let the less frequent terms - ** generate longer doclists. - */ - if( iDocid<=v->iPrevDocid || v->nPendingData>kPendingThreshold ){ - int rc = flushPendingTerms(v); - if( rc!=SQLITE_OK ) return rc; - } - if( v->nPendingData<0 ){ - fts2HashInit(&v->pendingTerms, FTS2_HASH_STRING, 1); - v->nPendingData = 0; - } - v->iPrevDocid = iDocid; - return SQLITE_OK; -} - -/* This function implements the xUpdate callback; it is the top-level entry - * point for inserting, deleting or updating a row in a full-text table. */ -static int fulltextUpdate(sqlite3_vtab *pVtab, int nArg, sqlite3_value **ppArg, - sqlite_int64 *pRowid){ - fulltext_vtab *v = (fulltext_vtab *) pVtab; - int rc; - - TRACE(("FTS2 Update %p\n", pVtab)); - - if( nArg<2 ){ - rc = index_delete(v, sqlite3_value_int64(ppArg[0])); - if( rc==SQLITE_OK ){ - /* If we just deleted the last row in the table, clear out the - ** index data. - */ - rc = content_exists(v); - if( rc==SQLITE_ROW ){ - rc = SQLITE_OK; - }else if( rc==SQLITE_DONE ){ - /* Clear the pending terms so we don't flush a useless level-0 - ** segment when the transaction closes. 
- */ - rc = clearPendingTerms(v); - if( rc==SQLITE_OK ){ - rc = segdir_delete_all(v); - } - } - } - } else if( sqlite3_value_type(ppArg[0]) != SQLITE_NULL ){ - /* An update: - * ppArg[0] = old rowid - * ppArg[1] = new rowid - * ppArg[2..2+v->nColumn-1] = values - * ppArg[2+v->nColumn] = value for magic column (we ignore this) - */ - sqlite_int64 rowid = sqlite3_value_int64(ppArg[0]); - if( sqlite3_value_type(ppArg[1]) != SQLITE_INTEGER || - sqlite3_value_int64(ppArg[1]) != rowid ){ - rc = SQLITE_ERROR; /* we don't allow changing the rowid */ - } else { - assert( nArg==2+v->nColumn+1); - rc = index_update(v, rowid, &ppArg[2]); - } - } else { - /* An insert: - * ppArg[1] = requested rowid - * ppArg[2..2+v->nColumn-1] = values - * ppArg[2+v->nColumn] = value for magic column (we ignore this) - */ - assert( nArg==2+v->nColumn+1); - rc = index_insert(v, ppArg[1], &ppArg[2], pRowid); - } - - return rc; -} - -static int fulltextSync(sqlite3_vtab *pVtab){ - TRACE(("FTS2 xSync()\n")); - return flushPendingTerms((fulltext_vtab *)pVtab); -} - -static int fulltextBegin(sqlite3_vtab *pVtab){ - fulltext_vtab *v = (fulltext_vtab *) pVtab; - TRACE(("FTS2 xBegin()\n")); - - /* Any buffered updates should have been cleared by the previous - ** transaction. - */ - assert( v->nPendingData<0 ); - return clearPendingTerms(v); -} - -static int fulltextCommit(sqlite3_vtab *pVtab){ - fulltext_vtab *v = (fulltext_vtab *) pVtab; - TRACE(("FTS2 xCommit()\n")); - - /* Buffered updates should have been cleared by fulltextSync(). */ - assert( v->nPendingData<0 ); - return clearPendingTerms(v); -} - -static int fulltextRollback(sqlite3_vtab *pVtab){ - TRACE(("FTS2 xRollback()\n")); - return clearPendingTerms((fulltext_vtab *)pVtab); -} - -/* -** Implementation of the snippet() function for FTS2 -*/ -static void snippetFunc( - sqlite3_context *pContext, - int argc, - sqlite3_value **argv -){ - fulltext_cursor *pCursor; - if( argc<1 ) return; - if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - sqlite3_result_error(pContext, "illegal first argument to html_snippet",-1); - }else{ - const char *zStart = ""; - const char *zEnd = ""; - const char *zEllipsis = "..."; - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - if( argc>=2 ){ - zStart = (const char*)sqlite3_value_text(argv[1]); - if( argc>=3 ){ - zEnd = (const char*)sqlite3_value_text(argv[2]); - if( argc>=4 ){ - zEllipsis = (const char*)sqlite3_value_text(argv[3]); - } - } - } - snippetAllOffsets(pCursor); - snippetText(pCursor, zStart, zEnd, zEllipsis); - sqlite3_result_text(pContext, pCursor->snippet.zSnippet, - pCursor->snippet.nSnippet, SQLITE_STATIC); - } -} - -/* -** Implementation of the offsets() function for FTS2 -*/ -static void snippetOffsetsFunc( - sqlite3_context *pContext, - int argc, - sqlite3_value **argv -){ - fulltext_cursor *pCursor; - if( argc<1 ) return; - if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - sqlite3_result_error(pContext, "illegal first argument to offsets",-1); - }else{ - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - snippetAllOffsets(pCursor); - snippetOffsetText(&pCursor->snippet); - sqlite3_result_text(pContext, - pCursor->snippet.zOffset, pCursor->snippet.nOffset, - SQLITE_STATIC); - } -} - -/* OptLeavesReader is nearly identical to LeavesReader, except that -** where LeavesReader is geared towards the merging of complete -** segment levels (with exactly MERGE_COUNT segments), OptLeavesReader -** 
is geared towards implementation of the optimize() function, and -** can merge all segments simultaneously. This version may be -** somewhat less efficient than LeavesReader because it merges into an -** accumulator rather than doing an N-way merge, but since segment -** size grows exponentially (so segment count logrithmically) this is -** probably not an immediate problem. -*/ -/* TODO(shess): Prove that assertion, or extend the merge code to -** merge tree fashion (like the prefix-searching code does). -*/ -/* TODO(shess): OptLeavesReader and LeavesReader could probably be -** merged with little or no loss of performance for LeavesReader. The -** merged code would need to handle >MERGE_COUNT segments, and would -** also need to be able to optionally optimize away deletes. -*/ -typedef struct OptLeavesReader { - /* Segment number, to order readers by age. */ - int segment; - LeavesReader reader; -} OptLeavesReader; - -static int optLeavesReaderAtEnd(OptLeavesReader *pReader){ - return leavesReaderAtEnd(&pReader->reader); -} -static int optLeavesReaderTermBytes(OptLeavesReader *pReader){ - return leavesReaderTermBytes(&pReader->reader); -} -static const char *optLeavesReaderData(OptLeavesReader *pReader){ - return leavesReaderData(&pReader->reader); -} -static int optLeavesReaderDataBytes(OptLeavesReader *pReader){ - return leavesReaderDataBytes(&pReader->reader); -} -static const char *optLeavesReaderTerm(OptLeavesReader *pReader){ - return leavesReaderTerm(&pReader->reader); -} -static int optLeavesReaderStep(fulltext_vtab *v, OptLeavesReader *pReader){ - return leavesReaderStep(v, &pReader->reader); -} -static int optLeavesReaderTermCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ - return leavesReaderTermCmp(&lr1->reader, &lr2->reader); -} -/* Order by term ascending, segment ascending (oldest to newest), with -** exhausted readers to the end. -*/ -static int optLeavesReaderCmp(OptLeavesReader *lr1, OptLeavesReader *lr2){ - int c = optLeavesReaderTermCmp(lr1, lr2); - if( c!=0 ) return c; - return lr1->segment-lr2->segment; -} -/* Bubble pLr[0] to appropriate place in pLr[1..nLr-1]. Assumes that -** pLr[1..nLr-1] is already sorted. -*/ -static void optLeavesReaderReorder(OptLeavesReader *pLr, int nLr){ - while( nLr>1 && optLeavesReaderCmp(pLr, pLr+1)>0 ){ - OptLeavesReader tmp = pLr[0]; - pLr[0] = pLr[1]; - pLr[1] = tmp; - nLr--; - pLr++; - } -} - -/* optimize() helper function. Put the readers in order and iterate -** through them, merging doclists for matching terms into pWriter. -** Returns SQLITE_OK on success, or the SQLite error code which -** prevented success. -*/ -static int optimizeInternal(fulltext_vtab *v, - OptLeavesReader *readers, int nReaders, - LeafWriter *pWriter){ - int i, rc = SQLITE_OK; - DataBuffer doclist, merged, tmp; - - /* Order the readers. */ - i = nReaders; - while( i-- > 0 ){ - optLeavesReaderReorder(&readers[i], nReaders-i); - } - - dataBufferInit(&doclist, LEAF_MAX); - dataBufferInit(&merged, LEAF_MAX); - - /* Exhausted readers bubble to the end, so when the first reader is - ** at eof, all are at eof. - */ - while( !optLeavesReaderAtEnd(&readers[0]) ){ - - /* Figure out how many readers share the next term. */ - for(i=1; i 0 ){ - dlrDestroy(&dlReaders[nReaders]); - } - - /* Accumulated doclist to reader 0 for next pass. */ - dlrInit(&dlReaders[0], DL_DEFAULT, doclist.pData, doclist.nData); - } - - /* Destroy reader that was left in the pipeline. */ - dlrDestroy(&dlReaders[0]); - - /* Trim deletions from the doclist. 
*/ - dataBufferReset(&merged); - docListTrim(DL_DEFAULT, doclist.pData, doclist.nData, - -1, DL_DEFAULT, &merged); - } - - /* Only pass doclists with hits (skip if all hits deleted). */ - if( merged.nData>0 ){ - rc = leafWriterStep(v, pWriter, - optLeavesReaderTerm(&readers[0]), - optLeavesReaderTermBytes(&readers[0]), - merged.pData, merged.nData); - if( rc!=SQLITE_OK ) goto err; - } - - /* Step merged readers to next term and reorder. */ - while( i-- > 0 ){ - rc = optLeavesReaderStep(v, &readers[i]); - if( rc!=SQLITE_OK ) goto err; - - optLeavesReaderReorder(&readers[i], nReaders-i); - } - } - - err: - dataBufferDestroy(&doclist); - dataBufferDestroy(&merged); - return rc; -} - -/* Implement optimize() function for FTS3. optimize(t) merges all -** segments in the fts index into a single segment. 't' is the magic -** table-named column. -*/ -static void optimizeFunc(sqlite3_context *pContext, - int argc, sqlite3_value **argv){ - fulltext_cursor *pCursor; - if( argc>1 ){ - sqlite3_result_error(pContext, "excess arguments to optimize()",-1); - }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - sqlite3_result_error(pContext, "illegal first argument to optimize",-1); - }else{ - fulltext_vtab *v; - int i, rc, iMaxLevel; - OptLeavesReader *readers; - int nReaders; - LeafWriter writer; - sqlite3_stmt *s; - - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - v = cursor_vtab(pCursor); - - /* Flush any buffered updates before optimizing. */ - rc = flushPendingTerms(v); - if( rc!=SQLITE_OK ) goto err; - - rc = segdir_count(v, &nReaders, &iMaxLevel); - if( rc!=SQLITE_OK ) goto err; - if( nReaders==0 || nReaders==1 ){ - sqlite3_result_text(pContext, "Index already optimal", -1, - SQLITE_STATIC); - return; - } - - rc = sql_get_statement(v, SEGDIR_SELECT_ALL_STMT, &s); - if( rc!=SQLITE_OK ) goto err; - - readers = sqlite3_malloc(nReaders*sizeof(readers[0])); - if( readers==NULL ) goto err; - - /* Note that there will already be a segment at this position - ** until we call segdir_delete() on iMaxLevel. - */ - leafWriterInit(iMaxLevel, 0, &writer); - - i = 0; - while( (rc = sqlite3_step(s))==SQLITE_ROW ){ - sqlite_int64 iStart = sqlite3_column_int64(s, 0); - sqlite_int64 iEnd = sqlite3_column_int64(s, 1); - const char *pRootData = sqlite3_column_blob(s, 2); - int nRootData = sqlite3_column_bytes(s, 2); - - assert( i 0 ){ - leavesReaderDestroy(&readers[i].reader); - } - sqlite3_free(readers); - - /* If we've successfully gotten to here, delete the old segments - ** and flush the interior structure of the new segment. - */ - if( rc==SQLITE_OK ){ - for( i=0; i<=iMaxLevel; i++ ){ - rc = segdir_delete(v, i); - if( rc!=SQLITE_OK ) break; - } - - if( rc==SQLITE_OK ) rc = leafWriterFinalize(v, &writer); - } - - leafWriterDestroy(&writer); - - if( rc!=SQLITE_OK ) goto err; - - sqlite3_result_text(pContext, "Index optimized", -1, SQLITE_STATIC); - return; - - /* TODO(shess): Error-handling needs to be improved along the - ** lines of the dump_ functions. - */ - err: - { - char buf[512]; - sqlite3_snprintf(sizeof(buf), buf, "Error in optimize: %s", - sqlite3_errmsg(sqlite3_context_db_handle(pContext))); - sqlite3_result_error(pContext, buf, -1); - } - } -} - -#ifdef SQLITE_TEST -/* Generate an error of the form ": ". If msg is NULL, -** pull the error from the context's db handle. 
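Illustrative aside, not part of this patch: `optimizeInternal()` above keeps its readers in an array ordered by current term (oldest segment first, exhausted readers last) and, after stepping the readers that shared a term, bubbles each back into place with `optLeavesReaderReorder()`. The sketch below shows the same cursor-reordering merge over plain sorted int arrays; unlike the real code it emits duplicates instead of merging their doclists, and `Cursor`, `curValue`, and `reorder` are invented names.

```c
/* Illustrative sketch only (not part of the patch): multi-way merge that
** keeps an array of cursors sorted and re-sorts by bubbling the stepped
** head cursor back into place. Exhausted cursors compare greater than
** everything, so they drift to the end, like exhausted OptLeavesReaders. */
#include <stdio.h>
#include <limits.h>

typedef struct { const int *a; int n; int i; } Cursor;

static int curValue(const Cursor *p){ return p->i<p->n ? p->a[p->i] : INT_MAX; }

/* Bubble aCur[0] rightward; assumes aCur[1..nCur-1] is already sorted,
** the same contract as optLeavesReaderReorder(). */
static void reorder(Cursor *aCur, int nCur){
  while( nCur>1 && curValue(aCur)>curValue(aCur+1) ){
    Cursor tmp = aCur[0];
    aCur[0] = aCur[1];
    aCur[1] = tmp;
    aCur++;
    nCur--;
  }
}

int main(void){
  const int a0[] = {1, 4, 9};
  const int a1[] = {2, 4};
  const int a2[] = {3};
  Cursor aCur[3] = { {a0,3,0}, {a1,2,0}, {a2,1,0} };
  int i;

  /* Initial ordering pass, back to front, mirroring optimizeInternal(). */
  for(i=2; i>=0; i--) reorder(&aCur[i], 3-i);

  while( curValue(&aCur[0])!=INT_MAX ){
    printf("%d ", curValue(&aCur[0]));
    aCur[0].i++;          /* step the head cursor ...          */
    reorder(aCur, 3);     /* ... and bubble it back into place */
  }
  printf("\n");           /* prints: 1 2 3 4 4 9 */
  return 0;
}
```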
-*/ -static void generateError(sqlite3_context *pContext, - const char *prefix, const char *msg){ - char buf[512]; - if( msg==NULL ) msg = sqlite3_errmsg(sqlite3_context_db_handle(pContext)); - sqlite3_snprintf(sizeof(buf), buf, "%s: %s", prefix, msg); - sqlite3_result_error(pContext, buf, -1); -} - -/* Helper function to collect the set of terms in the segment into -** pTerms. The segment is defined by the leaf nodes between -** iStartBlockid and iEndBlockid, inclusive, or by the contents of -** pRootData if iStartBlockid is 0 (in which case the entire segment -** fit in a leaf). -*/ -static int collectSegmentTerms(fulltext_vtab *v, sqlite3_stmt *s, - fts2Hash *pTerms){ - const sqlite_int64 iStartBlockid = sqlite3_column_int64(s, 0); - const sqlite_int64 iEndBlockid = sqlite3_column_int64(s, 1); - const char *pRootData = sqlite3_column_blob(s, 2); - const int nRootData = sqlite3_column_bytes(s, 2); - LeavesReader reader; - int rc = leavesReaderInit(v, 0, iStartBlockid, iEndBlockid, - pRootData, nRootData, &reader); - if( rc!=SQLITE_OK ) return rc; - - while( rc==SQLITE_OK && !leavesReaderAtEnd(&reader) ){ - const char *pTerm = leavesReaderTerm(&reader); - const int nTerm = leavesReaderTermBytes(&reader); - void *oldValue = sqlite3Fts2HashFind(pTerms, pTerm, nTerm); - void *newValue = (void *)((char *)oldValue+1); - - /* From the comment before sqlite3Fts2HashInsert in fts2_hash.c, - ** the data value passed is returned in case of malloc failure. - */ - if( newValue==sqlite3Fts2HashInsert(pTerms, pTerm, nTerm, newValue) ){ - rc = SQLITE_NOMEM; - }else{ - rc = leavesReaderStep(v, &reader); - } - } - - leavesReaderDestroy(&reader); - return rc; -} - -/* Helper function to build the result string for dump_terms(). */ -static int generateTermsResult(sqlite3_context *pContext, fts2Hash *pTerms){ - int iTerm, nTerms, nResultBytes, iByte; - char *result; - TermData *pData; - fts2HashElem *e; - - /* Iterate pTerms to generate an array of terms in pData for - ** sorting. - */ - nTerms = fts2HashCount(pTerms); - assert( nTerms>0 ); - pData = sqlite3_malloc(nTerms*sizeof(TermData)); - if( pData==NULL ) return SQLITE_NOMEM; - - nResultBytes = 0; - for(iTerm = 0, e = fts2HashFirst(pTerms); e; iTerm++, e = fts2HashNext(e)){ - nResultBytes += fts2HashKeysize(e)+1; /* Term plus trailing space */ - assert( iTerm0 ); /* nTerms>0, nResultsBytes must be, too. */ - result = sqlite3_malloc(nResultBytes); - if( result==NULL ){ - sqlite3_free(pData); - return SQLITE_NOMEM; - } - - if( nTerms>1 ) qsort(pData, nTerms, sizeof(*pData), termDataCmp); - - /* Read the terms in order to build the result. */ - iByte = 0; - for(iTerm=0; iTerm0 ){ - rc = generateTermsResult(pContext, &terms); - if( rc==SQLITE_NOMEM ){ - generateError(pContext, "dump_terms", "out of memory"); - }else{ - assert( rc==SQLITE_OK ); - } - }else if( argc==3 ){ - /* The specific segment asked for could not be found. */ - generateError(pContext, "dump_terms", "segment not found"); - }else{ - /* No segments found. */ - /* TODO(shess): It should be impossible to reach this. This - ** case can only happen for an empty table, in which case - ** SQLite has no rows to call this function on. - */ - sqlite3_result_null(pContext); - } - } - sqlite3Fts2HashClear(&terms); - } -} - -/* Expand the DL_DEFAULT doclist in pData into a text result in -** pContext. 
-*/ -static void createDoclistResult(sqlite3_context *pContext, - const char *pData, int nData){ - DataBuffer dump; - DLReader dlReader; - - assert( pData!=NULL && nData>0 ); - - dataBufferInit(&dump, 0); - dlrInit(&dlReader, DL_DEFAULT, pData, nData); - for( ; !dlrAtEnd(&dlReader); dlrStep(&dlReader) ){ - char buf[256]; - PLReader plReader; - - plrInit(&plReader, &dlReader); - if( DL_DEFAULT==DL_DOCIDS || plrAtEnd(&plReader) ){ - sqlite3_snprintf(sizeof(buf), buf, "[%lld] ", dlrDocid(&dlReader)); - dataBufferAppend(&dump, buf, strlen(buf)); - }else{ - int iColumn = plrColumn(&plReader); - - sqlite3_snprintf(sizeof(buf), buf, "[%lld %d[", - dlrDocid(&dlReader), iColumn); - dataBufferAppend(&dump, buf, strlen(buf)); - - for( ; !plrAtEnd(&plReader); plrStep(&plReader) ){ - if( plrColumn(&plReader)!=iColumn ){ - iColumn = plrColumn(&plReader); - sqlite3_snprintf(sizeof(buf), buf, "] %d[", iColumn); - assert( dump.nData>0 ); - dump.nData--; /* Overwrite trailing space. */ - assert( dump.pData[dump.nData]==' '); - dataBufferAppend(&dump, buf, strlen(buf)); - } - if( DL_DEFAULT==DL_POSITIONS_OFFSETS ){ - sqlite3_snprintf(sizeof(buf), buf, "%d,%d,%d ", - plrPosition(&plReader), - plrStartOffset(&plReader), plrEndOffset(&plReader)); - }else if( DL_DEFAULT==DL_POSITIONS ){ - sqlite3_snprintf(sizeof(buf), buf, "%d ", plrPosition(&plReader)); - }else{ - assert( NULL=="Unhandled DL_DEFAULT value"); - } - dataBufferAppend(&dump, buf, strlen(buf)); - } - plrDestroy(&plReader); - - assert( dump.nData>0 ); - dump.nData--; /* Overwrite trailing space. */ - assert( dump.pData[dump.nData]==' '); - dataBufferAppend(&dump, "]] ", 3); - } - } - dlrDestroy(&dlReader); - - assert( dump.nData>0 ); - dump.nData--; /* Overwrite trailing space. */ - assert( dump.pData[dump.nData]==' '); - dump.pData[dump.nData] = '\0'; - assert( dump.nData>0 ); - - /* Passes ownership of dump's buffer to pContext. */ - sqlite3_result_text(pContext, dump.pData, dump.nData, sqlite3_free); - dump.pData = NULL; - dump.nData = dump.nCapacity = 0; -} - -/* Implements dump_doclist() for use in inspecting the fts2 index from -** tests. TEXT result containing a string representation of the -** doclist for the indicated term. dump_doclist(t, term, level, idx) -** dumps the doclist for term from the segment specified by level, idx -** (in %_segdir), while dump_doclist(t, term) dumps the logical -** doclist for the term across all segments. The per-segment doclist -** can contain deletions, while the full-index doclist will not -** (deletions are omitted). -** -** Result formats differ with the setting of DL_DEFAULTS. Examples: -** -** DL_DOCIDS: [1] [3] [7] -** DL_POSITIONS: [1 0[0 4] 1[17]] [3 1[5]] -** DL_POSITIONS_OFFSETS: [1 0[0,0,3 4,23,26] 1[17,102,105]] [3 1[5,20,23]] -** -** In each case the number after the outer '[' is the docid. In the -** latter two cases, the number before the inner '[' is the column -** associated with the values within. For DL_POSITIONS the numbers -** within are the positions, for DL_POSITIONS_OFFSETS they are the -** position, the start offset, and the end offset. 
-*/ -static void dumpDoclistFunc( - sqlite3_context *pContext, - int argc, sqlite3_value **argv -){ - fulltext_cursor *pCursor; - if( argc!=2 && argc!=4 ){ - generateError(pContext, "dump_doclist", "incorrect arguments"); - }else if( sqlite3_value_type(argv[0])!=SQLITE_BLOB || - sqlite3_value_bytes(argv[0])!=sizeof(pCursor) ){ - generateError(pContext, "dump_doclist", "illegal first argument"); - }else if( sqlite3_value_text(argv[1])==NULL || - sqlite3_value_text(argv[1])[0]=='\0' ){ - generateError(pContext, "dump_doclist", "empty second argument"); - }else{ - const char *pTerm = (const char *)sqlite3_value_text(argv[1]); - const int nTerm = strlen(pTerm); - fulltext_vtab *v; - int rc; - DataBuffer doclist; - - memcpy(&pCursor, sqlite3_value_blob(argv[0]), sizeof(pCursor)); - v = cursor_vtab(pCursor); - - dataBufferInit(&doclist, 0); - - /* termSelect() yields the same logical doclist that queries are - ** run against. - */ - if( argc==2 ){ - rc = termSelect(v, v->nColumn, pTerm, nTerm, 0, DL_DEFAULT, &doclist); - }else{ - sqlite3_stmt *s = NULL; - - /* Get our specific segment's information. */ - rc = sql_get_statement(v, SEGDIR_SELECT_SEGMENT_STMT, &s); - if( rc==SQLITE_OK ){ - rc = sqlite3_bind_int(s, 1, sqlite3_value_int(argv[2])); - if( rc==SQLITE_OK ){ - rc = sqlite3_bind_int(s, 2, sqlite3_value_int(argv[3])); - } - } - - if( rc==SQLITE_OK ){ - rc = sqlite3_step(s); - - if( rc==SQLITE_DONE ){ - dataBufferDestroy(&doclist); - generateError(pContext, "dump_doclist", "segment not found"); - return; - } - - /* Found a segment, load it into doclist. */ - if( rc==SQLITE_ROW ){ - const sqlite_int64 iLeavesEnd = sqlite3_column_int64(s, 1); - const char *pData = sqlite3_column_blob(s, 2); - const int nData = sqlite3_column_bytes(s, 2); - - /* loadSegment() is used by termSelect() to load each - ** segment's data. - */ - rc = loadSegment(v, pData, nData, iLeavesEnd, pTerm, nTerm, 0, - &doclist); - if( rc==SQLITE_OK ){ - rc = sqlite3_step(s); - - /* Should not have more than one matching segment. */ - if( rc!=SQLITE_DONE ){ - sqlite3_reset(s); - dataBufferDestroy(&doclist); - generateError(pContext, "dump_doclist", "invalid segdir"); - return; - } - rc = SQLITE_OK; - } - } - } - - sqlite3_reset(s); - } - - if( rc==SQLITE_OK ){ - if( doclist.nData>0 ){ - createDoclistResult(pContext, doclist.pData, doclist.nData); - }else{ - /* TODO(shess): This can happen if the term is not present, or - ** if all instances of the term have been deleted and this is - ** an all-index dump. It may be interesting to distinguish - ** these cases. - */ - sqlite3_result_text(pContext, "", 0, SQLITE_STATIC); - } - }else if( rc==SQLITE_NOMEM ){ - /* Handle out-of-memory cases specially because if they are - ** generated in fts2 code they may not be reflected in the db - ** handle. - */ - /* TODO(shess): Handle this more comprehensively. - ** sqlite3ErrStr() has what I need, but is internal. - */ - generateError(pContext, "dump_doclist", "out of memory"); - }else{ - generateError(pContext, "dump_doclist", NULL); - } - - dataBufferDestroy(&doclist); - } -} -#endif - -/* -** This routine implements the xFindFunction method for the FTS2 -** virtual table. 
-*/ -static int fulltextFindFunction( - sqlite3_vtab *pVtab, - int nArg, - const char *zName, - void (**pxFunc)(sqlite3_context*,int,sqlite3_value**), - void **ppArg -){ - if( strcmp(zName,"snippet")==0 ){ - *pxFunc = snippetFunc; - return 1; - }else if( strcmp(zName,"offsets")==0 ){ - *pxFunc = snippetOffsetsFunc; - return 1; - }else if( strcmp(zName,"optimize")==0 ){ - *pxFunc = optimizeFunc; - return 1; -#ifdef SQLITE_TEST - /* NOTE(shess): These functions are present only for testing - ** purposes. No particular effort is made to optimize their - ** execution or how they build their results. - */ - }else if( strcmp(zName,"dump_terms")==0 ){ - /* fprintf(stderr, "Found dump_terms\n"); */ - *pxFunc = dumpTermsFunc; - return 1; - }else if( strcmp(zName,"dump_doclist")==0 ){ - /* fprintf(stderr, "Found dump_doclist\n"); */ - *pxFunc = dumpDoclistFunc; - return 1; -#endif - } - return 0; -} - -/* -** Rename an fts2 table. -*/ -static int fulltextRename( - sqlite3_vtab *pVtab, - const char *zName -){ - fulltext_vtab *p = (fulltext_vtab *)pVtab; - int rc = SQLITE_NOMEM; - char *zSql = sqlite3_mprintf( - "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';" - "ALTER TABLE %Q.'%q_segments' RENAME TO '%q_segments';" - "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';" - , p->zDb, p->zName, zName - , p->zDb, p->zName, zName - , p->zDb, p->zName, zName - ); - if( zSql ){ - rc = sqlite3_exec(p->db, zSql, 0, 0, 0); - sqlite3_free(zSql); - } - return rc; -} - -static const sqlite3_module fts2Module = { - /* iVersion */ 0, - /* xCreate */ fulltextCreate, - /* xConnect */ fulltextConnect, - /* xBestIndex */ fulltextBestIndex, - /* xDisconnect */ fulltextDisconnect, - /* xDestroy */ fulltextDestroy, - /* xOpen */ fulltextOpen, - /* xClose */ fulltextClose, - /* xFilter */ fulltextFilter, - /* xNext */ fulltextNext, - /* xEof */ fulltextEof, - /* xColumn */ fulltextColumn, - /* xRowid */ fulltextRowid, - /* xUpdate */ fulltextUpdate, - /* xBegin */ fulltextBegin, - /* xSync */ fulltextSync, - /* xCommit */ fulltextCommit, - /* xRollback */ fulltextRollback, - /* xFindFunction */ fulltextFindFunction, - /* xRename */ fulltextRename, -}; - -static void hashDestroy(void *p){ - fts2Hash *pHash = (fts2Hash *)p; - sqlite3Fts2HashClear(pHash); - sqlite3_free(pHash); -} - -/* -** The fts2 built-in tokenizers - "simple" and "porter" - are implemented -** in files fts2_tokenizer1.c and fts2_porter.c respectively. The following -** two forward declarations are for functions declared in these files -** used to retrieve the respective implementations. -** -** Calling sqlite3Fts2SimpleTokenizerModule() sets the value pointed -** to by the argument to point a the "simple" tokenizer implementation. -** Function ...PorterTokenizerModule() sets *pModule to point to the -** porter tokenizer/stemmer implementation. -*/ -void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); -void sqlite3Fts2PorterTokenizerModule(sqlite3_tokenizer_module const**ppModule); -void sqlite3Fts2IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); - -int sqlite3Fts2InitHashTable(sqlite3 *, fts2Hash *, const char *); - -/* -** Initialize the fts2 extension. If this extension is built as part -** of the sqlite library, then this function is called directly by -** SQLite. If fts2 is built as a dynamically loadable extension, this -** function is called by the sqlite3_extension_init() entry point. 
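Illustrative aside, not part of this patch: the `snippet()`, `offsets()`, and `optimize()` functions wired up through `xFindFunction` above are invoked from SQL against the magic table-named column of an fts2 table. A minimal sketch of application usage follows; it assumes a SQLite build that still enables the legacy module via `SQLITE_ENABLE_FTS2`, which this patch removes from SQLCipher.

```c
/* Illustrative sketch only (not part of the patch): exercising the fts2
** module and its overloaded snippet()/offsets() functions from application
** code, assuming a library compiled with SQLITE_ENABLE_FTS2. */
#include <stdio.h>
#include "sqlite3.h"

static int print_row(void *unused, int nCol, char **azVal, char **azCol){
  int i;
  (void)unused;
  for(i=0; i<nCol; i++) printf("%s = %s\n", azCol[i], azVal[i] ? azVal[i] : "NULL");
  return 0;
}

int main(void){
  sqlite3 *db;
  char *zErr = 0;
  int rc = sqlite3_open(":memory:", &db);
  if( rc!=SQLITE_OK ) return 1;

  rc = sqlite3_exec(db,
      "CREATE VIRTUAL TABLE docs USING fts2(body);"
      "INSERT INTO docs(body) VALUES('the quick brown fox');"
      "INSERT INTO docs(body) VALUES('lazy dogs sleep');"
      /* snippet(docs) and offsets(docs) are resolved through the module's
      ** xFindFunction hook; 'docs' is the magic table-named column. */
      "SELECT rowid, snippet(docs), offsets(docs) "
      "  FROM docs WHERE docs MATCH 'fox';",
      print_row, 0, &zErr);
  if( rc!=SQLITE_OK ){
    fprintf(stderr, "error: %s\n", zErr ? zErr : sqlite3_errmsg(db));
    sqlite3_free(zErr);
  }
  sqlite3_close(db);
  return rc==SQLITE_OK ? 0 : 1;
}
```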
-*/ -int sqlite3Fts2Init(sqlite3 *db){ - int rc = SQLITE_OK; - fts2Hash *pHash = 0; - const sqlite3_tokenizer_module *pSimple = 0; - const sqlite3_tokenizer_module *pPorter = 0; - const sqlite3_tokenizer_module *pIcu = 0; - - sqlite3Fts2SimpleTokenizerModule(&pSimple); - sqlite3Fts2PorterTokenizerModule(&pPorter); -#ifdef SQLITE_ENABLE_ICU - sqlite3Fts2IcuTokenizerModule(&pIcu); -#endif - - /* Allocate and initialize the hash-table used to store tokenizers. */ - pHash = sqlite3_malloc(sizeof(fts2Hash)); - if( !pHash ){ - rc = SQLITE_NOMEM; - }else{ - sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); - } - - /* Load the built-in tokenizers into the hash table */ - if( rc==SQLITE_OK ){ - if( sqlite3Fts2HashInsert(pHash, "simple", 7, (void *)pSimple) - || sqlite3Fts2HashInsert(pHash, "porter", 7, (void *)pPorter) - || (pIcu && sqlite3Fts2HashInsert(pHash, "icu", 4, (void *)pIcu)) - ){ - rc = SQLITE_NOMEM; - } - } - - /* Create the virtual table wrapper around the hash-table and overload - ** the two scalar functions. If this is successful, register the - ** module with sqlite. - */ - if( SQLITE_OK==rc - && SQLITE_OK==(rc = sqlite3Fts2InitHashTable(db, pHash, "fts2_tokenizer")) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", -1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", -1)) -#ifdef SQLITE_TEST - && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_terms", -1)) - && SQLITE_OK==(rc = sqlite3_overload_function(db, "dump_doclist", -1)) -#endif - ){ - return sqlite3_create_module_v2( - db, "fts2", &fts2Module, (void *)pHash, hashDestroy - ); - } - - /* An error has occurred. Delete the hash table and return the error code. */ - assert( rc!=SQLITE_OK ); - if( pHash ){ - sqlite3Fts2HashClear(pHash); - sqlite3_free(pHash); - } - return rc; -} - -#if !SQLITE_CORE -#ifdef _WIN32 -__declspec(dllexport) -#endif -int sqlite3_fts2_init( - sqlite3 *db, - char **pzErrMsg, - const sqlite3_api_routines *pApi -){ - SQLITE_EXTENSION_INIT2(pApi) - return sqlite3Fts2Init(db); -} -#endif - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/fts2.h b/ext/fts2/fts2.h deleted file mode 100644 index 4da4c3877b..0000000000 --- a/ext/fts2/fts2.h +++ /dev/null @@ -1,26 +0,0 @@ -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This header file is used by programs that want to link against the -** FTS2 library. All it does is declare the sqlite3Fts2Init() interface. -*/ -#include "sqlite3.h" - -#ifdef __cplusplus -extern "C" { -#endif /* __cplusplus */ - -int sqlite3Fts2Init(sqlite3 *db); - -#ifdef __cplusplus -} /* extern "C" */ -#endif /* __cplusplus */ diff --git a/ext/fts2/fts2_hash.c b/ext/fts2/fts2_hash.c deleted file mode 100644 index 3596dcf0b8..0000000000 --- a/ext/fts2/fts2_hash.c +++ /dev/null @@ -1,376 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. 
-** -************************************************************************* -** This is the implementation of generic hash-tables used in SQLite. -** We've modified it slightly to serve as a standalone hash table -** implementation for the full-text indexing module. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS2 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS2 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) - -#include -#include -#include - -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT3 -#include "fts2_hash.h" - -/* -** Malloc and Free functions -*/ -static void *fts2HashMalloc(int n){ - void *p = sqlite3_malloc(n); - if( p ){ - memset(p, 0, n); - } - return p; -} -static void fts2HashFree(void *p){ - sqlite3_free(p); -} - -/* Turn bulk memory into a hash table object by initializing the -** fields of the Hash structure. -** -** "pNew" is a pointer to the hash table that is to be initialized. -** keyClass is one of the constants -** FTS2_HASH_BINARY or FTS2_HASH_STRING. The value of keyClass -** determines what kind of key the hash table will use. "copyKey" is -** true if the hash table should make its own private copy of keys and -** false if it should just use the supplied pointer. -*/ -void sqlite3Fts2HashInit(fts2Hash *pNew, int keyClass, int copyKey){ - assert( pNew!=0 ); - assert( keyClass>=FTS2_HASH_STRING && keyClass<=FTS2_HASH_BINARY ); - pNew->keyClass = keyClass; - pNew->copyKey = copyKey; - pNew->first = 0; - pNew->count = 0; - pNew->htsize = 0; - pNew->ht = 0; -} - -/* Remove all entries from a hash table. Reclaim all memory. -** Call this routine to delete a hash table or to reset a hash table -** to the empty state. -*/ -void sqlite3Fts2HashClear(fts2Hash *pH){ - fts2HashElem *elem; /* For looping over all elements of the table */ - - assert( pH!=0 ); - elem = pH->first; - pH->first = 0; - fts2HashFree(pH->ht); - pH->ht = 0; - pH->htsize = 0; - while( elem ){ - fts2HashElem *next_elem = elem->next; - if( pH->copyKey && elem->pKey ){ - fts2HashFree(elem->pKey); - } - fts2HashFree(elem); - elem = next_elem; - } - pH->count = 0; -} - -/* -** Hash and comparison functions when the mode is FTS2_HASH_STRING -*/ -static int strHash(const void *pKey, int nKey){ - const char *z = (const char *)pKey; - int h = 0; - if( nKey<=0 ) nKey = (int) strlen(z); - while( nKey > 0 ){ - h = (h<<3) ^ h ^ *z++; - nKey--; - } - return h & 0x7fffffff; -} -static int strCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return strncmp((const char*)pKey1,(const char*)pKey2,n1); -} - -/* -** Hash and comparison functions when the mode is FTS2_HASH_BINARY -*/ -static int binHash(const void *pKey, int nKey){ - int h = 0; - const char *z = (const char *)pKey; - while( nKey-- > 0 ){ - h = (h<<3) ^ h ^ *(z++); - } - return h & 0x7fffffff; -} -static int binCompare(const void *pKey1, int n1, const void *pKey2, int n2){ - if( n1!=n2 ) return 1; - return memcmp(pKey1,pKey2,n1); -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** The C syntax in this function definition may be unfamilar to some -** programmers, so we provide the following additional explanation: -** -** The name of the function is "hashFunction". The function takes a -** single parameter "keyClass". 
The return value of hashFunction() -** is a pointer to another function. Specifically, the return value -** of hashFunction() is a pointer to a function that takes two parameters -** with types "const void*" and "int" and returns an "int". -*/ -static int (*hashFunction(int keyClass))(const void*,int){ - if( keyClass==FTS2_HASH_STRING ){ - return &strHash; - }else{ - assert( keyClass==FTS2_HASH_BINARY ); - return &binHash; - } -} - -/* -** Return a pointer to the appropriate hash function given the key class. -** -** For help in interpreted the obscure C code in the function definition, -** see the header comment on the previous function. -*/ -static int (*compareFunction(int keyClass))(const void*,int,const void*,int){ - if( keyClass==FTS2_HASH_STRING ){ - return &strCompare; - }else{ - assert( keyClass==FTS2_HASH_BINARY ); - return &binCompare; - } -} - -/* Link an element into the hash table -*/ -static void insertElement( - fts2Hash *pH, /* The complete hash table */ - struct _fts2ht *pEntry, /* The entry into which pNew is inserted */ - fts2HashElem *pNew /* The element to be inserted */ -){ - fts2HashElem *pHead; /* First element already in pEntry */ - pHead = pEntry->chain; - if( pHead ){ - pNew->next = pHead; - pNew->prev = pHead->prev; - if( pHead->prev ){ pHead->prev->next = pNew; } - else { pH->first = pNew; } - pHead->prev = pNew; - }else{ - pNew->next = pH->first; - if( pH->first ){ pH->first->prev = pNew; } - pNew->prev = 0; - pH->first = pNew; - } - pEntry->count++; - pEntry->chain = pNew; -} - - -/* Resize the hash table so that it cantains "new_size" buckets. -** "new_size" must be a power of 2. The hash table might fail -** to resize if sqliteMalloc() fails. -*/ -static void rehash(fts2Hash *pH, int new_size){ - struct _fts2ht *new_ht; /* The new hash table */ - fts2HashElem *elem, *next_elem; /* For looping over existing elements */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( (new_size & (new_size-1))==0 ); - new_ht = (struct _fts2ht *)fts2HashMalloc( new_size*sizeof(struct _fts2ht) ); - if( new_ht==0 ) return; - fts2HashFree(pH->ht); - pH->ht = new_ht; - pH->htsize = new_size; - xHash = hashFunction(pH->keyClass); - for(elem=pH->first, pH->first=0; elem; elem = next_elem){ - int h = (*xHash)(elem->pKey, elem->nKey) & (new_size-1); - next_elem = elem->next; - insertElement(pH, &new_ht[h], elem); - } -} - -/* This function (for internal use only) locates an element in an -** hash table that matches the given key. The hash for this key has -** already been computed and is passed as the 4th parameter. -*/ -static fts2HashElem *findElementGivenHash( - const fts2Hash *pH, /* The pH to be searched */ - const void *pKey, /* The key we are searching for */ - int nKey, - int h /* The hash for this key. */ -){ - fts2HashElem *elem; /* Used to loop thru the element list */ - int count; /* Number of elements left to test */ - int (*xCompare)(const void*,int,const void*,int); /* comparison function */ - - if( pH->ht ){ - struct _fts2ht *pEntry = &pH->ht[h]; - elem = pEntry->chain; - count = pEntry->count; - xCompare = compareFunction(pH->keyClass); - while( count-- && elem ){ - if( (*xCompare)(elem->pKey,elem->nKey,pKey,nKey)==0 ){ - return elem; - } - elem = elem->next; - } - } - return 0; -} - -/* Remove a single entry from the hash table given a pointer to that -** element and a hash on the element's key. 
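Illustrative aside, not part of this patch: the `hashFunction()`/`compareFunction()` declarations above use the "function returning a pointer to a function" syntax that the header comment calls unfamiliar. The standalone sketch below shows the same declaration with and without a typedef; `strHashDemo`, `binHashDemo`, `chooseHash`, `chooseHash2`, and `HashFn` are invented names.

```c
/* Illustrative sketch only (not part of the patch): declaring a function
** whose return value is a pointer to another function, as hashFunction()
** does in fts2_hash.c. */
#include <stdio.h>
#include <string.h>

static int strHashDemo(const void *pKey, int nKey){
  const char *z = (const char*)pKey;
  int h = 0;
  if( nKey<=0 ) nKey = (int)strlen(z);
  while( nKey-- > 0 ) h = (h<<3) ^ h ^ *z++;
  return h & 0x7fffffff;
}
static int binHashDemo(const void *pKey, int nKey){
  const char *z = (const char*)pKey;
  int h = 0;
  while( nKey-- > 0 ) h = (h<<3) ^ h ^ *z++;
  return h & 0x7fffffff;
}

/* Without a typedef: chooseHash takes an int and returns a pointer to a
** function of type (const void*, int) -> int. */
static int (*chooseHash(int useString))(const void*, int){
  return useString ? &strHashDemo : &binHashDemo;
}

/* The same thing with a typedef, which many readers find clearer. */
typedef int (*HashFn)(const void*, int);
static HashFn chooseHash2(int useString){
  return useString ? strHashDemo : binHashDemo;
}

int main(void){
  HashFn h1 = chooseHash(1);
  HashFn h2 = chooseHash2(0);
  printf("%d %d\n", h1("abc", 3), h2("abc", 3));
  return 0;
}
```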
-*/ -static void removeElementGivenHash( - fts2Hash *pH, /* The pH containing "elem" */ - fts2HashElem* elem, /* The element to be removed from the pH */ - int h /* Hash value for the element */ -){ - struct _fts2ht *pEntry; - if( elem->prev ){ - elem->prev->next = elem->next; - }else{ - pH->first = elem->next; - } - if( elem->next ){ - elem->next->prev = elem->prev; - } - pEntry = &pH->ht[h]; - if( pEntry->chain==elem ){ - pEntry->chain = elem->next; - } - pEntry->count--; - if( pEntry->count<=0 ){ - pEntry->chain = 0; - } - if( pH->copyKey && elem->pKey ){ - fts2HashFree(elem->pKey); - } - fts2HashFree( elem ); - pH->count--; - if( pH->count<=0 ){ - assert( pH->first==0 ); - assert( pH->count==0 ); - fts2HashClear(pH); - } -} - -/* Attempt to locate an element of the hash table pH with a key -** that matches pKey,nKey. Return the data for this element if it is -** found, or NULL if there is no match. -*/ -void *sqlite3Fts2HashFind(const fts2Hash *pH, const void *pKey, int nKey){ - int h; /* A hash on key */ - fts2HashElem *elem; /* The element that matches key */ - int (*xHash)(const void*,int); /* The hash function */ - - if( pH==0 || pH->ht==0 ) return 0; - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - h = (*xHash)(pKey,nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - elem = findElementGivenHash(pH,pKey,nKey, h & (pH->htsize-1)); - return elem ? elem->data : 0; -} - -/* Insert an element into the hash table pH. The key is pKey,nKey -** and the data is "data". -** -** If no element exists with a matching key, then a new -** element is created. A copy of the key is made if the copyKey -** flag is set. NULL is returned. -** -** If another element already exists with the same key, then the -** new data replaces the old data and the old data is returned. -** The key is not copied in this instance. If a malloc fails, then -** the new data is returned and the hash table is unchanged. -** -** If the "data" parameter to this function is NULL, then the -** element corresponding to "key" is removed from the hash table. 
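Illustrative aside, not part of this patch: the comment above spells out the insert/replace/delete contract of `sqlite3Fts2HashInsert()`. The sketch below exercises that contract directly; it assumes the `fts2_hash.h`/`fts2_hash.c` sources being removed here are still compiled into the test program.

```c
/* Illustrative sketch only (not part of the patch): the documented
** sqlite3Fts2HashInsert() semantics. Assumes fts2_hash.c is linked in. */
#include <assert.h>
#include <stdio.h>
#include "fts2_hash.h"

int main(void){
  fts2Hash h;
  int one = 1, two = 2;

  sqlite3Fts2HashInit(&h, FTS2_HASH_STRING, 1);   /* copyKey = true */

  /* New key: insert returns NULL. Key length includes the terminator. */
  assert( sqlite3Fts2HashInsert(&h, "alpha", 6, &one)==0 );
  assert( sqlite3Fts2HashFind(&h, "alpha", 6)==&one );

  /* Existing key: the old data is replaced and returned. */
  assert( sqlite3Fts2HashInsert(&h, "alpha", 6, &two)==&one );
  assert( sqlite3Fts2HashFind(&h, "alpha", 6)==&two );

  /* Inserting NULL data removes the entry. */
  sqlite3Fts2HashInsert(&h, "alpha", 6, 0);
  assert( sqlite3Fts2HashFind(&h, "alpha", 6)==0 );

  sqlite3Fts2HashClear(&h);
  printf("hash semantics ok\n");
  return 0;
}
```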
-*/ -void *sqlite3Fts2HashInsert( - fts2Hash *pH, /* The hash table to insert into */ - const void *pKey, /* The key */ - int nKey, /* Number of bytes in the key */ - void *data /* The data */ -){ - int hraw; /* Raw hash value of the key */ - int h; /* the hash of the key modulo hash table size */ - fts2HashElem *elem; /* Used to loop thru the element list */ - fts2HashElem *new_elem; /* New element added to the pH */ - int (*xHash)(const void*,int); /* The hash function */ - - assert( pH!=0 ); - xHash = hashFunction(pH->keyClass); - assert( xHash!=0 ); - hraw = (*xHash)(pKey, nKey); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - elem = findElementGivenHash(pH,pKey,nKey,h); - if( elem ){ - void *old_data = elem->data; - if( data==0 ){ - removeElementGivenHash(pH,elem,h); - }else{ - elem->data = data; - } - return old_data; - } - if( data==0 ) return 0; - new_elem = (fts2HashElem*)fts2HashMalloc( sizeof(fts2HashElem) ); - if( new_elem==0 ) return data; - if( pH->copyKey && pKey!=0 ){ - new_elem->pKey = fts2HashMalloc( nKey ); - if( new_elem->pKey==0 ){ - fts2HashFree(new_elem); - return data; - } - memcpy((void*)new_elem->pKey, pKey, nKey); - }else{ - new_elem->pKey = (void*)pKey; - } - new_elem->nKey = nKey; - pH->count++; - if( pH->htsize==0 ){ - rehash(pH,8); - if( pH->htsize==0 ){ - pH->count = 0; - fts2HashFree(new_elem); - return data; - } - } - if( pH->count > pH->htsize ){ - rehash(pH,pH->htsize*2); - } - assert( pH->htsize>0 ); - assert( (pH->htsize & (pH->htsize-1))==0 ); - h = hraw & (pH->htsize-1); - insertElement(pH, &pH->ht[h], new_elem); - new_elem->data = data; - return 0; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/fts2_hash.h b/ext/fts2/fts2_hash.h deleted file mode 100644 index 02936f18bb..0000000000 --- a/ext/fts2/fts2_hash.h +++ /dev/null @@ -1,110 +0,0 @@ -/* -** 2001 September 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This is the header file for the generic hash-table implementation -** used in SQLite. We've modified it slightly to serve as a standalone -** hash table implementation for the full-text indexing module. -** -*/ -#ifndef _FTS2_HASH_H_ -#define _FTS2_HASH_H_ - -/* Forward declarations of structures. */ -typedef struct fts2Hash fts2Hash; -typedef struct fts2HashElem fts2HashElem; - -/* A complete hash table is an instance of the following structure. -** The internals of this structure are intended to be opaque -- client -** code should not attempt to access or modify the fields of this structure -** directly. Change this structure only by using the routines below. -** However, many of the "procedures" and "functions" for modifying and -** accessing this structure are really macros, so we can't really make -** this structure opaque. 
-*/ -struct fts2Hash { - char keyClass; /* HASH_INT, _POINTER, _STRING, _BINARY */ - char copyKey; /* True if copy of key made on insert */ - int count; /* Number of entries in this table */ - fts2HashElem *first; /* The first element of the array */ - int htsize; /* Number of buckets in the hash table */ - struct _fts2ht { /* the hash table */ - int count; /* Number of entries with this hash */ - fts2HashElem *chain; /* Pointer to first entry with this hash */ - } *ht; -}; - -/* Each element in the hash table is an instance of the following -** structure. All elements are stored on a single doubly-linked list. -** -** Again, this structure is intended to be opaque, but it can't really -** be opaque because it is used by macros. -*/ -struct fts2HashElem { - fts2HashElem *next, *prev; /* Next and previous elements in the table */ - void *data; /* Data associated with this element */ - void *pKey; int nKey; /* Key associated with this element */ -}; - -/* -** There are 2 different modes of operation for a hash table: -** -** FTS2_HASH_STRING pKey points to a string that is nKey bytes long -** (including the null-terminator, if any). Case -** is respected in comparisons. -** -** FTS2_HASH_BINARY pKey points to binary data nKey bytes long. -** memcmp() is used to compare keys. -** -** A copy of the key is made if the copyKey parameter to fts2HashInit is 1. -*/ -#define FTS2_HASH_STRING 1 -#define FTS2_HASH_BINARY 2 - -/* -** Access routines. To delete, insert a NULL pointer. -*/ -void sqlite3Fts2HashInit(fts2Hash*, int keytype, int copyKey); -void *sqlite3Fts2HashInsert(fts2Hash*, const void *pKey, int nKey, void *pData); -void *sqlite3Fts2HashFind(const fts2Hash*, const void *pKey, int nKey); -void sqlite3Fts2HashClear(fts2Hash*); - -/* -** Shorthand for the functions above -*/ -#define fts2HashInit sqlite3Fts2HashInit -#define fts2HashInsert sqlite3Fts2HashInsert -#define fts2HashFind sqlite3Fts2HashFind -#define fts2HashClear sqlite3Fts2HashClear - -/* -** Macros for looping over all elements of a hash table. The idiom is -** like this: -** -** fts2Hash h; -** fts2HashElem *p; -** ... -** for(p=fts2HashFirst(&h); p; p=fts2HashNext(p)){ -** SomeStructure *pData = fts2HashData(p); -** // do something with pData -** } -*/ -#define fts2HashFirst(H) ((H)->first) -#define fts2HashNext(E) ((E)->next) -#define fts2HashData(E) ((E)->data) -#define fts2HashKey(E) ((E)->pKey) -#define fts2HashKeysize(E) ((E)->nKey) - -/* -** Number of entries in a hash table -*/ -#define fts2HashCount(H) ((H)->count) - -#endif /* _FTS2_HASH_H_ */ diff --git a/ext/fts2/fts2_icu.c b/ext/fts2/fts2_icu.c deleted file mode 100644 index 2670301f51..0000000000 --- a/ext/fts2/fts2_icu.c +++ /dev/null @@ -1,260 +0,0 @@ -/* -** 2007 June 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** This file implements a tokenizer for fts2 based on the ICU library. 
-** -** $Id: fts2_icu.c,v 1.3 2008/12/18 05:30:26 danielk1977 Exp $ -*/ - -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) -#ifdef SQLITE_ENABLE_ICU - -#include -#include -#include "fts2_tokenizer.h" - -#include -#include -#include -#include - -typedef struct IcuTokenizer IcuTokenizer; -typedef struct IcuCursor IcuCursor; - -struct IcuTokenizer { - sqlite3_tokenizer base; - char *zLocale; -}; - -struct IcuCursor { - sqlite3_tokenizer_cursor base; - - UBreakIterator *pIter; /* ICU break-iterator object */ - int nChar; /* Number of UChar elements in pInput */ - UChar *aChar; /* Copy of input using utf-16 encoding */ - int *aOffset; /* Offsets of each character in utf-8 input */ - - int nBuffer; - char *zBuffer; - - int iToken; -}; - -/* -** Create a new tokenizer instance. -*/ -static int icuCreate( - int argc, /* Number of entries in argv[] */ - const char * const *argv, /* Tokenizer creation arguments */ - sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ -){ - IcuTokenizer *p; - int n = 0; - - if( argc>0 ){ - n = strlen(argv[0])+1; - } - p = (IcuTokenizer *)sqlite3_malloc(sizeof(IcuTokenizer)+n); - if( !p ){ - return SQLITE_NOMEM; - } - memset(p, 0, sizeof(IcuTokenizer)); - - if( n ){ - p->zLocale = (char *)&p[1]; - memcpy(p->zLocale, argv[0], n); - } - - *ppTokenizer = (sqlite3_tokenizer *)p; - - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int icuDestroy(sqlite3_tokenizer *pTokenizer){ - IcuTokenizer *p = (IcuTokenizer *)pTokenizer; - sqlite3_free(p); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is pInput[0..nBytes-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int icuOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *zInput, /* Input string */ - int nInput, /* Length of zInput in bytes */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - IcuTokenizer *p = (IcuTokenizer *)pTokenizer; - IcuCursor *pCsr; - - const int32_t opt = U_FOLD_CASE_DEFAULT; - UErrorCode status = U_ZERO_ERROR; - int nChar; - - UChar32 c; - int iInput = 0; - int iOut = 0; - - *ppCursor = 0; - - if( nInput<0 ){ - nInput = strlen(zInput); - } - nChar = nInput+1; - pCsr = (IcuCursor *)sqlite3_malloc( - sizeof(IcuCursor) + /* IcuCursor */ - ((nChar+3)&~3) * sizeof(UChar) + /* IcuCursor.aChar[] */ - (nChar+1) * sizeof(int) /* IcuCursor.aOffset[] */ - ); - if( !pCsr ){ - return SQLITE_NOMEM; - } - memset(pCsr, 0, sizeof(IcuCursor)); - pCsr->aChar = (UChar *)&pCsr[1]; - pCsr->aOffset = (int *)&pCsr->aChar[(nChar+3)&~3]; - - pCsr->aOffset[iOut] = iInput; - U8_NEXT(zInput, iInput, nInput, c); - while( c>0 ){ - int isError = 0; - c = u_foldCase(c, opt); - U16_APPEND(pCsr->aChar, iOut, nChar, c, isError); - if( isError ){ - sqlite3_free(pCsr); - return SQLITE_ERROR; - } - pCsr->aOffset[iOut] = iInput; - - if( iInputpIter = ubrk_open(UBRK_WORD, p->zLocale, pCsr->aChar, iOut, &status); - if( !U_SUCCESS(status) ){ - sqlite3_free(pCsr); - return SQLITE_ERROR; - } - pCsr->nChar = iOut; - - ubrk_first(pCsr->pIter); - *ppCursor = (sqlite3_tokenizer_cursor *)pCsr; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to icuOpen(). 
-*/ -static int icuClose(sqlite3_tokenizer_cursor *pCursor){ - IcuCursor *pCsr = (IcuCursor *)pCursor; - ubrk_close(pCsr->pIter); - sqlite3_free(pCsr->zBuffer); - sqlite3_free(pCsr); - return SQLITE_OK; -} - -/* -** Extract the next token from a tokenization cursor. -*/ -static int icuNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ - const char **ppToken, /* OUT: *ppToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - IcuCursor *pCsr = (IcuCursor *)pCursor; - - int iStart = 0; - int iEnd = 0; - int nByte = 0; - - while( iStart==iEnd ){ - UChar32 c; - - iStart = ubrk_current(pCsr->pIter); - iEnd = ubrk_next(pCsr->pIter); - if( iEnd==UBRK_DONE ){ - return SQLITE_DONE; - } - - while( iStartaChar, iWhite, pCsr->nChar, c); - if( u_isspace(c) ){ - iStart = iWhite; - }else{ - break; - } - } - assert(iStart<=iEnd); - } - - do { - UErrorCode status = U_ZERO_ERROR; - if( nByte ){ - char *zNew = sqlite3_realloc(pCsr->zBuffer, nByte); - if( !zNew ){ - return SQLITE_NOMEM; - } - pCsr->zBuffer = zNew; - pCsr->nBuffer = nByte; - } - - u_strToUTF8( - pCsr->zBuffer, pCsr->nBuffer, &nByte, /* Output vars */ - &pCsr->aChar[iStart], iEnd-iStart, /* Input vars */ - &status /* Output success/failure */ - ); - } while( nByte>pCsr->nBuffer ); - - *ppToken = pCsr->zBuffer; - *pnBytes = nByte; - *piStartOffset = pCsr->aOffset[iStart]; - *piEndOffset = pCsr->aOffset[iEnd]; - *piPosition = pCsr->iToken++; - - return SQLITE_OK; -} - -/* -** The set of routines that implement the simple tokenizer -*/ -static const sqlite3_tokenizer_module icuTokenizerModule = { - 0, /* iVersion */ - icuCreate, /* xCreate */ - icuDestroy, /* xCreate */ - icuOpen, /* xOpen */ - icuClose, /* xClose */ - icuNext, /* xNext */ -}; - -/* -** Set *ppModule to point at the implementation of the ICU tokenizer. -*/ -void sqlite3Fts2IcuTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &icuTokenizerModule; -} - -#endif /* defined(SQLITE_ENABLE_ICU) */ -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/fts2_porter.c b/ext/fts2/fts2_porter.c deleted file mode 100644 index 881baf7100..0000000000 --- a/ext/fts2/fts2_porter.c +++ /dev/null @@ -1,644 +0,0 @@ -/* -** 2006 September 30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Implementation of the full-text-search tokenizer that implements -** a Porter stemmer. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS2 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS2 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
-*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) - - -#include -#include -#include -#include - -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT3 -#include "fts2_tokenizer.h" - -/* -** Class derived from sqlite3_tokenizer -*/ -typedef struct porter_tokenizer { - sqlite3_tokenizer base; /* Base class */ -} porter_tokenizer; - -/* -** Class derived from sqlit3_tokenizer_cursor -*/ -typedef struct porter_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *zInput; /* input we are tokenizing */ - int nInput; /* size of the input */ - int iOffset; /* current position in zInput */ - int iToken; /* index of next token to be returned */ - char *zToken; /* storage for current token */ - int nAllocated; /* space allocated to zToken buffer */ -} porter_tokenizer_cursor; - - -/* Forward declaration */ -static const sqlite3_tokenizer_module porterTokenizerModule; - - -/* -** Create a new tokenizer instance. -*/ -static int porterCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - porter_tokenizer *t; - t = (porter_tokenizer *) sqlite3_malloc(sizeof(*t)); - if( t==NULL ) return SQLITE_NOMEM; - memset(t, 0, sizeof(*t)); - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int porterDestroy(sqlite3_tokenizer *pTokenizer){ - sqlite3_free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is zInput[0..nInput-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. -*/ -static int porterOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *zInput, int nInput, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - porter_tokenizer_cursor *c; - - c = (porter_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->zInput = zInput; - if( zInput==0 ){ - c->nInput = 0; - }else if( nInput<0 ){ - c->nInput = (int)strlen(zInput); - }else{ - c->nInput = nInput; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->zToken = NULL; /* no space allocated, yet. */ - c->nAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** porterOpen() above. -*/ -static int porterClose(sqlite3_tokenizer_cursor *pCursor){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - sqlite3_free(c->zToken); - sqlite3_free(c); - return SQLITE_OK; -} -/* -** Vowel or consonant -*/ -static const char cType[] = { - 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, - 1, 1, 1, 2, 1 -}; - -/* -** isConsonant() and isVowel() determine if their first character in -** the string they point to is a consonant or a vowel, according -** to Porter ruls. -** -** A consonate is any letter other than 'a', 'e', 'i', 'o', or 'u'. -** 'Y' is a consonant unless it follows another consonant, -** in which case it is a vowel. -** -** In these routine, the letters are in reverse order. So the 'y' rule -** is that 'y' is a consonant unless it is followed by another -** consonent. 
-*/ -static int isVowel(const char*); -static int isConsonant(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return j; - return z[1]==0 || isVowel(z + 1); -} -static int isVowel(const char *z){ - int j; - char x = *z; - if( x==0 ) return 0; - assert( x>='a' && x<='z' ); - j = cType[x-'a']; - if( j<2 ) return 1-j; - return isConsonant(z + 1); -} - -/* -** Let any sequence of one or more vowels be represented by V and let -** C be sequence of one or more consonants. Then every word can be -** represented as: -** -** [C] (VC){m} [V] -** -** In prose: A word is an optional consonant followed by zero or -** vowel-consonant pairs followed by an optional vowel. "m" is the -** number of vowel consonant pairs. This routine computes the value -** of m for the first i bytes of a word. -** -** Return true if the m-value for z is 1 or more. In other words, -** return true if z contains at least one vowel that is followed -** by a consonant. -** -** In this routine z[] is in reverse order. So we are really looking -** for an instance of of a consonant followed by a vowel. -*/ -static int m_gt_0(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* Like mgt0 above except we are looking for a value of m which is -** exactly 1 -*/ -static int m_eq_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 1; - while( isConsonant(z) ){ z++; } - return *z==0; -} - -/* Like mgt0 above except we are looking for a value of m>1 instead -** or m>0 -*/ -static int m_gt_1(const char *z){ - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - if( *z==0 ) return 0; - while( isVowel(z) ){ z++; } - if( *z==0 ) return 0; - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if there is a vowel anywhere within z[0..n-1] -*/ -static int hasVowel(const char *z){ - while( isConsonant(z) ){ z++; } - return *z!=0; -} - -/* -** Return TRUE if the word ends in a double consonant. -** -** The text is reversed here. So we are really looking at -** the first two characters of z[]. -*/ -static int doubleConsonant(const char *z){ - return isConsonant(z) && z[0]==z[1] && isConsonant(z+1); -} - -/* -** Return TRUE if the word ends with three letters which -** are consonant-vowel-consonent and where the final consonant -** is not 'w', 'x', or 'y'. -** -** The word is reversed here. So we are really checking the -** first three letters and the first one cannot be in [wxy]. -*/ -static int star_oh(const char *z){ - return - z[0]!=0 && isConsonant(z) && - z[0]!='w' && z[0]!='x' && z[0]!='y' && - z[1]!=0 && isVowel(z+1) && - z[2]!=0 && isConsonant(z+2); -} - -/* -** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the -** ending to zTo. -** -** The input word *pz and zFrom are both in reverse order. zTo -** is in normal order. -** -** Return TRUE if zFrom matches. Return FALSE if zFrom does not -** match. Not that TRUE is returned even if xCond() fails and -** no substitution occurs. -*/ -static int stem( - char **pz, /* The word being stemmed (Reversed) */ - const char *zFrom, /* If the ending matches this... (Reversed) */ - const char *zTo, /* ... 
change the ending to this (not reversed) */ - int (*xCond)(const char*) /* Condition that must be true */ -){ - char *z = *pz; - while( *zFrom && *zFrom==*z ){ z++; zFrom++; } - if( *zFrom!=0 ) return 0; - if( xCond && !xCond(z) ) return 1; - while( *zTo ){ - *(--z) = *(zTo++); - } - *pz = z; - return 1; -} - -/* -** This is the fallback stemmer used when the porter stemmer is -** inappropriate. The input word is copied into the output with -** US-ASCII case folding. If the input word is too long (more -** than 20 bytes if it contains no digits or more than 6 bytes if -** it contains digits) then word is truncated to 20 or 6 bytes -** by taking 10 or 3 bytes from the beginning and end. -*/ -static void copy_stemmer(const char *zIn, int nIn, char *zOut, int *pnOut){ - int i, mx, j; - int hasDigit = 0; - for(i=0; i='A' && c<='Z' ){ - zOut[i] = c - 'A' + 'a'; - }else{ - if( c>='0' && c<='9' ) hasDigit = 1; - zOut[i] = c; - } - } - mx = hasDigit ? 3 : 10; - if( nIn>mx*2 ){ - for(j=mx, i=nIn-mx; i=sizeof(zReverse)-7 ){ - /* The word is too big or too small for the porter stemmer. - ** Fallback to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - for(i=0, j=sizeof(zReverse)-6; i='A' && c<='Z' ){ - zReverse[j] = c + 'a' - 'A'; - }else if( c>='a' && c<='z' ){ - zReverse[j] = c; - }else{ - /* The use of a character not in [a-zA-Z] means that we fallback - ** to the copy stemmer */ - copy_stemmer(zIn, nIn, zOut, pnOut); - return; - } - } - memset(&zReverse[sizeof(zReverse)-5], 0, 5); - z = &zReverse[j+1]; - - - /* Step 1a */ - if( z[0]=='s' ){ - if( - !stem(&z, "sess", "ss", 0) && - !stem(&z, "sei", "i", 0) && - !stem(&z, "ss", "ss", 0) - ){ - z++; - } - } - - /* Step 1b */ - z2 = z; - if( stem(&z, "dee", "ee", m_gt_0) ){ - /* Do nothing. The work was all in the test */ - }else if( - (stem(&z, "gni", "", hasVowel) || stem(&z, "de", "", hasVowel)) - && z!=z2 - ){ - if( stem(&z, "ta", "ate", 0) || - stem(&z, "lb", "ble", 0) || - stem(&z, "zi", "ize", 0) ){ - /* Do nothing. 
The work was all in the test */ - }else if( doubleConsonant(z) && (*z!='l' && *z!='s' && *z!='z') ){ - z++; - }else if( m_eq_1(z) && star_oh(z) ){ - *(--z) = 'e'; - } - } - - /* Step 1c */ - if( z[0]=='y' && hasVowel(z+1) ){ - z[0] = 'i'; - } - - /* Step 2 */ - switch( z[1] ){ - case 'a': - stem(&z, "lanoita", "ate", m_gt_0) || - stem(&z, "lanoit", "tion", m_gt_0); - break; - case 'c': - stem(&z, "icne", "ence", m_gt_0) || - stem(&z, "icna", "ance", m_gt_0); - break; - case 'e': - stem(&z, "rezi", "ize", m_gt_0); - break; - case 'g': - stem(&z, "igol", "log", m_gt_0); - break; - case 'l': - stem(&z, "ilb", "ble", m_gt_0) || - stem(&z, "illa", "al", m_gt_0) || - stem(&z, "iltne", "ent", m_gt_0) || - stem(&z, "ile", "e", m_gt_0) || - stem(&z, "ilsuo", "ous", m_gt_0); - break; - case 'o': - stem(&z, "noitazi", "ize", m_gt_0) || - stem(&z, "noita", "ate", m_gt_0) || - stem(&z, "rota", "ate", m_gt_0); - break; - case 's': - stem(&z, "msila", "al", m_gt_0) || - stem(&z, "ssenevi", "ive", m_gt_0) || - stem(&z, "ssenluf", "ful", m_gt_0) || - stem(&z, "ssensuo", "ous", m_gt_0); - break; - case 't': - stem(&z, "itila", "al", m_gt_0) || - stem(&z, "itivi", "ive", m_gt_0) || - stem(&z, "itilib", "ble", m_gt_0); - break; - } - - /* Step 3 */ - switch( z[0] ){ - case 'e': - stem(&z, "etaci", "ic", m_gt_0) || - stem(&z, "evita", "", m_gt_0) || - stem(&z, "ezila", "al", m_gt_0); - break; - case 'i': - stem(&z, "itici", "ic", m_gt_0); - break; - case 'l': - stem(&z, "laci", "ic", m_gt_0) || - stem(&z, "luf", "", m_gt_0); - break; - case 's': - stem(&z, "ssen", "", m_gt_0); - break; - } - - /* Step 4 */ - switch( z[1] ){ - case 'a': - if( z[0]=='l' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'c': - if( z[0]=='e' && z[2]=='n' && (z[3]=='a' || z[3]=='e') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'e': - if( z[0]=='r' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'i': - if( z[0]=='c' && m_gt_1(z+2) ){ - z += 2; - } - break; - case 'l': - if( z[0]=='e' && z[2]=='b' && (z[3]=='a' || z[3]=='i') && m_gt_1(z+4) ){ - z += 4; - } - break; - case 'n': - if( z[0]=='t' ){ - if( z[2]=='a' ){ - if( m_gt_1(z+3) ){ - z += 3; - } - }else if( z[2]=='e' ){ - stem(&z, "tneme", "", m_gt_1) || - stem(&z, "tnem", "", m_gt_1) || - stem(&z, "tne", "", m_gt_1); - } - } - break; - case 'o': - if( z[0]=='u' ){ - if( m_gt_1(z+2) ){ - z += 2; - } - }else if( z[3]=='s' || z[3]=='t' ){ - stem(&z, "noi", "", m_gt_1); - } - break; - case 's': - if( z[0]=='m' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 't': - stem(&z, "eta", "", m_gt_1) || - stem(&z, "iti", "", m_gt_1); - break; - case 'u': - if( z[0]=='s' && z[2]=='o' && m_gt_1(z+3) ){ - z += 3; - } - break; - case 'v': - case 'z': - if( z[0]=='e' && z[2]=='i' && m_gt_1(z+3) ){ - z += 3; - } - break; - } - - /* Step 5a */ - if( z[0]=='e' ){ - if( m_gt_1(z+1) ){ - z++; - }else if( m_eq_1(z+1) && !star_oh(z+1) ){ - z++; - } - } - - /* Step 5b */ - if( m_gt_1(z) && z[0]=='l' && z[1]=='l' ){ - z++; - } - - /* z[] is now the stemmed word in reverse order. Flip it back - ** around into forward order and return. - */ - *pnOut = i = strlen(z); - zOut[i] = 0; - while( *z ){ - zOut[--i] = *(z++); - } -} - -/* -** Characters that can be part of a token. We assume any character -** whose value is greater than 0x80 (any UTF character) can be -** part of a token. In other words, delimiters all must have -** values of 0x7f or lower. 
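/*
** Editor's note (illustrative sketch, not part of the original fts2_porter.c):
** the porterIdChar[] bitmap and the isDelim() macro defined just below are
** equivalent to the following standalone predicate. Any byte with value 0x80
** or greater is part of a token; within ASCII, only [0-9A-Za-z_] are token
** characters and everything else is a delimiter.
*/
static int porterIsDelimEquivalent(unsigned char c){
  if( c>=0x80 ) return 0;            /* non-ASCII bytes are always token characters */
  if( (c>='0' && c<='9') || (c>='A' && c<='Z')
   || (c>='a' && c<='z') || c=='_' ){
    return 0;                        /* ASCII token characters */
  }
  return 1;                          /* all other ASCII characters delimit tokens */
}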
-*/ -static const char porterIdChar[] = { -/* x0 x1 x2 x3 x4 x5 x6 x7 x8 x9 xA xB xC xD xE xF */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, /* 3x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 4x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, /* 5x */ - 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, /* 6x */ - 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, /* 7x */ -}; -#define isDelim(C) (((ch=C)&0x80)==0 && (ch<0x30 || !porterIdChar[ch-0x30])) - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to porterOpen(). -*/ -static int porterNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by porterOpen */ - const char **pzToken, /* OUT: *pzToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - porter_tokenizer_cursor *c = (porter_tokenizer_cursor *) pCursor; - const char *z = c->zInput; - - while( c->iOffsetnInput ){ - int iStartOffset, ch; - - /* Scan past delimiter characters */ - while( c->iOffsetnInput && isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnInput && !isDelim(z[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int n = c->iOffset-iStartOffset; - if( n>c->nAllocated ){ - c->nAllocated = n+20; - c->zToken = sqlite3_realloc(c->zToken, c->nAllocated); - if( c->zToken==NULL ) return SQLITE_NOMEM; - } - porter_stemmer(&z[iStartOffset], n, c->zToken, pnBytes); - *pzToken = c->zToken; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the porter-stemmer tokenizer -*/ -static const sqlite3_tokenizer_module porterTokenizerModule = { - 0, - porterCreate, - porterDestroy, - porterOpen, - porterClose, - porterNext, -}; - -/* -** Allocate a new porter tokenizer. Return a pointer to the new -** tokenizer in *ppModule -*/ -void sqlite3Fts2PorterTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &porterTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/fts2_tokenizer.c b/ext/fts2/fts2_tokenizer.c deleted file mode 100644 index dda33a72d2..0000000000 --- a/ext/fts2/fts2_tokenizer.c +++ /dev/null @@ -1,375 +0,0 @@ -/* -** 2007 June 22 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** This is part of an SQLite module implementing full-text search. -** This particular file implements the generic tokenizer interface. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS2 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS2 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). 
-*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) - - -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT3 - -#include "fts2_hash.h" -#include "fts2_tokenizer.h" -#include - -/* -** Implementation of the SQL scalar function for accessing the underlying -** hash table. This function may be called as follows: -** -** SELECT (); -** SELECT (, ); -** -** where is the name passed as the second argument -** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer'). -** -** If the argument is specified, it must be a blob value -** containing a pointer to be stored as the hash data corresponding -** to the string . If is not specified, then -** the string must already exist in the has table. Otherwise, -** an error is returned. -** -** Whether or not the argument is specified, the value returned -** is a blob containing the pointer stored as the hash data corresponding -** to string (after the hash-table is updated, if applicable). -*/ -static void scalarFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - fts2Hash *pHash; - void *pPtr = 0; - const unsigned char *zName; - int nName; - - assert( argc==1 || argc==2 ); - - pHash = (fts2Hash *)sqlite3_user_data(context); - - zName = sqlite3_value_text(argv[0]); - nName = sqlite3_value_bytes(argv[0])+1; - - if( argc==2 ){ - void *pOld; - int n = sqlite3_value_bytes(argv[1]); - if( n!=sizeof(pPtr) ){ - sqlite3_result_error(context, "argument type mismatch", -1); - return; - } - pPtr = *(void **)sqlite3_value_blob(argv[1]); - pOld = sqlite3Fts2HashInsert(pHash, (void *)zName, nName, pPtr); - if( pOld==pPtr ){ - sqlite3_result_error(context, "out of memory", -1); - return; - } - }else{ - pPtr = sqlite3Fts2HashFind(pHash, zName, nName); - if( !pPtr ){ - char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - } - - sqlite3_result_blob(context, (void *)&pPtr, sizeof(pPtr), SQLITE_TRANSIENT); -} - -#ifdef SQLITE_TEST - -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -#endif -#include - -/* -** Implementation of a special SQL scalar function for testing tokenizers -** designed to be used in concert with the Tcl testing framework. This -** function must be called with two arguments: -** -** SELECT (, ); -** SELECT (, ); -** -** where is the name passed as the second argument -** to the sqlite3Fts2InitHashTable() function (e.g. 'fts2_tokenizer') -** concatenated with the string '_test' (e.g. 'fts2_tokenizer_test'). -** -** The return value is a string that may be interpreted as a Tcl -** list. For each token in the , three elements are -** added to the returned list. The first is the token position, the -** second is the token text (folded, stemmed, etc.) and the third is the -** substring of associated with the token. 
For example, -** using the built-in "simple" tokenizer: -** -** SELECT fts_tokenizer_test('simple', 'I don't see how'); -** -** will return the string: -** -** "{0 i I 1 dont don't 2 see see 3 how how}" -** -*/ -static void testFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - fts2Hash *pHash; - sqlite3_tokenizer_module *p; - sqlite3_tokenizer *pTokenizer = 0; - sqlite3_tokenizer_cursor *pCsr = 0; - - const char *zErr = 0; - - const char *zName; - int nName; - const char *zInput; - int nInput; - - const char *zArg = 0; - - const char *zToken; - int nToken; - int iStart; - int iEnd; - int iPos; - - Tcl_Obj *pRet; - - assert( argc==2 || argc==3 ); - - nName = sqlite3_value_bytes(argv[0]); - zName = (const char *)sqlite3_value_text(argv[0]); - nInput = sqlite3_value_bytes(argv[argc-1]); - zInput = (const char *)sqlite3_value_text(argv[argc-1]); - - if( argc==3 ){ - zArg = (const char *)sqlite3_value_text(argv[1]); - } - - pHash = (fts2Hash *)sqlite3_user_data(context); - p = (sqlite3_tokenizer_module *)sqlite3Fts2HashFind(pHash, zName, nName+1); - - if( !p ){ - char *zErr = sqlite3_mprintf("unknown tokenizer: %s", zName); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); - return; - } - - pRet = Tcl_NewObj(); - Tcl_IncrRefCount(pRet); - - if( SQLITE_OK!=p->xCreate(zArg ? 1 : 0, &zArg, &pTokenizer) ){ - zErr = "error in xCreate()"; - goto finish; - } - pTokenizer->pModule = p; - if( SQLITE_OK!=p->xOpen(pTokenizer, zInput, nInput, &pCsr) ){ - zErr = "error in xOpen()"; - goto finish; - } - pCsr->pTokenizer = pTokenizer; - - while( SQLITE_OK==p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos) ){ - Tcl_ListObjAppendElement(0, pRet, Tcl_NewIntObj(iPos)); - Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); - zToken = &zInput[iStart]; - nToken = iEnd-iStart; - Tcl_ListObjAppendElement(0, pRet, Tcl_NewStringObj(zToken, nToken)); - } - - if( SQLITE_OK!=p->xClose(pCsr) ){ - zErr = "error in xClose()"; - goto finish; - } - if( SQLITE_OK!=p->xDestroy(pTokenizer) ){ - zErr = "error in xDestroy()"; - goto finish; - } - -finish: - if( zErr ){ - sqlite3_result_error(context, zErr, -1); - }else{ - sqlite3_result_text(context, Tcl_GetString(pRet), -1, SQLITE_TRANSIENT); - } - Tcl_DecrRefCount(pRet); -} - -static -int registerTokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module *p -){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts2_tokenizer(?, ?)"; - - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - sqlite3_bind_blob(pStmt, 2, &p, sizeof(p), SQLITE_STATIC); - sqlite3_step(pStmt); - - return sqlite3_finalize(pStmt); -} - -static -int queryFts2Tokenizer( - sqlite3 *db, - char *zName, - const sqlite3_tokenizer_module **pp -){ - int rc; - sqlite3_stmt *pStmt; - const char zSql[] = "SELECT fts2_tokenizer(?)"; - - *pp = 0; - rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0); - if( rc!=SQLITE_OK ){ - return rc; - } - - sqlite3_bind_text(pStmt, 1, zName, -1, SQLITE_STATIC); - if( SQLITE_ROW==sqlite3_step(pStmt) ){ - if( sqlite3_column_type(pStmt, 0)==SQLITE_BLOB ){ - memcpy(pp, sqlite3_column_blob(pStmt, 0), sizeof(*pp)); - } - } - - return sqlite3_finalize(pStmt); -} - -void sqlite3Fts2SimpleTokenizerModule(sqlite3_tokenizer_module const**ppModule); - -/* -** Implementation of the scalar function fts2_tokenizer_internal_test(). 
-** This function is used for testing only, it is not included in the -** build unless SQLITE_TEST is defined. -** -** The purpose of this is to test that the fts2_tokenizer() function -** can be used as designed by the C-code in the queryFts2Tokenizer and -** registerTokenizer() functions above. These two functions are repeated -** in the README.tokenizer file as an example, so it is important to -** test them. -** -** To run the tests, evaluate the fts2_tokenizer_internal_test() scalar -** function with no arguments. An assert() will fail if a problem is -** detected. i.e.: -** -** SELECT fts2_tokenizer_internal_test(); -** -*/ -static void intTestFunc( - sqlite3_context *context, - int argc, - sqlite3_value **argv -){ - int rc; - const sqlite3_tokenizer_module *p1; - const sqlite3_tokenizer_module *p2; - sqlite3 *db = (sqlite3 *)sqlite3_user_data(context); - - /* Test the query function */ - sqlite3Fts2SimpleTokenizerModule(&p1); - rc = queryFts2Tokenizer(db, "simple", &p2); - assert( rc==SQLITE_OK ); - assert( p1==p2 ); - rc = queryFts2Tokenizer(db, "nosuchtokenizer", &p2); - assert( rc==SQLITE_ERROR ); - assert( p2==0 ); - assert( 0==strcmp(sqlite3_errmsg(db), "unknown tokenizer: nosuchtokenizer") ); - - /* Test the storage function */ - rc = registerTokenizer(db, "nosuchtokenizer", p1); - assert( rc==SQLITE_OK ); - rc = queryFts2Tokenizer(db, "nosuchtokenizer", &p2); - assert( rc==SQLITE_OK ); - assert( p2==p1 ); - - sqlite3_result_text(context, "ok", -1, SQLITE_STATIC); -} - -#endif - -/* -** Set up SQL objects in database db used to access the contents of -** the hash table pointed to by argument pHash. The hash table must -** been initialized to use string keys, and to take a private copy -** of the key when a value is inserted. i.e. by a call similar to: -** -** sqlite3Fts2HashInit(pHash, FTS2_HASH_STRING, 1); -** -** This function adds a scalar function (see header comment above -** scalarFunc() in this file for details) and, if ENABLE_TABLE is -** defined at compilation time, a temporary virtual table (see header -** comment above struct HashTableVtab) to the database schema. Both -** provide read/write access to the contents of *pHash. -** -** The third argument to this function, zName, is used as the name -** of both the scalar and, if created, the virtual table. 
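/*
** Editor's note (hypothetical usage sketch, not part of the patch): a host
** application would typically wire the tokenizer hash table and the
** fts2_tokenizer() scalar function together as shown below. The database
** handle 'db' and the helper name are illustrative; the hash-table setup
** mirrors the sqlite3Fts2HashInit() call quoted in the comment above.
*/
static fts2Hash tokenizerHash;       /* must outlive the registration */

static int exampleInitFts2Tokenizers(sqlite3 *db){
  sqlite3Fts2HashInit(&tokenizerHash, FTS2_HASH_STRING, 1);   /* string keys, copied */
  return sqlite3Fts2InitHashTable(db, &tokenizerHash, "fts2_tokenizer");
}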
-*/ -int sqlite3Fts2InitHashTable( - sqlite3 *db, - fts2Hash *pHash, - const char *zName -){ - int rc = SQLITE_OK; - void *p = (void *)pHash; - const int any = SQLITE_ANY; - char *zTest = 0; - char *zTest2 = 0; - -#ifdef SQLITE_TEST - void *pdb = (void *)db; - zTest = sqlite3_mprintf("%s_test", zName); - zTest2 = sqlite3_mprintf("%s_internal_test", zName); - if( !zTest || !zTest2 ){ - rc = SQLITE_NOMEM; - } -#endif - - if( rc!=SQLITE_OK - || (rc = sqlite3_create_function(db, zName, 1, any, p, scalarFunc, 0, 0)) - || (rc = sqlite3_create_function(db, zName, 2, any, p, scalarFunc, 0, 0)) -#ifdef SQLITE_TEST - || (rc = sqlite3_create_function(db, zTest, 2, any, p, testFunc, 0, 0)) - || (rc = sqlite3_create_function(db, zTest, 3, any, p, testFunc, 0, 0)) - || (rc = sqlite3_create_function(db, zTest2, 0, any, pdb, intTestFunc, 0, 0)) -#endif - ); - - sqlite3_free(zTest); - sqlite3_free(zTest2); - return rc; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/fts2_tokenizer.h b/ext/fts2/fts2_tokenizer.h deleted file mode 100644 index 8db2048d6b..0000000000 --- a/ext/fts2/fts2_tokenizer.h +++ /dev/null @@ -1,145 +0,0 @@ -/* -** 2006 July 10 -** -** The author disclaims copyright to this source code. -** -************************************************************************* -** Defines the interface to tokenizers used by fulltext-search. There -** are three basic components: -** -** sqlite3_tokenizer_module is a singleton defining the tokenizer -** interface functions. This is essentially the class structure for -** tokenizers. -** -** sqlite3_tokenizer is used to define a particular tokenizer, perhaps -** including customization information defined at creation time. -** -** sqlite3_tokenizer_cursor is generated by a tokenizer to generate -** tokens from a particular input. -*/ -#ifndef _FTS2_TOKENIZER_H_ -#define _FTS2_TOKENIZER_H_ - -/* TODO(shess) Only used for SQLITE_OK and SQLITE_DONE at this time. -** If tokenizers are to be allowed to call sqlite3_*() functions, then -** we will need a way to register the API consistently. -*/ -#include "sqlite3.h" - -/* -** Structures used by the tokenizer interface. When a new tokenizer -** implementation is registered, the caller provides a pointer to -** an sqlite3_tokenizer_module containing pointers to the callback -** functions that make up an implementation. -** -** When an fts2 table is created, it passes any arguments passed to -** the tokenizer clause of the CREATE VIRTUAL TABLE statement to the -** sqlite3_tokenizer_module.xCreate() function of the requested tokenizer -** implementation. The xCreate() function in turn returns an -** sqlite3_tokenizer structure representing the specific tokenizer to -** be used for the fts2 table (customized by the tokenizer clause arguments). -** -** To tokenize an input buffer, the sqlite3_tokenizer_module.xOpen() -** method is called. It returns an sqlite3_tokenizer_cursor object -** that may be used to tokenize a specific input buffer based on -** the tokenization rules supplied by a specific sqlite3_tokenizer -** object. -*/ -typedef struct sqlite3_tokenizer_module sqlite3_tokenizer_module; -typedef struct sqlite3_tokenizer sqlite3_tokenizer; -typedef struct sqlite3_tokenizer_cursor sqlite3_tokenizer_cursor; - -struct sqlite3_tokenizer_module { - - /* - ** Structure version. Should always be set to 0. - */ - int iVersion; - - /* - ** Create a new tokenizer. 
The values in the argv[] array are the - ** arguments passed to the "tokenizer" clause of the CREATE VIRTUAL - ** TABLE statement that created the fts2 table. For example, if - ** the following SQL is executed: - ** - ** CREATE .. USING fts2( ... , tokenizer arg1 arg2) - ** - ** then argc is set to 2, and the argv[] array contains pointers - ** to the strings "arg1" and "arg2". - ** - ** This method should return either SQLITE_OK (0), or an SQLite error - ** code. If SQLITE_OK is returned, then *ppTokenizer should be set - ** to point at the newly created tokenizer structure. The generic - ** sqlite3_tokenizer.pModule variable should not be initialized by - ** this callback. The caller will do so. - */ - int (*xCreate)( - int argc, /* Size of argv array */ - const char *const*argv, /* Tokenizer argument strings */ - sqlite3_tokenizer **ppTokenizer /* OUT: Created tokenizer */ - ); - - /* - ** Destroy an existing tokenizer. The fts2 module calls this method - ** exactly once for each successful call to xCreate(). - */ - int (*xDestroy)(sqlite3_tokenizer *pTokenizer); - - /* - ** Create a tokenizer cursor to tokenize an input buffer. The caller - ** is responsible for ensuring that the input buffer remains valid - ** until the cursor is closed (using the xClose() method). - */ - int (*xOpen)( - sqlite3_tokenizer *pTokenizer, /* Tokenizer object */ - const char *pInput, int nBytes, /* Input buffer */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Created tokenizer cursor */ - ); - - /* - ** Destroy an existing tokenizer cursor. The fts2 module calls this - ** method exactly once for each successful call to xOpen(). - */ - int (*xClose)(sqlite3_tokenizer_cursor *pCursor); - - /* - ** Retrieve the next token from the tokenizer cursor pCursor. This - ** method should either return SQLITE_OK and set the values of the - ** "OUT" variables identified below, or SQLITE_DONE to indicate that - ** the end of the buffer has been reached, or an SQLite error code. - ** - ** *ppToken should be set to point at a buffer containing the - ** normalized version of the token (i.e. after any case-folding and/or - ** stemming has been performed). *pnBytes should be set to the length - ** of this buffer in bytes. The input text that generated the token is - ** identified by the byte offsets returned in *piStartOffset and - ** *piEndOffset. - ** - ** The buffer *ppToken is set to point at is managed by the tokenizer - ** implementation. It is only required to be valid until the next call - ** to xNext() or xClose(). - */ - /* TODO(shess) current implementation requires pInput to be - ** nul-terminated. This should either be fixed, or pInput/nBytes - ** should be converted to zInput. - */ - int (*xNext)( - sqlite3_tokenizer_cursor *pCursor, /* Tokenizer cursor */ - const char **ppToken, int *pnBytes, /* OUT: Normalized text for token */ - int *piStartOffset, /* OUT: Byte offset of token in input buffer */ - int *piEndOffset, /* OUT: Byte offset of end of token in input buffer */ - int *piPosition /* OUT: Number of tokens returned before this one */ - ); -}; - -struct sqlite3_tokenizer { - const sqlite3_tokenizer_module *pModule; /* The module for this tokenizer */ - /* Tokenizer implementations will typically add additional fields */ -}; - -struct sqlite3_tokenizer_cursor { - sqlite3_tokenizer *pTokenizer; /* Tokenizer for this cursor. 
*/ - /* Tokenizer implementations will typically add additional fields */ -}; - -#endif /* _FTS2_TOKENIZER_H_ */ diff --git a/ext/fts2/fts2_tokenizer1.c b/ext/fts2/fts2_tokenizer1.c deleted file mode 100644 index fe4f9eb4b5..0000000000 --- a/ext/fts2/fts2_tokenizer1.c +++ /dev/null @@ -1,233 +0,0 @@ -/* -** 2006 Oct 10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -****************************************************************************** -** -** Implementation of the "simple" full-text-search tokenizer. -*/ - -/* -** The code in this file is only compiled if: -** -** * The FTS2 module is being built as an extension -** (in which case SQLITE_CORE is not defined), or -** -** * The FTS2 module is being built into the core of -** SQLite (in which case SQLITE_ENABLE_FTS2 is defined). -*/ -#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) - - -#include -#include -#include -#include - -#include "sqlite3.h" -#include "sqlite3ext.h" -SQLITE_EXTENSION_INIT3 -#include "fts2_tokenizer.h" - -typedef struct simple_tokenizer { - sqlite3_tokenizer base; - char delim[128]; /* flag ASCII delimiters */ -} simple_tokenizer; - -typedef struct simple_tokenizer_cursor { - sqlite3_tokenizer_cursor base; - const char *pInput; /* input we are tokenizing */ - int nBytes; /* size of the input */ - int iOffset; /* current position in pInput */ - int iToken; /* index of next token to be returned */ - char *pToken; /* storage for current token */ - int nTokenAllocated; /* space allocated to zToken buffer */ -} simple_tokenizer_cursor; - - -/* Forward declaration */ -static const sqlite3_tokenizer_module simpleTokenizerModule; - -static int simpleDelim(simple_tokenizer *t, unsigned char c){ - return c<0x80 && t->delim[c]; -} - -/* -** Create a new tokenizer instance. -*/ -static int simpleCreate( - int argc, const char * const *argv, - sqlite3_tokenizer **ppTokenizer -){ - simple_tokenizer *t; - - t = (simple_tokenizer *) sqlite3_malloc(sizeof(*t)); - if( t==NULL ) return SQLITE_NOMEM; - memset(t, 0, sizeof(*t)); - - /* TODO(shess) Delimiters need to remain the same from run to run, - ** else we need to reindex. One solution would be a meta-table to - ** track such information in the database, then we'd only want this - ** information on the initial create. - */ - if( argc>1 ){ - int i, n = strlen(argv[1]); - for(i=0; i=0x80 ){ - sqlite3_free(t); - return SQLITE_ERROR; - } - t->delim[ch] = 1; - } - } else { - /* Mark non-alphanumeric ASCII characters as delimiters */ - int i; - for(i=1; i<0x80; i++){ - t->delim[i] = !((i>='0' && i<='9') || (i>='A' && i<='Z') || - (i>='a' && i<='z')); - } - } - - *ppTokenizer = &t->base; - return SQLITE_OK; -} - -/* -** Destroy a tokenizer -*/ -static int simpleDestroy(sqlite3_tokenizer *pTokenizer){ - sqlite3_free(pTokenizer); - return SQLITE_OK; -} - -/* -** Prepare to begin tokenizing a particular string. The input -** string to be tokenized is pInput[0..nBytes-1]. A cursor -** used to incrementally tokenize this string is returned in -** *ppCursor. 
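/*
** Editor's note (illustrative sketch, not part of the patch): the
** xCreate/xOpen/xNext/xClose/xDestroy cycle defined by the tokenizer
** interface in fts2_tokenizer.h above is normally driven as follows. The
** module pointer 'p' and the nul-terminated input 'zText' are assumed to be
** supplied by the caller; error handling is abbreviated.
*/
static int exampleTokenizeAll(const sqlite3_tokenizer_module *p, const char *zText){
  sqlite3_tokenizer *pTok = 0;
  sqlite3_tokenizer_cursor *pCsr = 0;
  const char *zToken;
  int nToken, iStart, iEnd, iPos;
  int rc;

  rc = p->xCreate(0, 0, &pTok);
  if( rc!=SQLITE_OK ) return rc;
  pTok->pModule = p;                 /* caller fills in pModule, per the header comment */

  rc = p->xOpen(pTok, zText, -1, &pCsr);
  if( rc==SQLITE_OK ){
    pCsr->pTokenizer = pTok;
    while( p->xNext(pCsr, &zToken, &nToken, &iStart, &iEnd, &iPos)==SQLITE_OK ){
      /* zToken[0..nToken-1] is the normalized token, drawn from
      ** zText[iStart..iEnd-1]; iPos is its position in the token stream. */
    }
    rc = p->xClose(pCsr);
  }
  p->xDestroy(pTok);
  return rc;
}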
-*/ -static int simpleOpen( - sqlite3_tokenizer *pTokenizer, /* The tokenizer */ - const char *pInput, int nBytes, /* String to be tokenized */ - sqlite3_tokenizer_cursor **ppCursor /* OUT: Tokenization cursor */ -){ - simple_tokenizer_cursor *c; - - c = (simple_tokenizer_cursor *) sqlite3_malloc(sizeof(*c)); - if( c==NULL ) return SQLITE_NOMEM; - - c->pInput = pInput; - if( pInput==0 ){ - c->nBytes = 0; - }else if( nBytes<0 ){ - c->nBytes = (int)strlen(pInput); - }else{ - c->nBytes = nBytes; - } - c->iOffset = 0; /* start tokenizing at the beginning */ - c->iToken = 0; - c->pToken = NULL; /* no space allocated, yet. */ - c->nTokenAllocated = 0; - - *ppCursor = &c->base; - return SQLITE_OK; -} - -/* -** Close a tokenization cursor previously opened by a call to -** simpleOpen() above. -*/ -static int simpleClose(sqlite3_tokenizer_cursor *pCursor){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - sqlite3_free(c->pToken); - sqlite3_free(c); - return SQLITE_OK; -} - -/* -** Extract the next token from a tokenization cursor. The cursor must -** have been opened by a prior call to simpleOpen(). -*/ -static int simpleNext( - sqlite3_tokenizer_cursor *pCursor, /* Cursor returned by simpleOpen */ - const char **ppToken, /* OUT: *ppToken is the token text */ - int *pnBytes, /* OUT: Number of bytes in token */ - int *piStartOffset, /* OUT: Starting offset of token */ - int *piEndOffset, /* OUT: Ending offset of token */ - int *piPosition /* OUT: Position integer of token */ -){ - simple_tokenizer_cursor *c = (simple_tokenizer_cursor *) pCursor; - simple_tokenizer *t = (simple_tokenizer *) pCursor->pTokenizer; - unsigned char *p = (unsigned char *)c->pInput; - - while( c->iOffsetnBytes ){ - int iStartOffset; - - /* Scan past delimiter characters */ - while( c->iOffsetnBytes && simpleDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - /* Count non-delimiter characters. */ - iStartOffset = c->iOffset; - while( c->iOffsetnBytes && !simpleDelim(t, p[c->iOffset]) ){ - c->iOffset++; - } - - if( c->iOffset>iStartOffset ){ - int i, n = c->iOffset-iStartOffset; - if( n>c->nTokenAllocated ){ - c->nTokenAllocated = n+20; - c->pToken = sqlite3_realloc(c->pToken, c->nTokenAllocated); - if( c->pToken==NULL ) return SQLITE_NOMEM; - } - for(i=0; ipToken[i] = (ch>='A' && ch<='Z') ? (ch - 'A' + 'a') : ch; - } - *ppToken = c->pToken; - *pnBytes = n; - *piStartOffset = iStartOffset; - *piEndOffset = c->iOffset; - *piPosition = c->iToken++; - - return SQLITE_OK; - } - } - return SQLITE_DONE; -} - -/* -** The set of routines that implement the simple tokenizer -*/ -static const sqlite3_tokenizer_module simpleTokenizerModule = { - 0, - simpleCreate, - simpleDestroy, - simpleOpen, - simpleClose, - simpleNext, -}; - -/* -** Allocate a new simple tokenizer. Return a pointer to the new -** tokenizer in *ppModule -*/ -void sqlite3Fts2SimpleTokenizerModule( - sqlite3_tokenizer_module const**ppModule -){ - *ppModule = &simpleTokenizerModule; -} - -#endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS2) */ diff --git a/ext/fts2/mkfts2amal.tcl b/ext/fts2/mkfts2amal.tcl deleted file mode 100644 index 5c8d1e93d7..0000000000 --- a/ext/fts2/mkfts2amal.tcl +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/tclsh -# -# This script builds a single C code file holding all of FTS2 code. -# The name of the output file is fts2amal.c. To build this file, -# first do: -# -# make target_source -# -# The make target above moves all of the source code files into -# a subdirectory named "tsrc". 
(This script expects to find the files -# there and will not work if they are not found.) -# -# After the "tsrc" directory has been created and populated, run -# this script: -# -# tclsh mkfts2amal.tcl -# -# The amalgamated FTS2 code will be written into fts2amal.c -# - -# Open the output file and write a header comment at the beginning -# of the file. -# -set out [open fts2amal.c w] -set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] -puts $out [subst \ -{/****************************************************************************** -** This file is an amalgamation of separate C source files from the SQLite -** Full Text Search extension 2 (fts2). By combining all the individual C -** code files into this single large file, the entire code can be compiled -** as a one translation unit. This allows many compilers to do optimizations -** that would not be possible if the files were compiled separately. It also -** makes the code easier to import into other projects. -** -** This amalgamation was generated on $today. -*/}] - -# These are the header files used by FTS2. The first time any of these -# files are seen in a #include statement in the C code, include the complete -# text of the file in-line. The file only needs to be included once. -# -foreach hdr { - fts2.h - fts2_hash.h - fts2_tokenizer.h - sqlite3.h - sqlite3ext.h -} { - set available_hdr($hdr) 1 -} - -# 78 stars used for comment formatting. -set s78 \ -{*****************************************************************************} - -# Insert a comment into the code -# -proc section_comment {text} { - global out s78 - set n [string length $text] - set nstar [expr {60 - $n}] - set stars [string range $s78 0 $nstar] - puts $out "/************** $text $stars/" -} - -# Read the source file named $filename and write it into the -# sqlite3.c output file. If any #include statements are seen, -# process them approprately. -# -proc copy_file {filename} { - global seen_hdr available_hdr out - set tail [file tail $filename] - section_comment "Begin file $tail" - set in [open $filename r] - while {![eof $in]} { - set line [gets $in] - if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { - if {[info exists available_hdr($hdr)]} { - if {$available_hdr($hdr)} { - section_comment "Include $hdr in the middle of $tail" - copy_file tsrc/$hdr - section_comment "Continuing where we left off in $tail" - } - } elseif {![info exists seen_hdr($hdr)]} { - set seen_hdr($hdr) 1 - puts $out $line - } - } elseif {[regexp {^#ifdef __cplusplus} $line]} { - puts $out "#if 0" - } elseif {[regexp {^#line} $line]} { - # Skip #line directives. - } else { - puts $out $line - } - } - close $in - section_comment "End of $tail" -} - - -# Process the source files. Process files containing commonly -# used subroutines first in order to help the compiler find -# inlining opportunities. -# -foreach file { - fts2.c - fts2_hash.c - fts2_porter.c - fts2_tokenizer.c - fts2_tokenizer1.c - fts2_icu.c -} { - copy_file tsrc/$file -} - -close $out diff --git a/ext/fts3/README.syntax b/ext/fts3/README.syntax index 01bc80c5fb..d32ae384c5 100644 --- a/ext/fts3/README.syntax +++ b/ext/fts3/README.syntax @@ -62,20 +62,20 @@ matches rows that contain both the "engineering" and "consultancy" tokens in the same column with not more than 10 other words between them. It does not matter which of the two terms occurs first in the document, only that - they be seperated by only 10 tokens or less. 
The user may also specify + they be separated by only 10 tokens or less. The user may also specify a different required proximity by adding "/N" immediately after the NEAR operator, where N is an integer. For example: MATCH 'engineering NEAR/5 consultancy' - searches for a row containing an instance of each specified token seperated + searches for a row containing an instance of each specified token separated by not more than 5 other tokens. More than one NEAR operator can be used in as sequence. For example this query: MATCH 'reliable NEAR/2 engineering NEAR/5 consultancy' searches for a row that contains an instance of the token "reliable" - seperated by not more than two tokens from an instance of "engineering", + separated by not more than two tokens from an instance of "engineering", which is in turn separated by not more than 5 other tokens from an instance of the term "consultancy". Phrases enclosed in quotes may also be used as arguments to the NEAR operator. @@ -146,7 +146,7 @@ MATCH '(hello world) OR (simple example)' matches documents that contain both "hello" and "world", and documents - that contain both "simple" and "example". It is not possible to forumlate + that contain both "simple" and "example". It is not possible to formulate such a query using the standard syntax. 2) Instead of separating tokens and phrases by whitespace, an AND operator @@ -174,7 +174,7 @@ 4) Unlike in the standard syntax, where the OR operator has a higher precedence than the implicit AND operator, when using the enhanced - syntax implicit and explict AND operators have a higher precedence + syntax implicit and explicit AND operators have a higher precedence than OR operators. Using the enhanced syntax, the following two queries are equivalent: diff --git a/ext/fts3/fts3.c b/ext/fts3/fts3.c index 074123d658..f178abafed 100644 --- a/ext/fts3/fts3.c +++ b/ext/fts3/fts3.c @@ -87,7 +87,7 @@ ** Here, array { X } means zero or more occurrences of X, adjacent in ** memory. A "position" is an index of a token in the token stream ** generated by the tokenizer. Note that POS_END and POS_COLUMN occur -** in the same logical place as the position element, and act as sentinals +** in the same logical place as the position element, and act as sentinels ** ending a position list array. POS_END is 0. POS_COLUMN is 1. ** The positions numbers are not stored literally but rather as two more ** than the difference from the prior position, or the just the position plus @@ -295,12 +295,6 @@ # define SQLITE_CORE 1 #endif -#include -#include -#include -#include -#include -#include #include "fts3.h" #ifndef SQLITE_CORE @@ -308,6 +302,12 @@ SQLITE_EXTENSION_INIT1 #endif +typedef struct Fts3HashWrapper Fts3HashWrapper; +struct Fts3HashWrapper { + Fts3Hash hash; /* Hash table */ + int nRef; /* Number of pointers to this object */ +}; + static int fts3EvalNext(Fts3Cursor *pCsr); static int fts3EvalStart(Fts3Cursor *pCsr); static int fts3TermSegReaderCursor( @@ -634,6 +634,7 @@ static void fts3DeclareVtab(int *pRc, Fts3Table *p){ zLanguageid = (p->zLanguageid ? 
p->zLanguageid : "__langid"); sqlite3_vtab_config(p->db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1); + sqlite3_vtab_config(p->db, SQLITE_VTAB_INNOCUOUS); /* Create a list of user columns for the virtual table */ zCols = sqlite3_mprintf("%Q, ", p->azColumn[0]); @@ -1172,7 +1173,7 @@ static int fts3InitVtab( sqlite3_vtab **ppVTab, /* Write the resulting vtab structure here */ char **pzErr /* Write any error message here */ ){ - Fts3Hash *pHash = (Fts3Hash *)pAux; + Fts3Hash *pHash = &((Fts3HashWrapper*)pAux)->hash; Fts3Table *p = 0; /* Pointer to allocated vtab */ int rc = SQLITE_OK; /* Return code */ int i; /* Iterator variable */ @@ -2337,10 +2338,15 @@ static int fts3PoslistPhraseMerge( if( *p1==POS_COLUMN ){ p1++; p1 += fts3GetVarint32(p1, &iCol1); + /* iCol1==0 indicates corruption. Column 0 does not have a POS_COLUMN + ** entry, so this is actually end-of-doclist. */ + if( iCol1==0 ) return 0; } if( *p2==POS_COLUMN ){ p2++; p2 += fts3GetVarint32(p2, &iCol2); + /* As above, iCol2==0 indicates corruption. */ + if( iCol2==0 ) return 0; } while( 1 ){ @@ -2627,7 +2633,7 @@ static int fts3DoclistOrMerge( ** sizes of the two inputs, plus enough space for exactly one of the input ** docids to grow. ** - ** A symetric argument may be made if the doclists are in descending + ** A symmetric argument may be made if the doclists are in descending ** order. */ aOut = sqlite3_malloc64((i64)n1+n2+FTS3_VARINT_MAX-1+FTS3_BUFFER_PADDING); @@ -2882,7 +2888,7 @@ static int fts3TermSelectMerge( ** ** Similar padding is added in the fts3DoclistOrMerge() function. */ - pTS->aaOutput[0] = sqlite3_malloc(nDoclist + FTS3_VARINT_MAX + 1); + pTS->aaOutput[0] = sqlite3_malloc64((i64)nDoclist + FTS3_VARINT_MAX + 1); pTS->anOutput[0] = nDoclist; if( pTS->aaOutput[0] ){ memcpy(pTS->aaOutput[0], aDoclist, nDoclist); @@ -3883,6 +3889,8 @@ static int fts3RenameMethod( rc = sqlite3Fts3PendingTermsFlush(p); } + p->bIgnoreSavepoint = 1; + if( p->zContentTbl==0 ){ fts3DbExec(&rc, db, "ALTER TABLE %Q.'%q_content' RENAME TO '%q_content';", @@ -3910,6 +3918,8 @@ static int fts3RenameMethod( "ALTER TABLE %Q.'%q_segdir' RENAME TO '%q_segdir';", p->zDb, p->zName, zName ); + + p->bIgnoreSavepoint = 0; return rc; } @@ -3920,12 +3930,28 @@ static int fts3RenameMethod( */ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ int rc = SQLITE_OK; - UNUSED_PARAMETER(iSavepoint); - assert( ((Fts3Table *)pVtab)->inTransaction ); - assert( ((Fts3Table *)pVtab)->mxSavepoint <= iSavepoint ); - TESTONLY( ((Fts3Table *)pVtab)->mxSavepoint = iSavepoint ); - if( ((Fts3Table *)pVtab)->bIgnoreSavepoint==0 ){ - rc = fts3SyncMethod(pVtab); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint<=iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + + if( pTab->bIgnoreSavepoint==0 ){ + if( fts3HashCount(&pTab->aIndex[0].hPending)>0 ){ + char *zSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')", + pTab->zDb, pTab->zName, pTab->zName + ); + if( zSql ){ + pTab->bIgnoreSavepoint = 1; + rc = sqlite3_exec(pTab->db, zSql, 0, 0, 0); + pTab->bIgnoreSavepoint = 0; + sqlite3_free(zSql); + }else{ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } } return rc; } @@ -3936,12 +3962,11 @@ static int fts3SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** This is a no-op. 
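/*
** Editor's note (illustrative sketch, not from the patch): the new xSavepoint
** logic above flushes pending terms by running the FTS "special insert"
** command against the table itself. For a hypothetical FTS4 table "ft" in
** schema "main", the sqlite3_mprintf() call shown above expands as follows.
*/
char *zFlushSql = sqlite3_mprintf("INSERT INTO %Q.%Q(%Q) VALUES('flush')",
                                  "main", "ft", "ft");
/* zFlushSql is now: INSERT INTO 'main'.'ft'('ft') VALUES('flush') */
sqlite3_free(zFlushSql);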
*/ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - TESTONLY( Fts3Table *p = (Fts3Table*)pVtab ); - UNUSED_PARAMETER(iSavepoint); - UNUSED_PARAMETER(pVtab); - assert( p->inTransaction ); - assert( p->mxSavepoint >= iSavepoint ); - TESTONLY( p->mxSavepoint = iSavepoint-1 ); + Fts3Table *pTab = (Fts3Table*)pVtab; + assert( pTab->inTransaction ); + assert( pTab->mxSavepoint >= iSavepoint ); + TESTONLY( pTab->mxSavepoint = iSavepoint-1 ); + pTab->iSavepoint = iSavepoint; return SQLITE_OK; } @@ -3951,11 +3976,13 @@ static int fts3ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** Discard the contents of the pending terms table. */ static int fts3RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ - Fts3Table *p = (Fts3Table*)pVtab; + Fts3Table *pTab = (Fts3Table*)pVtab; UNUSED_PARAMETER(iSavepoint); - assert( p->inTransaction ); - TESTONLY( p->mxSavepoint = iSavepoint ); - sqlite3Fts3PendingTermsClear(p); + assert( pTab->inTransaction ); + TESTONLY( pTab->mxSavepoint = iSavepoint ); + if( (iSavepoint+1)<=pTab->iSavepoint ){ + sqlite3Fts3PendingTermsClear(pTab); + } return SQLITE_OK; } @@ -3974,8 +4001,42 @@ static int fts3ShadowName(const char *zName){ return 0; } +/* +** Implementation of the xIntegrity() method on the FTS3/FTS4 virtual +** table. +*/ +static int fts3IntegrityMethod( + sqlite3_vtab *pVtab, /* The virtual table to be checked */ + const char *zSchema, /* Name of schema in which pVtab lives */ + const char *zTabname, /* Name of the pVTab table */ + int isQuick, /* True if this is a quick_check */ + char **pzErr /* Write error message here */ +){ + Fts3Table *p = (Fts3Table*)pVtab; + int rc = SQLITE_OK; + int bOk = 0; + + UNUSED_PARAMETER(isQuick); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); + assert( rc!=SQLITE_CORRUPT_VTAB ); + if( rc==SQLITE_ERROR || (rc&0xFF)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS%d table %s.%s: %s", + p->bFts4 ? 4 : 3, zSchema, zTabname, sqlite3_errstr(rc)); + if( *pzErr ) rc = SQLITE_OK; + }else if( rc==SQLITE_OK && bOk==0 ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS%d table %s.%s", + p->bFts4 ? 4 : 3, zSchema, zTabname); + if( *pzErr==0 ) rc = SQLITE_NOMEM; + } + sqlite3Fts3SegmentsClose(p); + return rc; +} + + + static const sqlite3_module fts3Module = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts3CreateMethod, /* xConnect */ fts3ConnectMethod, /* xBestIndex */ fts3BestIndexMethod, @@ -3999,6 +4060,7 @@ static const sqlite3_module fts3Module = { /* xRelease */ fts3ReleaseMethod, /* xRollbackTo */ fts3RollbackToMethod, /* xShadowName */ fts3ShadowName, + /* xIntegrity */ fts3IntegrityMethod, }; /* @@ -4007,9 +4069,12 @@ static const sqlite3_module fts3Module = { ** allocated for the tokenizer hash table. 
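/*
** Editor's note (stated as an assumption, for orientation only): the
** fts3IntegrityMethod() added above is reached through SQLite's ordinary
** integrity-check machinery once the module advertises iVersion 4, so a
** hypothetical application with an open handle 'db' containing an FTS3/FTS4
** table could exercise it with the pragma below. The isQuick parameter
** distinguishes "PRAGMA quick_check" from "PRAGMA integrity_check".
*/
int rcCheck = sqlite3_exec(db, "PRAGMA integrity_check;", 0, 0, 0);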
*/ static void hashDestroy(void *p){ - Fts3Hash *pHash = (Fts3Hash *)p; - sqlite3Fts3HashClear(pHash); - sqlite3_free(pHash); + Fts3HashWrapper *pHash = (Fts3HashWrapper *)p; + pHash->nRef--; + if( pHash->nRef<=0 ){ + sqlite3Fts3HashClear(&pHash->hash); + sqlite3_free(pHash); + } } /* @@ -4039,7 +4104,7 @@ void sqlite3Fts3IcuTokenizerModule(sqlite3_tokenizer_module const**ppModule); */ int sqlite3Fts3Init(sqlite3 *db){ int rc = SQLITE_OK; - Fts3Hash *pHash = 0; + Fts3HashWrapper *pHash = 0; const sqlite3_tokenizer_module *pSimple = 0; const sqlite3_tokenizer_module *pPorter = 0; #ifndef SQLITE_DISABLE_FTS3_UNICODE @@ -4067,23 +4132,24 @@ int sqlite3Fts3Init(sqlite3 *db){ sqlite3Fts3PorterTokenizerModule(&pPorter); /* Allocate and initialize the hash-table used to store tokenizers. */ - pHash = sqlite3_malloc(sizeof(Fts3Hash)); + pHash = sqlite3_malloc(sizeof(Fts3HashWrapper)); if( !pHash ){ rc = SQLITE_NOMEM; }else{ - sqlite3Fts3HashInit(pHash, FTS3_HASH_STRING, 1); + sqlite3Fts3HashInit(&pHash->hash, FTS3_HASH_STRING, 1); + pHash->nRef = 0; } /* Load the built-in tokenizers into the hash table */ if( rc==SQLITE_OK ){ - if( sqlite3Fts3HashInsert(pHash, "simple", 7, (void *)pSimple) - || sqlite3Fts3HashInsert(pHash, "porter", 7, (void *)pPorter) + if( sqlite3Fts3HashInsert(&pHash->hash, "simple", 7, (void *)pSimple) + || sqlite3Fts3HashInsert(&pHash->hash, "porter", 7, (void *)pPorter) #ifndef SQLITE_DISABLE_FTS3_UNICODE - || sqlite3Fts3HashInsert(pHash, "unicode61", 10, (void *)pUnicode) + || sqlite3Fts3HashInsert(&pHash->hash, "unicode61", 10, (void *)pUnicode) #endif #ifdef SQLITE_ENABLE_ICU - || (pIcu && sqlite3Fts3HashInsert(pHash, "icu", 4, (void *)pIcu)) + || (pIcu && sqlite3Fts3HashInsert(&pHash->hash, "icu", 4, (void *)pIcu)) #endif ){ rc = SQLITE_NOMEM; @@ -4092,7 +4158,7 @@ int sqlite3Fts3Init(sqlite3 *db){ #ifdef SQLITE_TEST if( rc==SQLITE_OK ){ - rc = sqlite3Fts3ExprInitTestInterface(db, pHash); + rc = sqlite3Fts3ExprInitTestInterface(db, &pHash->hash); } #endif @@ -4101,23 +4167,26 @@ int sqlite3Fts3Init(sqlite3 *db){ ** module with sqlite. */ if( SQLITE_OK==rc - && SQLITE_OK==(rc = sqlite3Fts3InitHashTable(db, pHash, "fts3_tokenizer")) + && SQLITE_OK==(rc=sqlite3Fts3InitHashTable(db,&pHash->hash,"fts3_tokenizer")) && SQLITE_OK==(rc = sqlite3_overload_function(db, "snippet", -1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "offsets", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 1)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "matchinfo", 2)) && SQLITE_OK==(rc = sqlite3_overload_function(db, "optimize", 1)) ){ + pHash->nRef++; rc = sqlite3_create_module_v2( db, "fts3", &fts3Module, (void *)pHash, hashDestroy ); if( rc==SQLITE_OK ){ + pHash->nRef++; rc = sqlite3_create_module_v2( - db, "fts4", &fts3Module, (void *)pHash, 0 + db, "fts4", &fts3Module, (void *)pHash, hashDestroy ); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts3InitTok(db, (void *)pHash); + pHash->nRef++; + rc = sqlite3Fts3InitTok(db, (void *)pHash, hashDestroy); } return rc; } @@ -4126,7 +4195,7 @@ int sqlite3Fts3Init(sqlite3 *db){ /* An error has occurred. Delete the hash table and return the error code. 
*/ assert( rc!=SQLITE_OK ); if( pHash ){ - sqlite3Fts3HashClear(pHash); + sqlite3Fts3HashClear(&pHash->hash); sqlite3_free(pHash); } return rc; @@ -4295,8 +4364,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ char *aPoslist = 0; /* Position list for deferred tokens */ int nPoslist = 0; /* Number of bytes in aPoslist */ int iPrev = -1; /* Token number of previous deferred token */ - - assert( pPhrase->doclist.bFreeList==0 ); + char *aFree = (pPhrase->doclist.bFreeList ? pPhrase->doclist.pList : 0); for(iToken=0; iTokennToken; iToken++){ Fts3PhraseToken *pToken = &pPhrase->aToken[iToken]; @@ -4310,6 +4378,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ if( pList==0 ){ sqlite3_free(aPoslist); + sqlite3_free(aFree); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; @@ -4330,6 +4399,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ nPoslist = (int)(aOut - aPoslist); if( nPoslist==0 ){ sqlite3_free(aPoslist); + sqlite3_free(aFree); pPhrase->doclist.pList = 0; pPhrase->doclist.nList = 0; return SQLITE_OK; @@ -4362,13 +4432,14 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ nDistance = iPrev - nMaxUndeferred; } - aOut = (char *)sqlite3_malloc(nPoslist+8); + aOut = (char *)sqlite3Fts3MallocZero(((i64)nPoslist)+FTS3_BUFFER_PADDING); if( !aOut ){ sqlite3_free(aPoslist); return SQLITE_NOMEM; } pPhrase->doclist.pList = aOut; + assert( p1 && p2 ); if( fts3PoslistPhraseMerge(&aOut, nDistance, 0, 1, &p1, &p2) ){ pPhrase->doclist.bFreeList = 1; pPhrase->doclist.nList = (int)(aOut - pPhrase->doclist.pList); @@ -4381,6 +4452,7 @@ static int fts3EvalDeferredPhrase(Fts3Cursor *pCsr, Fts3Phrase *pPhrase){ } } + if( pPhrase->doclist.pList!=aFree ) sqlite3_free(aFree); return SQLITE_OK; } #endif /* SQLITE_DISABLE_FTS4_DEFERRED */ @@ -4659,7 +4731,7 @@ static int incrPhraseTokenNext( ** ** * does not contain any deferred tokens. ** -** Advance it to the next matching documnent in the database and populate +** Advance it to the next matching document in the database and populate ** the Fts3Doclist.pList and nList fields. ** ** If there is no "next" entry and no error occurs, then *pbEof is set to @@ -4729,7 +4801,7 @@ static int fts3EvalIncrPhraseNext( if( bEof==0 ){ int nList = 0; int nByte = a[p->nToken-1].nList; - char *aDoclist = sqlite3_malloc(nByte+FTS3_BUFFER_PADDING); + char *aDoclist = sqlite3_malloc64((i64)nByte+FTS3_BUFFER_PADDING); if( !aDoclist ) return SQLITE_NOMEM; memcpy(aDoclist, a[p->nToken-1].pList, nByte+1); memset(&aDoclist[nByte], 0, FTS3_BUFFER_PADDING); @@ -5271,9 +5343,8 @@ static void fts3EvalNextRow( Fts3Expr *pExpr, /* Expr. 
to advance to next matching row */ int *pRc /* IN/OUT: Error code */ ){ - if( *pRc==SQLITE_OK ){ + if( *pRc==SQLITE_OK && pExpr->bEof==0 ){ int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */ - assert( pExpr->bEof==0 ); pExpr->bStart = 1; switch( pExpr->eType ){ @@ -5446,7 +5517,7 @@ static int fts3EvalNearTest(Fts3Expr *pExpr, int *pRc){ nTmp += p->pRight->pPhrase->doclist.nList; } nTmp += p->pPhrase->doclist.nList; - aTmp = sqlite3_malloc64(nTmp*2); + aTmp = sqlite3_malloc64(nTmp*2 + FTS3_VARINT_MAX); if( !aTmp ){ *pRc = SQLITE_NOMEM; res = 0; @@ -5555,11 +5626,10 @@ static int fts3EvalTestExpr( default: { #ifndef SQLITE_DISABLE_FTS4_DEFERRED - if( pCsr->pDeferred - && (pExpr->iDocid==pCsr->iPrevId || pExpr->bDeferred) - ){ + if( pCsr->pDeferred && (pExpr->bDeferred || ( + pExpr->iDocid==pCsr->iPrevId && pExpr->pPhrase->doclist.pList + ))){ Fts3Phrase *pPhrase = pExpr->pPhrase; - assert( pExpr->bDeferred || pPhrase->doclist.bFreeList==0 ); if( pExpr->bDeferred ){ fts3EvalInvalidatePoslist(pPhrase); } @@ -5668,7 +5738,7 @@ static int fts3EvalNext(Fts3Cursor *pCsr){ } /* -** Restart interation for expression pExpr so that the next call to +** Restart iteration for expression pExpr so that the next call to ** fts3EvalNext() visits the first row. Do not allow incremental ** loading or merging of phrase doclists for this iteration. ** @@ -5711,6 +5781,24 @@ static void fts3EvalRestart( } } +/* +** Expression node pExpr is an MSR phrase. This function restarts pExpr +** so that it is a regular phrase query, not an MSR. SQLITE_OK is returned +** if successful, or an SQLite error code otherwise. +*/ +int sqlite3Fts3MsrCancel(Fts3Cursor *pCsr, Fts3Expr *pExpr){ + int rc = SQLITE_OK; + if( pExpr->bEof==0 ){ + i64 iDocid = pExpr->iDocid; + fts3EvalRestart(pCsr, pExpr, &rc); + while( rc==SQLITE_OK && pExpr->iDocid!=iDocid ){ + fts3EvalNextRow(pCsr, pExpr, &rc); + if( pExpr->bEof ) rc = FTS_CORRUPT_VTAB; + } + } + return rc; +} + /* ** After allocating the Fts3Expr.aMI[] array for each phrase in the ** expression rooted at pExpr, the cursor iterates through all rows matched @@ -5750,6 +5838,22 @@ static void fts3EvalUpdateCounts(Fts3Expr *pExpr, int nCol){ } } +/* +** This is an sqlite3Fts3ExprIterate() callback. If the Fts3Expr.aMI[] array +** has not yet been allocated, allocate and zero it. Otherwise, just zero +** it. +*/ +static int fts3AllocateMSI(Fts3Expr *pExpr, int iPhrase, void *pCtx){ + Fts3Table *pTab = (Fts3Table*)pCtx; + UNUSED_PARAMETER(iPhrase); + if( pExpr->aMI==0 ){ + pExpr->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); + if( pExpr->aMI==0 ) return SQLITE_NOMEM; + } + memset(pExpr->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); + return SQLITE_OK; +} + /* ** Expression pExpr must be of type FTSQUERY_PHRASE. 
** @@ -5771,7 +5875,6 @@ static int fts3EvalGatherStats( if( pExpr->aMI==0 ){ Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab; Fts3Expr *pRoot; /* Root of NEAR expression */ - Fts3Expr *p; /* Iterator used for several purposes */ sqlite3_int64 iPrevId = pCsr->iPrevId; sqlite3_int64 iDocid; @@ -5779,7 +5882,9 @@ static int fts3EvalGatherStats( /* Find the root of the NEAR expression */ pRoot = pExpr; - while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){ + while( pRoot->pParent + && (pRoot->pParent->eType==FTSQUERY_NEAR || pRoot->bDeferred) + ){ pRoot = pRoot->pParent; } iDocid = pRoot->iDocid; @@ -5787,14 +5892,8 @@ static int fts3EvalGatherStats( assert( pRoot->bStart ); /* Allocate space for the aMSI[] array of each FTSQUERY_PHRASE node */ - for(p=pRoot; p; p=p->pLeft){ - Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight); - assert( pE->aMI==0 ); - pE->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32)); - if( !pE->aMI ) return SQLITE_NOMEM; - memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32)); - } - + rc = sqlite3Fts3ExprIterate(pRoot, fts3AllocateMSI, (void*)pTab); + if( rc!=SQLITE_OK ) return rc; fts3EvalRestart(pCsr, pRoot, &rc); while( pCsr->isEof==0 && rc==SQLITE_OK ){ @@ -5950,6 +6049,7 @@ int sqlite3Fts3EvalPhrasePoslist( u8 bTreeEof = 0; Fts3Expr *p; /* Used to iterate from pExpr to root */ Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */ + Fts3Expr *pRun; /* Closest non-deferred ancestor of pNear */ int bMatch; /* Check if this phrase descends from an OR expression node. If not, @@ -5964,25 +6064,30 @@ int sqlite3Fts3EvalPhrasePoslist( if( p->bEof ) bTreeEof = 1; } if( bOr==0 ) return SQLITE_OK; + pRun = pNear; + while( pRun->bDeferred ){ + assert( pRun->pParent ); + pRun = pRun->pParent; + } /* This is the descendent of an OR node. In this case we cannot use ** an incremental phrase. Load the entire doclist for the phrase ** into memory in this case. */ if( pPhrase->bIncr ){ - int bEofSave = pNear->bEof; - fts3EvalRestart(pCsr, pNear, &rc); - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); - if( bEofSave==0 && pNear->iDocid==iDocid ) break; + int bEofSave = pRun->bEof; + fts3EvalRestart(pCsr, pRun, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); + if( bEofSave==0 && pRun->iDocid==iDocid ) break; } assert( rc!=SQLITE_OK || pPhrase->bIncr==0 ); - if( rc==SQLITE_OK && pNear->bEof!=bEofSave ){ + if( rc==SQLITE_OK && pRun->bEof!=bEofSave ){ rc = FTS_CORRUPT_VTAB; } } if( bTreeEof ){ - while( rc==SQLITE_OK && !pNear->bEof ){ - fts3EvalNextRow(pCsr, pNear, &rc); + while( rc==SQLITE_OK && !pRun->bEof ){ + fts3EvalNextRow(pCsr, pRun, &rc); } } if( rc!=SQLITE_OK ) return rc; @@ -6081,7 +6186,7 @@ int sqlite3Fts3Corrupt(){ } #endif -#if !SQLITE_CORE +#if !defined(SQLITE_CORE) /* ** Initialize API pointer table, if required. */ diff --git a/ext/fts3/fts3Int.h b/ext/fts3/fts3Int.h index 3a62ccc7a7..e98b90a753 100644 --- a/ext/fts3/fts3Int.h +++ b/ext/fts3/fts3Int.h @@ -14,10 +14,20 @@ #ifndef _FTSINT_H #define _FTSINT_H -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) +/* +** Activate assert() only if SQLITE_TEST is enabled. +*/ +#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) # define NDEBUG 1 #endif +#include +#include +#include +#include +#include +#include + /* FTS3/FTS4 require virtual tables */ #ifdef SQLITE_OMIT_VIRTUALTABLE # undef SQLITE_ENABLE_FTS3 @@ -37,7 +47,7 @@ /* If not building as part of the core, include sqlite3ext.h. 
*/ #ifndef SQLITE_CORE -# include "sqlite3ext.h" +# include "sqlite3ext.h" SQLITE_EXTENSION_INIT3 #endif @@ -179,13 +189,6 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ */ #define UNUSED_PARAMETER(x) (void)(x) -/* -** Activate assert() only if SQLITE_TEST is enabled. -*/ -#if !defined(NDEBUG) && !defined(SQLITE_DEBUG) -# define NDEBUG 1 -#endif - /* ** The TESTONLY macro is used to enclose variable declarations or ** other bits of code that are needed to support the arguments @@ -202,6 +205,19 @@ typedef sqlite3_int64 i64; /* 8-byte signed integer */ #define deliberate_fall_through +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + + #endif /* SQLITE_AMALGAMATION */ #ifdef SQLITE_DEBUG @@ -265,6 +281,7 @@ struct Fts3Table { int nPgsz; /* Page size for host database */ char *zSegmentsTbl; /* Name of %_segments table */ sqlite3_blob *pSegments; /* Blob handle open on %_segments table */ + int iSavepoint; /* ** The following array of hash tables is used to buffer pending index @@ -305,7 +322,7 @@ struct Fts3Table { #endif #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - /* True to disable the incremental doclist optimization. This is controled + /* True to disable the incremental doclist optimization. This is controlled ** by special insert command 'test-no-incr-doclist'. */ int bNoIncrDoclist; @@ -357,7 +374,7 @@ struct Fts3Cursor { /* ** The Fts3Cursor.eSearch member is always set to one of the following. -** Actualy, Fts3Cursor.eSearch can be greater than or equal to +** Actually, Fts3Cursor.eSearch can be greater than or equal to ** FTS3_FULLTEXT_SEARCH. If so, then Fts3Cursor.eSearch - 2 is the index ** of the column to be searched. For example, in ** @@ -430,9 +447,13 @@ struct Fts3Phrase { */ int nToken; /* Number of tokens in the phrase */ int iColumn; /* Index of column this phrase must match */ - Fts3PhraseToken aToken[1]; /* One entry for each token in the phrase */ + Fts3PhraseToken aToken[FLEXARRAY]; /* One for each token in the phrase */ }; +/* Size (in bytes) of an Fts3Phrase object large enough to hold N tokens */ +#define SZ_FTS3PHRASE(N) \ + (offsetof(Fts3Phrase,aToken)+(N)*sizeof(Fts3PhraseToken)) + /* ** A tree of these objects forms the RHS of a MATCH operator. 
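
The `aToken[1]` member becomes a FLEXARRAY flexible array so that the size of an `Fts3Phrase` is computed with `offsetof()` via `SZ_FTS3PHRASE()` instead of relying on reads past a one-element array. A hedged sketch of how a phrase and its token array end up in a single allocation, assuming `fts3Int.h` is in scope; `getNextToken()` in fts3_expr.c uses `SZ_FTS3PHRASE(1)` this way, and the names below are illustrative:

```c
/* Allocate an expression node whose phrase has nToken tokens and nText
** bytes of token text, all in one block. Illustrative names only. */
static Fts3Expr *examplePhraseAlloc(int nToken, int nText){
  i64 nByte = sizeof(Fts3Expr) + SZ_FTS3PHRASE(nToken) + nText;
  Fts3Expr *pNew = (Fts3Expr *)sqlite3Fts3MallocZero(nByte);
  if( pNew ){
    pNew->eType = FTSQUERY_PHRASE;
    pNew->pPhrase = (Fts3Phrase *)&pNew[1];  /* phrase follows the node   */
    pNew->pPhrase->nToken = nToken;          /* aToken[] follows in place */
  }
  return pNew;
}
```
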
** @@ -558,7 +579,7 @@ struct Fts3MultiSegReader { int nAdvance; /* How many seg-readers to advance */ Fts3SegFilter *pFilter; /* Pointer to filter object */ char *aBuffer; /* Buffer to merge doclists in */ - int nBuffer; /* Allocated size of aBuffer[] in bytes */ + i64 nBuffer; /* Allocated size of aBuffer[] in bytes */ int iColFilter; /* If >=0, filter for this column */ int bRestart; @@ -639,9 +660,10 @@ int sqlite3Fts3MsrIncrNext( int sqlite3Fts3EvalPhrasePoslist(Fts3Cursor *, Fts3Expr *, int iCol, char **); int sqlite3Fts3MsrOvfl(Fts3Cursor *, Fts3MultiSegReader *, int *); int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr); +int sqlite3Fts3MsrCancel(Fts3Cursor*, Fts3Expr*); /* fts3_tokenize_vtab.c */ -int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *); +int sqlite3Fts3InitTok(sqlite3*, Fts3Hash *, void(*xDestroy)(void*)); /* fts3_unicode2.c (functions generated by parsing unicode text files) */ #ifndef SQLITE_DISABLE_FTS3_UNICODE @@ -650,5 +672,9 @@ int sqlite3FtsUnicodeIsalnum(int); int sqlite3FtsUnicodeIsdiacritic(int); #endif +int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*); + +int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk); + #endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */ #endif /* _FTSINT_H */ diff --git a/ext/fts3/fts3_aux.c b/ext/fts3/fts3_aux.c index d3b194c942..439d579366 100644 --- a/ext/fts3/fts3_aux.c +++ b/ext/fts3/fts3_aux.c @@ -545,7 +545,8 @@ int sqlite3Fts3InitAux(sqlite3 *db){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ diff --git a/ext/fts3/fts3_expr.c b/ext/fts3/fts3_expr.c index ea8167c595..681d4e8625 100644 --- a/ext/fts3/fts3_expr.c +++ b/ext/fts3/fts3_expr.c @@ -161,6 +161,23 @@ int sqlite3Fts3OpenTokenizer( */ static int fts3ExprParse(ParseContext *, const char *, int, Fts3Expr **, int *); +/* +** Search buffer z[], size n, for a '"' character. Or, if enable_parenthesis +** is defined, search for '(' and ')' as well. Return the index of the first +** such character in the buffer. If there is no such character, return -1. +*/ +static int findBarredChar(const char *z, int n){ + int ii; + for(ii=0; iiiLangid, z, i, &pCursor); + *pnConsumed = n; + rc = sqlite3Fts3OpenTokenizer(pTokenizer, pParse->iLangid, z, n, &pCursor); if( rc==SQLITE_OK ){ const char *zToken; int nToken = 0, iStart = 0, iEnd = 0, iPosition = 0; @@ -202,7 +212,18 @@ static int getNextToken( rc = pModule->xNext(pCursor, &zToken, &nToken, &iStart, &iEnd, &iPosition); if( rc==SQLITE_OK ){ - nByte = sizeof(Fts3Expr) + sizeof(Fts3Phrase) + nToken; + /* Check that this tokenization did not gobble up any " characters. Or, + ** if enable_parenthesis is true, that it did not gobble up any + ** open or close parenthesis characters either. If it did, call + ** getNextToken() again, but pass only that part of the input buffer + ** up to the first such character. 
*/ + int iBarred = findBarredChar(z, iEnd); + if( iBarred>=0 ){ + pModule->xClose(pCursor); + return getNextToken(pParse, iCol, z, iBarred, ppExpr, pnConsumed); + } + + nByte = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1) + nToken; pRet = (Fts3Expr *)sqlite3Fts3MallocZero(nByte); if( !pRet ){ rc = SQLITE_NOMEM; @@ -212,7 +233,7 @@ static int getNextToken( pRet->pPhrase->nToken = 1; pRet->pPhrase->iColumn = iCol; pRet->pPhrase->aToken[0].n = nToken; - pRet->pPhrase->aToken[0].z = (char *)&pRet->pPhrase[1]; + pRet->pPhrase->aToken[0].z = (char*)&pRet->pPhrase->aToken[1]; memcpy(pRet->pPhrase->aToken[0].z, zToken, nToken); if( iEnd=0 ){ + *pnConsumed = iBarred; + } rc = SQLITE_OK; } @@ -283,9 +308,9 @@ static int getNextString( Fts3Expr *p = 0; sqlite3_tokenizer_cursor *pCursor = 0; char *zTemp = 0; - int nTemp = 0; + i64 nTemp = 0; - const int nSpace = sizeof(Fts3Expr) + sizeof(Fts3Phrase); + const int nSpace = sizeof(Fts3Expr) + SZ_FTS3PHRASE(1); int nToken = 0; /* The final Fts3Expr data structure, including the Fts3Phrase, @@ -319,10 +344,11 @@ static int getNextString( Fts3PhraseToken *pToken; p = fts3ReallocOrFree(p, nSpace + ii*sizeof(Fts3PhraseToken)); - if( !p ) goto no_mem; - zTemp = fts3ReallocOrFree(zTemp, nTemp + nByte); - if( !zTemp ) goto no_mem; + if( !zTemp || !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } assert( nToken==ii ); pToken = &((Fts3Phrase *)(&p[1]))->aToken[ii]; @@ -337,9 +363,6 @@ static int getNextString( nToken = ii+1; } } - - pModule->xClose(pCursor); - pCursor = 0; } if( rc==SQLITE_DONE ){ @@ -347,7 +370,10 @@ static int getNextString( char *zBuf = 0; p = fts3ReallocOrFree(p, nSpace + nToken*sizeof(Fts3PhraseToken) + nTemp); - if( !p ) goto no_mem; + if( !p ){ + rc = SQLITE_NOMEM; + goto getnextstring_out; + } memset(p, 0, (char *)&(((Fts3Phrase *)&p[1])->aToken[0])-(char *)p); p->eType = FTSQUERY_PHRASE; p->pPhrase = (Fts3Phrase *)&p[1]; @@ -355,11 +381,9 @@ static int getNextString( p->pPhrase->nToken = nToken; zBuf = (char *)&p->pPhrase->aToken[nToken]; + assert( nTemp==0 || zTemp ); if( zTemp ){ memcpy(zBuf, zTemp, nTemp); - sqlite3_free(zTemp); - }else{ - assert( nTemp==0 ); } for(jj=0; jjpPhrase->nToken; jj++){ @@ -369,17 +393,17 @@ static int getNextString( rc = SQLITE_OK; } - *ppExpr = p; - return rc; -no_mem: - + getnextstring_out: if( pCursor ){ pModule->xClose(pCursor); } sqlite3_free(zTemp); - sqlite3_free(p); - *ppExpr = 0; - return SQLITE_NOMEM; + if( rc!=SQLITE_OK ){ + sqlite3_free(p); + p = 0; + } + *ppExpr = p; + return rc; } /* @@ -658,7 +682,7 @@ static int fts3ExprParse( /* The isRequirePhrase variable is set to true if a phrase or ** an expression contained in parenthesis is required. If a - ** binary operator (AND, OR, NOT or NEAR) is encounted when + ** binary operator (AND, OR, NOT or NEAR) is encountered when ** isRequirePhrase is set, this is a syntax error. */ if( !isPhrase && isRequirePhrase ){ @@ -1240,7 +1264,6 @@ static void fts3ExprTestCommon( } if( rc!=SQLITE_OK && rc!=SQLITE_NOMEM ){ - sqlite3Fts3ExprFree(pExpr); sqlite3_result_error(context, "Error parsing expression", -1); }else if( rc==SQLITE_NOMEM || !(zBuf = exprToString(pExpr, 0)) ){ sqlite3_result_error_nomem(context); diff --git a/ext/fts3/fts3_hash.c b/ext/fts3/fts3_hash.c index 63e55b3dc9..1918be4cb7 100644 --- a/ext/fts3/fts3_hash.c +++ b/ext/fts3/fts3_hash.c @@ -187,7 +187,7 @@ static void fts3HashInsertElement( } -/* Resize the hash table so that it cantains "new_size" buckets. +/* Resize the hash table so that it contains "new_size" buckets. 
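
The body of `findBarredChar()` is garbled in this copy of the diff (angle-bracketed text appears to have been stripped), but its header comment specifies the behaviour: scan `z[0..n-1]` for the first `"` character, also treating `(` and `)` as barred when parenthesis support is enabled, and return its index or -1. A minimal sketch of that behaviour; the compile-time guard named here (`SQLITE_ENABLE_FTS3_PARENTHESIS`) is an assumption, not a quote from the patch:

```c
/* Sketch only: return the index of the first barred character in
** z[0..n-1], or -1 if there is none. Not the verbatim patch body. */
static int findBarredCharSketch(const char *z, int n){
  int ii;
  for(ii=0; ii<n; ii++){
    if( z[ii]=='"' ) return ii;
#ifdef SQLITE_ENABLE_FTS3_PARENTHESIS
    if( z[ii]=='(' || z[ii]==')' ) return ii;  /* assumed guard */
#endif
  }
  return -1;
}
```
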
** "new_size" must be a power of 2. The hash table might fail ** to resize if sqliteMalloc() fails. ** diff --git a/ext/fts3/fts3_porter.c b/ext/fts3/fts3_porter.c index 8fb4c25daa..35e81b74af 100644 --- a/ext/fts3/fts3_porter.c +++ b/ext/fts3/fts3_porter.c @@ -256,7 +256,7 @@ static int star_oh(const char *z){ /* ** If the word ends with zFrom and xCond() is true for the stem -** of the word that preceeds the zFrom ending, then change the +** of the word that precedes the zFrom ending, then change the ** ending to zTo. ** ** The input word *pz and zFrom are both in reverse order. zTo @@ -621,7 +621,7 @@ static int porterNext( if( n>c->nAllocated ){ char *pNew; c->nAllocated = n+20; - pNew = sqlite3_realloc(c->zToken, c->nAllocated); + pNew = sqlite3_realloc64(c->zToken, c->nAllocated); if( !pNew ) return SQLITE_NOMEM; c->zToken = pNew; } diff --git a/ext/fts3/fts3_snippet.c b/ext/fts3/fts3_snippet.c index c5192c4eb0..62e27d30bf 100644 --- a/ext/fts3/fts3_snippet.c +++ b/ext/fts3/fts3_snippet.c @@ -17,10 +17,6 @@ #include #include -#ifndef SQLITE_AMALGAMATION -typedef sqlite3_int64 i64; -#endif - /* ** Characters that may appear in the second argument to matchinfo(). */ @@ -41,7 +37,7 @@ typedef sqlite3_int64 i64; /* -** Used as an fts3ExprIterate() context when loading phrase doclists to +** Used as an sqlite3Fts3ExprIterate() context when loading phrase doclists to ** Fts3Expr.aDoclist[]/nDoclist. */ typedef struct LoadDoclistCtx LoadDoclistCtx; @@ -85,7 +81,7 @@ struct SnippetFragment { }; /* -** This type is used as an fts3ExprIterate() context object while +** This type is used as an sqlite3Fts3ExprIterate() context object while ** accumulating the data returned by the matchinfo() function. */ typedef struct MatchInfo MatchInfo; @@ -108,9 +104,13 @@ struct MatchinfoBuffer { int nElem; int bGlobal; /* Set if global data is loaded */ char *zMatchinfo; - u32 aMatchinfo[1]; + u32 aMI[FLEXARRAY]; }; +/* Size (in bytes) of a MatchinfoBuffer sufficient for N elements */ +#define SZ_MATCHINFOBUFFER(N) \ + (offsetof(MatchinfoBuffer,aMI)+(((N)+1)/2)*sizeof(u64)) + /* ** The snippet() and offsets() functions both return text values. 
An instance @@ -135,13 +135,13 @@ struct StrBuffer { static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ MatchinfoBuffer *pRet; sqlite3_int64 nByte = sizeof(u32) * (2*(sqlite3_int64)nElem + 1) - + sizeof(MatchinfoBuffer); + + SZ_MATCHINFOBUFFER(1); sqlite3_int64 nStr = strlen(zMatchinfo); pRet = sqlite3Fts3MallocZero(nByte + nStr+1); if( pRet ){ - pRet->aMatchinfo[0] = (u8*)(&pRet->aMatchinfo[1]) - (u8*)pRet; - pRet->aMatchinfo[1+nElem] = pRet->aMatchinfo[0] + pRet->aMI[0] = (u8*)(&pRet->aMI[1]) - (u8*)pRet; + pRet->aMI[1+nElem] = pRet->aMI[0] + sizeof(u32)*((int)nElem+1); pRet->nElem = (int)nElem; pRet->zMatchinfo = ((char*)pRet) + nByte; @@ -155,10 +155,10 @@ static MatchinfoBuffer *fts3MIBufferNew(size_t nElem, const char *zMatchinfo){ static void fts3MIBufferFree(void *p){ MatchinfoBuffer *pBuf = (MatchinfoBuffer*)((u8*)p - ((u32*)p)[-1]); - assert( (u32*)p==&pBuf->aMatchinfo[1] - || (u32*)p==&pBuf->aMatchinfo[pBuf->nElem+2] + assert( (u32*)p==&pBuf->aMI[1] + || (u32*)p==&pBuf->aMI[pBuf->nElem+2] ); - if( (u32*)p==&pBuf->aMatchinfo[1] ){ + if( (u32*)p==&pBuf->aMI[1] ){ pBuf->aRef[1] = 0; }else{ pBuf->aRef[2] = 0; @@ -175,18 +175,18 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ if( p->aRef[1]==0 ){ p->aRef[1] = 1; - aOut = &p->aMatchinfo[1]; + aOut = &p->aMI[1]; xRet = fts3MIBufferFree; } else if( p->aRef[2]==0 ){ p->aRef[2] = 1; - aOut = &p->aMatchinfo[p->nElem+2]; + aOut = &p->aMI[p->nElem+2]; xRet = fts3MIBufferFree; }else{ aOut = (u32*)sqlite3_malloc64(p->nElem * sizeof(u32)); if( aOut ){ xRet = sqlite3_free; - if( p->bGlobal ) memcpy(aOut, &p->aMatchinfo[1], p->nElem*sizeof(u32)); + if( p->bGlobal ) memcpy(aOut, &p->aMI[1], p->nElem*sizeof(u32)); } } @@ -196,7 +196,7 @@ static void (*fts3MIBufferAlloc(MatchinfoBuffer *p, u32 **paOut))(void*){ static void fts3MIBufferSetGlobal(MatchinfoBuffer *p){ p->bGlobal = 1; - memcpy(&p->aMatchinfo[2+p->nElem], &p->aMatchinfo[1], p->nElem*sizeof(u32)); + memcpy(&p->aMI[2+p->nElem], &p->aMI[1], p->nElem*sizeof(u32)); } /* @@ -244,7 +244,7 @@ static void fts3GetDeltaPosition(char **pp, i64 *piPos){ } /* -** Helper function for fts3ExprIterate() (see below). +** Helper function for sqlite3Fts3ExprIterate() (see below). */ static int fts3ExprIterate2( Fts3Expr *pExpr, /* Expression to iterate phrases of */ @@ -278,7 +278,7 @@ static int fts3ExprIterate2( ** Otherwise, SQLITE_OK is returned after a callback has been made for ** all eligible phrase nodes. */ -static int fts3ExprIterate( +int sqlite3Fts3ExprIterate( Fts3Expr *pExpr, /* Expression to iterate phrases of */ int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */ void *pCtx /* Second argument to pass to callback */ @@ -287,10 +287,9 @@ static int fts3ExprIterate( return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx); } - /* -** This is an fts3ExprIterate() callback used while loading the doclists -** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also +** This is an sqlite3Fts3ExprIterate() callback used while loading the +** doclists for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also ** fts3ExprLoadDoclists(). 
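
`fts3MIBufferAlloc()` and `fts3MIBufferFree()` above rely on a back-pointer trick: the `u32` slot immediately before each pointer handed out (`aMI[0]`, and `aMI[nElem+1]` for the second slot) stores that pointer's byte offset from the start of the `MatchinfoBuffer`, so the destructor can recover the owning buffer from the user pointer alone. A self-contained sketch of the same trick with purely illustrative names:

```c
typedef struct Owner Owner;
struct Owner {
  unsigned int aMI[8];      /* aMI[0] holds the offset of &aMI[1] */
};

/* Hand out &aMI[1]; record how far it is from the start of the Owner. */
static unsigned int *ownerAlloc(Owner *p){
  p->aMI[0] = (unsigned int)((char*)&p->aMI[1] - (char*)p);
  return &p->aMI[1];
}

/* Recover the Owner from a pointer previously returned by ownerAlloc(). */
static Owner *ownerFromPtr(unsigned int *pUser){
  return (Owner*)((char*)pUser - pUser[-1]);
}
```
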
*/ static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ @@ -322,9 +321,9 @@ static int fts3ExprLoadDoclists( int *pnToken /* OUT: Number of tokens in query */ ){ int rc; /* Return Code */ - LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */ + LoadDoclistCtx sCtx = {0,0,0}; /* Context for sqlite3Fts3ExprIterate() */ sCtx.pCsr = pCsr; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx); + rc = sqlite3Fts3ExprIterate(pCsr->pExpr,fts3ExprLoadDoclistsCb,(void*)&sCtx); if( pnPhrase ) *pnPhrase = sCtx.nPhrase; if( pnToken ) *pnToken = sCtx.nToken; return rc; @@ -337,7 +336,7 @@ static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ } static int fts3ExprPhraseCount(Fts3Expr *pExpr){ int nPhrase = 0; - (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase); return nPhrase; } @@ -399,6 +398,7 @@ static int fts3SnippetNextCandidate(SnippetIter *pIter){ return 1; } + assert( pIter->nSnippet>=0 ); pIter->iCurrent = iStart = iEnd - pIter->nSnippet + 1; for(i=0; inPhrase; i++){ SnippetPhrase *pPhrase = &pIter->aPhrase[i]; @@ -447,7 +447,7 @@ static void fts3SnippetDetails( } mCover |= mPhrase; - for(j=0; jnToken; j++){ + for(j=0; jnToken && jnSnippet; j++){ mHighlight |= (mPos>>j); } @@ -465,8 +465,9 @@ static void fts3SnippetDetails( } /* -** This function is an fts3ExprIterate() callback used by fts3BestSnippet(). -** Each invocation populates an element of the SnippetIter.aPhrase[] array. +** This function is an sqlite3Fts3ExprIterate() callback used by +** fts3BestSnippet(). Each invocation populates an element of the +** SnippetIter.aPhrase[] array. */ static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){ SnippetIter *p = (SnippetIter *)ctx; @@ -556,7 +557,9 @@ static int fts3BestSnippet( sIter.nSnippet = nSnippet; sIter.nPhrase = nList; sIter.iCurrent = -1; - rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter + ); if( rc==SQLITE_OK ){ /* Set the *pmSeen output variable. */ @@ -608,7 +611,7 @@ static int fts3StringAppend( } /* If there is insufficient space allocated at StrBuffer.z, use realloc() - ** to grow the buffer until so that it is big enough to accomadate the + ** to grow the buffer until so that it is big enough to accommodate the ** appended data. */ if( pStr->n+nAppend+1>=pStr->nAlloc ){ @@ -917,10 +920,10 @@ static int fts3ExprLHitGather( } /* -** fts3ExprIterate() callback used to collect the "global" matchinfo stats -** for a single query. +** sqlite3Fts3ExprIterate() callback used to collect the "global" matchinfo +** stats for a single query. ** -** fts3ExprIterate() callback to load the 'global' elements of a +** sqlite3Fts3ExprIterate() callback to load the 'global' elements of a ** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements ** of the matchinfo array that are constant for all rows returned by the ** current query. @@ -955,7 +958,7 @@ static int fts3ExprGlobalHitsCb( } /* -** fts3ExprIterate() callback used to collect the "local" part of the +** sqlite3Fts3ExprIterate() callback used to collect the "local" part of the ** FTS3_MATCHINFO_HITS array. The local stats are those elements of the ** array that are different for each row returned by the query. 
*/ @@ -1020,16 +1023,16 @@ static size_t fts3MatchinfoSize(MatchInfo *pInfo, char cArg){ break; case FTS3_MATCHINFO_LHITS: - nVal = pInfo->nCol * pInfo->nPhrase; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase; break; case FTS3_MATCHINFO_LHITS_BM: - nVal = pInfo->nPhrase * ((pInfo->nCol + 31) / 32); + nVal = (size_t)pInfo->nPhrase * ((pInfo->nCol + 31) / 32); break; default: assert( cArg==FTS3_MATCHINFO_HITS ); - nVal = pInfo->nCol * pInfo->nPhrase * 3; + nVal = (size_t)pInfo->nCol * pInfo->nPhrase * 3; break; } @@ -1151,7 +1154,7 @@ static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){ **/ aIter = sqlite3Fts3MallocZero(sizeof(LcsIterator) * pCsr->nPhrase); if( !aIter ) return SQLITE_NOMEM; - (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); + (void)sqlite3Fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter); for(i=0; inPhrase; i++){ LcsIterator *pIter = &aIter[i]; @@ -1328,11 +1331,11 @@ static int fts3MatchinfoValues( rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc,0,0); if( rc!=SQLITE_OK ) break; } - rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); + rc = sqlite3Fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo); sqlite3Fts3EvalTestDeferred(pCsr, &rc); if( rc!=SQLITE_OK ) break; } - (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); + (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo); break; } } @@ -1555,7 +1558,7 @@ struct TermOffsetCtx { }; /* -** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets(). +** This function is an sqlite3Fts3ExprIterate() callback used by sqlite3Fts3Offsets(). */ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ TermOffsetCtx *p = (TermOffsetCtx *)ctx; @@ -1583,6 +1586,22 @@ static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){ return rc; } +/* +** If expression pExpr is a phrase expression that uses an MSR query, +** restart it as a regular, non-incremental query. Return SQLITE_OK +** if successful, or an SQLite error code otherwise. +*/ +static int fts3ExprRestartIfCb(Fts3Expr *pExpr, int iPhrase, void *ctx){ + TermOffsetCtx *p = (TermOffsetCtx*)ctx; + int rc = SQLITE_OK; + UNUSED_PARAMETER(iPhrase); + if( pExpr->pPhrase && pExpr->pPhrase->bIncr ){ + rc = sqlite3Fts3MsrCancel(p->pCsr, pExpr); + pExpr->pPhrase->bIncr = 0; + } + return rc; +} + /* ** Implementation of offsets() function. */ @@ -1619,6 +1638,12 @@ void sqlite3Fts3Offsets( sCtx.iDocid = pCsr->iPrevId; sCtx.pCsr = pCsr; + /* If a query restart will be required, do it here, rather than later of + ** after pointers to poslist buffers that may be invalidated by a restart + ** have been saved. */ + rc = sqlite3Fts3ExprIterate(pCsr->pExpr, fts3ExprRestartIfCb, (void*)&sCtx); + if( rc!=SQLITE_OK ) goto offsets_out; + /* Loop through the table columns, appending offset information to ** string-buffer res for each column. */ @@ -1637,7 +1662,9 @@ void sqlite3Fts3Offsets( */ sCtx.iCol = iCol; sCtx.iTerm = 0; - rc = fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx); + rc = sqlite3Fts3ExprIterate( + pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx + ); if( rc!=SQLITE_OK ) goto offsets_out; /* Retreive the text stored in column iCol. 
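
The `(size_t)` casts added to `fts3MatchinfoSize()` above guard against overflow: with two `int` operands the multiplication is performed in `int` and can overflow before the result is widened to `size_t`. A short self-contained illustration of the corrected form, mirroring the FTS3_MATCHINFO_HITS case (function name is illustrative):

```c
#include <stddef.h>

size_t matchinfoBytes(int nCol, int nPhrase){
  /* Casting one operand first forces the arithmetic into size_t; without
  ** it the product would be computed in (signed) int and could overflow
  ** for large column/phrase counts. */
  return (size_t)nCol * nPhrase * 3;
}
```
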
If an SQL NULL is stored diff --git a/ext/fts3/fts3_term.c b/ext/fts3/fts3_term.c index 47e244e22c..655dd9f35a 100644 --- a/ext/fts3/fts3_term.c +++ b/ext/fts3/fts3_term.c @@ -78,6 +78,8 @@ static int fts3termConnectMethod( iIndex = atoi(argv[4]); argc--; } + + *ppVtab = 0; /* The user should specify a single argument - the name of an fts3 table. */ if( argc!=4 ){ @@ -95,12 +97,17 @@ static int fts3termConnectMethod( rc = sqlite3_declare_vtab(db, FTS3_TERMS_SCHEMA); if( rc!=SQLITE_OK ) return rc; - nByte = sizeof(Fts3termTable) + sizeof(Fts3Table) + nDb + nFts3 + 2; - p = (Fts3termTable *)sqlite3_malloc64(nByte); + nByte = sizeof(Fts3termTable); + p = (Fts3termTable *)sqlite3Fts3MallocZero(nByte); if( !p ) return SQLITE_NOMEM; - memset(p, 0, (size_t)nByte); - p->pFts3Tab = (Fts3Table *)&p[1]; + p->pFts3Tab = (Fts3Table*)sqlite3Fts3MallocZero( + sizeof(Fts3Table) + nDb + nFts3 + 2 + ); + if( p->pFts3Tab==0 ){ + sqlite3_free(p); + return SQLITE_NOMEM; + } p->pFts3Tab->zDb = (char *)&p->pFts3Tab[1]; p->pFts3Tab->zName = &p->pFts3Tab->zDb[nDb+1]; p->pFts3Tab->db = db; @@ -130,6 +137,7 @@ static int fts3termDisconnectMethod(sqlite3_vtab *pVtab){ sqlite3_finalize(pFts3->aStmt[i]); } sqlite3_free(pFts3->zSegmentsTbl); + sqlite3_free(pFts3); sqlite3_free(p); return SQLITE_OK; } @@ -362,7 +370,8 @@ int sqlite3Fts3InitTerm(sqlite3 *db){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ diff --git a/ext/fts3/fts3_test.c b/ext/fts3/fts3_test.c index 49a8476bf3..70bccf0c52 100644 --- a/ext/fts3/fts3_test.c +++ b/ext/fts3/fts3_test.c @@ -18,14 +18,7 @@ ** that the sqlite3_tokenizer_module.xLanguage() method is invoked correctly. */ -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -# ifndef SQLITE_TCLAPI -# define SQLITE_TCLAPI -# endif -#endif +#include "tclsqlite.h" #include #include @@ -167,7 +160,8 @@ static int SQLITE_TCLAPI fts3_near_match_cmd( Tcl_Obj *pPhrasecount = 0; Tcl_Obj **apExprToken; - int nExprToken; + Tcl_Size nExprToken; + Tcl_Size nn; UNUSED_PARAMETER(clientData); @@ -201,37 +195,40 @@ static int SQLITE_TCLAPI fts3_near_match_cmd( } } - rc = Tcl_ListObjGetElements(interp, objv[1], &doc.nToken, &apDocToken); + rc = Tcl_ListObjGetElements(interp, objv[1], &nn, &apDocToken); + doc.nToken = (int)nn; if( rc!=TCL_OK ) goto near_match_out; doc.aToken = (NearToken *)ckalloc(doc.nToken*sizeof(NearToken)); for(ii=0; iiNM_MAX_TOKEN ){ - Tcl_AppendResult(interp, "Too many tokens in phrase", 0); + Tcl_AppendResult(interp, "Too many tokens in phrase", NULL); rc = TCL_ERROR; goto near_match_out; } - for(jj=0; jjz = Tcl_GetStringFromObj(apToken[jj], &pT->n); + pT->z = Tcl_GetStringFromObj(apToken[jj], &nn); + pT->n = (int)nn; } - aPhrase[ii].nToken = nToken; + aPhrase[ii].nToken = (int)nToken; } for(ii=1; iizInput = sqlite3_malloc64(nByte+1); if( pCsr->zInput==0 ){ rc = SQLITE_NOMEM; @@ -420,7 +420,7 @@ static int fts3tokRowidMethod( ** Register the fts3tok module with database connection db. Return SQLITE_OK ** if successful or an error code if sqlite3_create_module() fails. 
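
In the hunk that follows, `sqlite3Fts3InitTok()` gains an `xDestroy` parameter and registers the fts3tokenize module with `sqlite3_create_module_v2()` instead of `sqlite3_create_module()`, so SQLite itself releases the shared tokenizer hash when the module is unregistered or the connection closes. A minimal public-API sketch of that registration pattern; the module struct, client-data pointer, and names here are placeholders, not the patch's code:

```c
#include "sqlite3.h"

/* Hypothetical destructor for the client-data pointer passed at
** registration time; SQLite invokes it exactly once for the module. */
static void exampleClientDataDestroy(void *pClientData){
  sqlite3_free(pClientData);
}

int exampleRegisterModule(sqlite3 *db, const sqlite3_module *pModule,
                          void *pClientData){
  /* Identical to sqlite3_create_module() apart from the destructor arg. */
  return sqlite3_create_module_v2(db, "exampletokenize", pModule,
                                  pClientData, exampleClientDataDestroy);
}
```
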
*/ -int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ +int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash, void(*xDestroy)(void*)){ static const sqlite3_module fts3tok_module = { 0, /* iVersion */ fts3tokConnectMethod, /* xCreate */ @@ -445,11 +445,14 @@ int sqlite3Fts3InitTok(sqlite3 *db, Fts3Hash *pHash){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ - rc = sqlite3_create_module(db, "fts3tokenize", &fts3tok_module, (void*)pHash); + rc = sqlite3_create_module_v2( + db, "fts3tokenize", &fts3tok_module, (void*)pHash, xDestroy + ); return rc; } diff --git a/ext/fts3/fts3_tokenizer.c b/ext/fts3/fts3_tokenizer.c index eab3f513e5..24c237a89d 100644 --- a/ext/fts3/fts3_tokenizer.c +++ b/ext/fts3/fts3_tokenizer.c @@ -226,11 +226,7 @@ int sqlite3Fts3InitTokenizer( #ifdef SQLITE_TEST -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -#endif +#include "tclsqlite.h" #include /* diff --git a/ext/fts3/fts3_tokenizer1.c b/ext/fts3/fts3_tokenizer1.c index deea06d92b..78e5889da5 100644 --- a/ext/fts3/fts3_tokenizer1.c +++ b/ext/fts3/fts3_tokenizer1.c @@ -185,7 +185,7 @@ static int simpleNext( if( n>c->nTokenAllocated ){ char *pNew; c->nTokenAllocated = n+20; - pNew = sqlite3_realloc(c->pToken, c->nTokenAllocated); + pNew = sqlite3_realloc64(c->pToken, c->nTokenAllocated); if( !pNew ) return SQLITE_NOMEM; c->pToken = pNew; } diff --git a/ext/fts3/fts3_write.c b/ext/fts3/fts3_write.c index 201e5813c6..19dff31f00 100644 --- a/ext/fts3/fts3_write.c +++ b/ext/fts3/fts3_write.c @@ -649,7 +649,7 @@ static int fts3PendingListAppendVarint( /* Allocate or grow the PendingList as required. */ if( !p ){ - p = sqlite3_malloc(sizeof(*p) + 100); + p = sqlite3_malloc64(sizeof(*p) + 100); if( !p ){ return SQLITE_NOMEM; } @@ -658,14 +658,14 @@ static int fts3PendingListAppendVarint( p->nData = 0; } else if( p->nData+FTS3_VARINT_MAX+1>p->nSpace ){ - int nNew = p->nSpace * 2; - p = sqlite3_realloc(p, sizeof(*p) + nNew); + i64 nNew = p->nSpace * 2; + p = sqlite3_realloc64(p, sizeof(*p) + nNew); if( !p ){ sqlite3_free(*pp); *pp = 0; return SQLITE_NOMEM; } - p->nSpace = nNew; + p->nSpace = (int)nNew; p->aData = (char *)&p[1]; } @@ -1222,7 +1222,7 @@ int sqlite3Fts3ReadBlock( int nByte = sqlite3_blob_bytes(p->pSegments); *pnBlob = nByte; if( paBlob ){ - char *aByte = sqlite3_malloc(nByte + FTS3_NODE_PADDING); + char *aByte = sqlite3_malloc64((i64)nByte + FTS3_NODE_PADDING); if( !aByte ){ rc = SQLITE_NOMEM; }else{ @@ -1339,7 +1339,7 @@ static int fts3SegReaderNext( int nTerm = fts3HashKeysize(pElem); if( (nTerm+1)>pReader->nTermAlloc ){ sqlite3_free(pReader->zTerm); - pReader->zTerm = (char*)sqlite3_malloc((nTerm+1)*2); + pReader->zTerm = (char*)sqlite3_malloc64(((i64)nTerm+1)*2); if( !pReader->zTerm ) return SQLITE_NOMEM; pReader->nTermAlloc = (nTerm+1)*2; } @@ -1347,7 +1347,7 @@ static int fts3SegReaderNext( pReader->zTerm[nTerm] = '\0'; pReader->nTerm = nTerm; - aCopy = (char*)sqlite3_malloc(nCopy); + aCopy = (char*)sqlite3_malloc64(nCopy); if( !aCopy ) return SQLITE_NOMEM; memcpy(aCopy, pList->aData, nCopy); pReader->nNode = pReader->nDoclist = nCopy; @@ -1634,7 +1634,7 @@ int sqlite3Fts3SegReaderNew( nExtra = nRoot + FTS3_NODE_PADDING; } - pReader = (Fts3SegReader *)sqlite3_malloc(sizeof(Fts3SegReader) + nExtra); + pReader = (Fts3SegReader *)sqlite3_malloc64(sizeof(Fts3SegReader) + nExtra); if( !pReader ){ return SQLITE_NOMEM; } @@ -1726,7 +1726,7 @@ int 
sqlite3Fts3SegReaderPending( if( nElem==nAlloc ){ Fts3HashElem **aElem2; nAlloc += 16; - aElem2 = (Fts3HashElem **)sqlite3_realloc( + aElem2 = (Fts3HashElem **)sqlite3_realloc64( aElem, nAlloc*sizeof(Fts3HashElem *) ); if( !aElem2 ){ @@ -2060,7 +2060,7 @@ static int fts3NodeAddTerm( ** this is not expected to be a serious problem. */ assert( pTree->aData==(char *)&pTree[1] ); - pTree->aData = (char *)sqlite3_malloc(nReq); + pTree->aData = (char *)sqlite3_malloc64(nReq); if( !pTree->aData ){ return SQLITE_NOMEM; } @@ -2078,7 +2078,7 @@ static int fts3NodeAddTerm( if( isCopyTerm ){ if( pTree->nMalloczMalloc, nTerm*2); + char *zNew = sqlite3_realloc64(pTree->zMalloc, (i64)nTerm*2); if( !zNew ){ return SQLITE_NOMEM; } @@ -2104,7 +2104,7 @@ static int fts3NodeAddTerm( ** now. Instead, the term is inserted into the parent of pTree. If pTree ** has no parent, one is created here. */ - pNew = (SegmentNode *)sqlite3_malloc(sizeof(SegmentNode) + p->nNodeSize); + pNew = (SegmentNode *)sqlite3_malloc64(sizeof(SegmentNode) + p->nNodeSize); if( !pNew ){ return SQLITE_NOMEM; } @@ -2242,7 +2242,7 @@ static int fts3SegWriterAdd( ){ int nPrefix; /* Size of term prefix in bytes */ int nSuffix; /* Size of term suffix in bytes */ - int nReq; /* Number of bytes required on leaf page */ + i64 nReq; /* Number of bytes required on leaf page */ int nData; SegmentWriter *pWriter = *ppWriter; @@ -2251,13 +2251,13 @@ static int fts3SegWriterAdd( sqlite3_stmt *pStmt; /* Allocate the SegmentWriter structure */ - pWriter = (SegmentWriter *)sqlite3_malloc(sizeof(SegmentWriter)); + pWriter = (SegmentWriter *)sqlite3_malloc64(sizeof(SegmentWriter)); if( !pWriter ) return SQLITE_NOMEM; memset(pWriter, 0, sizeof(SegmentWriter)); *ppWriter = pWriter; /* Allocate a buffer in which to accumulate data */ - pWriter->aData = (char *)sqlite3_malloc(p->nNodeSize); + pWriter->aData = (char *)sqlite3_malloc64(p->nNodeSize); if( !pWriter->aData ) return SQLITE_NOMEM; pWriter->nSize = p->nNodeSize; @@ -2332,7 +2332,7 @@ static int fts3SegWriterAdd( ** the buffer to make it large enough. 
*/ if( nReq>pWriter->nSize ){ - char *aNew = sqlite3_realloc(pWriter->aData, nReq); + char *aNew = sqlite3_realloc64(pWriter->aData, nReq); if( !aNew ) return SQLITE_NOMEM; pWriter->aData = aNew; pWriter->nSize = nReq; @@ -2357,7 +2357,7 @@ static int fts3SegWriterAdd( */ if( isCopyTerm ){ if( nTerm>pWriter->nMalloc ){ - char *zNew = sqlite3_realloc(pWriter->zMalloc, nTerm*2); + char *zNew = sqlite3_realloc64(pWriter->zMalloc, (i64)nTerm*2); if( !zNew ){ return SQLITE_NOMEM; } @@ -2665,18 +2665,20 @@ static void fts3ColumnFilter( static int fts3MsrBufferData( Fts3MultiSegReader *pMsr, /* Multi-segment-reader handle */ char *pList, - int nList + i64 nList ){ - if( nList>pMsr->nBuffer ){ + if( (nList+FTS3_NODE_PADDING)>pMsr->nBuffer ){ char *pNew; - pMsr->nBuffer = nList*2; - pNew = (char *)sqlite3_realloc(pMsr->aBuffer, pMsr->nBuffer); + int nNew = nList*2 + FTS3_NODE_PADDING; + pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, nNew); if( !pNew ) return SQLITE_NOMEM; pMsr->aBuffer = pNew; + pMsr->nBuffer = nNew; } assert( nList>0 ); memcpy(pMsr->aBuffer, pList, nList); + memset(&pMsr->aBuffer[nList], 0, FTS3_NODE_PADDING); return SQLITE_OK; } @@ -2726,7 +2728,7 @@ int sqlite3Fts3MsrIncrNext( fts3SegReaderSort(pMsr->apSegment, nMerge, j, xCmp); if( nList>0 && fts3SegReaderIsPending(apSegment[0]) ){ - rc = fts3MsrBufferData(pMsr, pList, nList+1); + rc = fts3MsrBufferData(pMsr, pList, (i64)nList+1); if( rc!=SQLITE_OK ) return rc; assert( (pMsr->aBuffer[nList] & 0xFE)==0x00 ); pList = pMsr->aBuffer; @@ -2863,11 +2865,11 @@ int sqlite3Fts3MsrIncrRestart(Fts3MultiSegReader *pCsr){ return SQLITE_OK; } -static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, int nReq){ +static int fts3GrowSegReaderBuffer(Fts3MultiSegReader *pCsr, i64 nReq){ if( nReq>pCsr->nBuffer ){ char *aNew; pCsr->nBuffer = nReq*2; - aNew = sqlite3_realloc(pCsr->aBuffer, pCsr->nBuffer); + aNew = sqlite3_realloc64(pCsr->aBuffer, pCsr->nBuffer); if( !aNew ){ return SQLITE_NOMEM; } @@ -2958,7 +2960,8 @@ int sqlite3Fts3SegReaderStep( ){ pCsr->nDoclist = apSegment[0]->nDoclist; if( fts3SegReaderIsPending(apSegment[0]) ){ - rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, pCsr->nDoclist); + rc = fts3MsrBufferData(pCsr, apSegment[0]->aDoclist, + (i64)pCsr->nDoclist); pCsr->aDoclist = pCsr->aBuffer; }else{ pCsr->aDoclist = apSegment[0]->aDoclist; @@ -3011,7 +3014,8 @@ int sqlite3Fts3SegReaderStep( nByte = sqlite3Fts3VarintLen(iDelta) + (isRequirePos?nList+1:0); - rc = fts3GrowSegReaderBuffer(pCsr, nByte+nDoclist+FTS3_NODE_PADDING); + rc = fts3GrowSegReaderBuffer(pCsr, + (i64)nByte+nDoclist+FTS3_NODE_PADDING); if( rc ) return rc; if( isFirst ){ @@ -3037,7 +3041,7 @@ int sqlite3Fts3SegReaderStep( fts3SegReaderSort(apSegment, nMerge, j, xCmp); } if( nDoclist>0 ){ - rc = fts3GrowSegReaderBuffer(pCsr, nDoclist+FTS3_NODE_PADDING); + rc = fts3GrowSegReaderBuffer(pCsr, (i64)nDoclist+FTS3_NODE_PADDING); if( rc ) return rc; memset(&pCsr->aBuffer[nDoclist], 0, FTS3_NODE_PADDING); pCsr->aDoclist = pCsr->aBuffer; @@ -3321,7 +3325,6 @@ int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ rc = fts3SegmentMerge(p, p->iPrevLangid, i, FTS3_SEGCURSOR_PENDING); if( rc==SQLITE_DONE ) rc = SQLITE_OK; } - sqlite3Fts3PendingTermsClear(p); /* Determine the auto-incr-merge setting if unknown. 
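
`fts3MsrBufferData()` and `fts3GrowSegReaderBuffer()` above now take 64-bit sizes and allocate through `sqlite3_realloc64()`, at least doubling the buffer and zeroing FTS3_NODE_PADDING bytes at the end so varint readers cannot run past the allocation. A condensed sketch of the same grow-and-pad pattern, assuming only the public sqlite3 allocator API (names and the padding constant are illustrative):

```c
#include <string.h>
#include "sqlite3.h"

#define EXAMPLE_PADDING 8   /* stands in for FTS3_NODE_PADDING */

static int exampleBufferData(char **pa, sqlite3_int64 *pnAlloc,
                             const char *pData, sqlite3_int64 nData){
  if( nData+EXAMPLE_PADDING > *pnAlloc ){
    sqlite3_int64 nNew = nData*2 + EXAMPLE_PADDING;   /* grow by >= 2x */
    char *aNew = sqlite3_realloc64(*pa, nNew);
    if( aNew==0 ) return SQLITE_NOMEM;
    *pa = aNew;
    *pnAlloc = nNew;
  }
  memcpy(*pa, pData, (size_t)nData);
  memset(&(*pa)[nData], 0, EXAMPLE_PADDING);          /* zero the padding */
  return SQLITE_OK;
}
```
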
If enabled, ** estimate the number of leaf blocks of content to be written @@ -3343,6 +3346,10 @@ int sqlite3Fts3PendingTermsFlush(Fts3Table *p){ rc = sqlite3_reset(pStmt); } } + + if( rc==SQLITE_OK ){ + sqlite3Fts3PendingTermsClear(p); + } return rc; } @@ -3707,8 +3714,8 @@ struct NodeWriter { ** to an appendable b-tree segment. */ struct IncrmergeWriter { - int nLeafEst; /* Space allocated for leaf blocks */ - int nWork; /* Number of leaf pages flushed */ + i64 nLeafEst; /* Space allocated for leaf blocks */ + i64 nWork; /* Number of leaf pages flushed */ sqlite3_int64 iAbsLevel; /* Absolute level of input segments */ int iIdx; /* Index of *output* segment in iAbsLevel+1 */ sqlite3_int64 iStart; /* Block number of first allocated block */ @@ -3750,7 +3757,7 @@ struct NodeReader { static void blobGrowBuffer(Blob *pBlob, int nMin, int *pRc){ if( *pRc==SQLITE_OK && nMin>pBlob->nAlloc ){ int nAlloc = nMin; - char *a = (char *)sqlite3_realloc(pBlob->a, nAlloc); + char *a = (char *)sqlite3_realloc64(pBlob->a, nAlloc); if( a ){ pBlob->nAlloc = nAlloc; pBlob->a = a; @@ -3899,6 +3906,8 @@ static int fts3IncrmergePush( pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nPrefix); } pBlk->n += sqlite3Fts3PutVarint(&pBlk->a[pBlk->n], nSuffix); + assert( nPrefix+nSuffix<=nTerm ); + assert( nPrefix>=0 ); memcpy(&pBlk->a[pBlk->n], &zTerm[nPrefix], nSuffix); pBlk->n += nSuffix; @@ -3947,7 +3956,7 @@ static int fts3IncrmergePush( ** ** It is assumed that the buffer associated with pNode is already large ** enough to accommodate the new entry. The buffer associated with pPrev -** is extended by this function if requrired. +** is extended by this function if required. ** ** If an error (i.e. OOM condition) occurs, an SQLite error code is ** returned. Otherwise, SQLITE_OK. @@ -3972,6 +3981,8 @@ static int fts3AppendToNode( blobGrowBuffer(pPrev, nTerm, &rc); if( rc!=SQLITE_OK ) return rc; + assert( pPrev!=0 ); + assert( pPrev->a!=0 ); nPrefix = fts3PrefixCompress(pPrev->a, pPrev->n, zTerm, nTerm); nSuffix = nTerm - nPrefix; @@ -4021,15 +4032,20 @@ static int fts3IncrmergeAppend( pLeaf = &pWriter->aNodeWriter[0]; nPrefix = fts3PrefixCompress(pLeaf->key.a, pLeaf->key.n, zTerm, nTerm); nSuffix = nTerm - nPrefix; + if(nSuffix<=0 ) return FTS_CORRUPT_VTAB; nSpace = sqlite3Fts3VarintLen(nPrefix); nSpace += sqlite3Fts3VarintLen(nSuffix) + nSuffix; nSpace += sqlite3Fts3VarintLen(nDoclist) + nDoclist; /* If the current block is not empty, and if adding this term/doclist - ** to the current block would make it larger than Fts3Table.nNodeSize - ** bytes, write this block out to the database. */ - if( pLeaf->block.n>0 && (pLeaf->block.n + nSpace)>p->nNodeSize ){ + ** to the current block would make it larger than Fts3Table.nNodeSize bytes, + ** and if there is still room for another leaf page, write this block out to + ** the database. 
*/ + if( pLeaf->block.n>0 + && (pLeaf->block.n + nSpace)>p->nNodeSize + && pLeaf->iBlock < (pWriter->iStart + pWriter->nLeafEst) + ){ rc = fts3WriteSegment(p, pLeaf->iBlock, pLeaf->block.a, pLeaf->block.n); pWriter->nWork++; @@ -4340,6 +4356,7 @@ static int fts3IncrmergeLoad( for(i=nHeight; i>=0 && rc==SQLITE_OK; i--){ NodeReader reader; + memset(&reader, 0, sizeof(reader)); pNode = &pWriter->aNodeWriter[i]; if( pNode->block.a){ @@ -4360,7 +4377,7 @@ static int fts3IncrmergeLoad( rc = sqlite3Fts3ReadBlock(p, reader.iChild, &aBlock, &nBlock,0); blobGrowBuffer(&pNode->block, MAX(nBlock, p->nNodeSize)+FTS3_NODE_PADDING, &rc - ); + ); if( rc==SQLITE_OK ){ memcpy(pNode->block.a, aBlock, nBlock); pNode->block.n = nBlock; @@ -4444,7 +4461,7 @@ static int fts3IncrmergeWriter( ){ int rc; /* Return Code */ int i; /* Iterator variable */ - int nLeafEst = 0; /* Blocks allocated for leaf nodes */ + i64 nLeafEst = 0; /* Blocks allocated for leaf nodes */ sqlite3_stmt *pLeafEst = 0; /* SQL used to determine nLeafEst */ sqlite3_stmt *pFirstBlock = 0; /* SQL used to determine first block */ @@ -4454,7 +4471,7 @@ static int fts3IncrmergeWriter( sqlite3_bind_int64(pLeafEst, 1, iAbsLevel); sqlite3_bind_int64(pLeafEst, 2, pCsr->nSegment); if( SQLITE_ROW==sqlite3_step(pLeafEst) ){ - nLeafEst = sqlite3_column_int(pLeafEst, 0); + nLeafEst = sqlite3_column_int64(pLeafEst, 0); } rc = sqlite3_reset(pLeafEst); } @@ -4544,7 +4561,7 @@ static int fts3RepackSegdirLevel( if( nIdx>=nAlloc ){ int *aNew; nAlloc += 16; - aNew = sqlite3_realloc(aIdx, nAlloc*sizeof(int)); + aNew = sqlite3_realloc64(aIdx, nAlloc*sizeof(int)); if( !aNew ){ rc = SQLITE_NOMEM; break; @@ -4918,7 +4935,7 @@ int sqlite3Fts3Incrmerge(Fts3Table *p, int nMerge, int nMin){ /* Allocate space for the cursor, filter and writer objects */ const int nAlloc = sizeof(*pCsr) + sizeof(*pFilter) + sizeof(*pWriter); - pWriter = (IncrmergeWriter *)sqlite3_malloc(nAlloc); + pWriter = (IncrmergeWriter *)sqlite3_malloc64(nAlloc); if( !pWriter ) return SQLITE_NOMEM; pFilter = (Fts3SegFilter *)&pWriter[1]; pCsr = (Fts3MultiSegReader *)&pFilter[1]; @@ -5210,7 +5227,7 @@ static u64 fts3ChecksumIndex( int rc; u64 cksum = 0; - assert( *pRc==SQLITE_OK ); + if( *pRc ) return 0; memset(&filter, 0, sizeof(filter)); memset(&csr, 0, sizeof(csr)); @@ -5277,7 +5294,7 @@ static u64 fts3ChecksumIndex( ** If an error occurs (e.g. an OOM or IO error), return an SQLite error ** code. The final value of *pbOk is undefined in this case. 
*/ -static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ +int sqlite3Fts3IntegrityCheck(Fts3Table *p, int *pbOk){ int rc = SQLITE_OK; /* Return code */ u64 cksum1 = 0; /* Checksum based on FTS index contents */ u64 cksum2 = 0; /* Checksum based on %_content contents */ @@ -5355,7 +5372,12 @@ static int fts3IntegrityCheck(Fts3Table *p, int *pbOk){ sqlite3_finalize(pStmt); } - *pbOk = (cksum1==cksum2); + if( rc==SQLITE_CORRUPT_VTAB ){ + rc = SQLITE_OK; + *pbOk = 0; + }else{ + *pbOk = (rc==SQLITE_OK && cksum1==cksum2); + } return rc; } @@ -5395,7 +5417,7 @@ static int fts3DoIntegrityCheck( ){ int rc; int bOk = 0; - rc = fts3IntegrityCheck(p, &bOk); + rc = sqlite3Fts3IntegrityCheck(p, &bOk); if( rc==SQLITE_OK && bOk==0 ) rc = FTS_CORRUPT_VTAB; return rc; } @@ -5425,8 +5447,11 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ rc = fts3DoIncrmerge(p, &zVal[6]); }else if( nVal>10 && 0==sqlite3_strnicmp(zVal, "automerge=", 10) ){ rc = fts3DoAutoincrmerge(p, &zVal[10]); + }else if( nVal==5 && 0==sqlite3_strnicmp(zVal, "flush", 5) ){ + rc = sqlite3Fts3PendingTermsFlush(p); + } #if defined(SQLITE_DEBUG) || defined(SQLITE_TEST) - }else{ + else{ int v; if( nVal>9 && 0==sqlite3_strnicmp(zVal, "nodesize=", 9) ){ v = atoi(&zVal[9]); @@ -5444,8 +5469,8 @@ static int fts3SpecialInsert(Fts3Table *p, sqlite3_value *pVal){ if( v>=4 && v<=FTS3_MERGE_COUNT && (v&1)==0 ) p->nMergeCount = v; rc = SQLITE_OK; } -#endif } +#endif return rc; } @@ -5554,7 +5579,7 @@ int sqlite3Fts3DeferredTokenList( return SQLITE_OK; } - pRet = (char *)sqlite3_malloc(p->pList->nData); + pRet = (char *)sqlite3_malloc64(p->pList->nData); if( !pRet ) return SQLITE_NOMEM; nSkip = sqlite3Fts3GetVarint(p->pList->aData, &dummy); @@ -5574,7 +5599,7 @@ int sqlite3Fts3DeferToken( int iCol /* Column that token must appear in (or -1) */ ){ Fts3DeferredToken *pDeferred; - pDeferred = sqlite3_malloc(sizeof(*pDeferred)); + pDeferred = sqlite3_malloc64(sizeof(*pDeferred)); if( !pDeferred ){ return SQLITE_NOMEM; } @@ -5594,7 +5619,7 @@ int sqlite3Fts3DeferToken( /* ** SQLite value pRowid contains the rowid of a row that may or may not be ** present in the FTS3 table. If it is, delete it and adjust the contents -** of subsiduary data structures accordingly. +** of subsidiary data structures accordingly. */ static int fts3DeleteByRowid( Fts3Table *p, diff --git a/ext/fts3/mkfts3amal.tcl b/ext/fts3/mkfts3amal.tcl deleted file mode 100644 index 059048717f..0000000000 --- a/ext/fts3/mkfts3amal.tcl +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/tclsh -# -# This script builds a single C code file holding all of FTS3 code. -# The name of the output file is fts3amal.c. To build this file, -# first do: -# -# make target_source -# -# The make target above moves all of the source code files into -# a subdirectory named "tsrc". (This script expects to find the files -# there and will not work if they are not found.) -# -# After the "tsrc" directory has been created and populated, run -# this script: -# -# tclsh mkfts3amal.tcl -# -# The amalgamated FTS3 code will be written into fts3amal.c -# - -# Open the output file and write a header comment at the beginning -# of the file. -# -set out [open fts3amal.c w] -set today [clock format [clock seconds] -format "%Y-%m-%d %H:%M:%S UTC" -gmt 1] -puts $out [subst \ -{/****************************************************************************** -** This file is an amalgamation of separate C source files from the SQLite -** Full Text Search extension 2 (fts3). 
By combining all the individual C -** code files into this single large file, the entire code can be compiled -** as a one translation unit. This allows many compilers to do optimizations -** that would not be possible if the files were compiled separately. It also -** makes the code easier to import into other projects. -** -** This amalgamation was generated on $today. -*/}] - -# These are the header files used by FTS3. The first time any of these -# files are seen in a #include statement in the C code, include the complete -# text of the file in-line. The file only needs to be included once. -# -foreach hdr { - fts3.h - fts3_hash.h - fts3_tokenizer.h - sqlite3.h - sqlite3ext.h -} { - set available_hdr($hdr) 1 -} - -# 78 stars used for comment formatting. -set s78 \ -{*****************************************************************************} - -# Insert a comment into the code -# -proc section_comment {text} { - global out s78 - set n [string length $text] - set nstar [expr {60 - $n}] - set stars [string range $s78 0 $nstar] - puts $out "/************** $text $stars/" -} - -# Read the source file named $filename and write it into the -# sqlite3.c output file. If any #include statements are seen, -# process them approprately. -# -proc copy_file {filename} { - global seen_hdr available_hdr out - set tail [file tail $filename] - section_comment "Begin file $tail" - set in [open $filename r] - while {![eof $in]} { - set line [gets $in] - if {[regexp {^#\s*include\s+["<]([^">]+)[">]} $line all hdr]} { - if {[info exists available_hdr($hdr)]} { - if {$available_hdr($hdr)} { - section_comment "Include $hdr in the middle of $tail" - copy_file tsrc/$hdr - section_comment "Continuing where we left off in $tail" - } - } elseif {![info exists seen_hdr($hdr)]} { - set seen_hdr($hdr) 1 - puts $out $line - } - } elseif {[regexp {^#ifdef __cplusplus} $line]} { - puts $out "#if 0" - } elseif {[regexp {^#line} $line]} { - # Skip #line directives. - } else { - puts $out $line - } - } - close $in - section_comment "End of $tail" -} - - -# Process the source files. Process files containing commonly -# used subroutines first in order to help the compiler find -# inlining opportunities. -# -foreach file { - fts3.c - fts3_hash.c - fts3_porter.c - fts3_tokenizer.c - fts3_tokenizer1.c -} { - copy_file tsrc/$file -} - -close $out diff --git a/ext/fts3/unicode/mkunicode.tcl b/ext/fts3/unicode/mkunicode.tcl index 58d90c68c7..3bf866ef74 100644 --- a/ext/fts3/unicode/mkunicode.tcl +++ b/ext/fts3/unicode/mkunicode.tcl @@ -628,6 +628,9 @@ proc print_categories {lMap} { $caseP $caseS $caseZ + + default: + return 1; } return 0; } @@ -890,7 +893,7 @@ proc print_test_main {} { puts "\}" } -# Proces the command line arguments. Exit early if they are not to +# Process the command line arguments. Exit early if they are not to # our liking. # proc usage {} { diff --git a/ext/fts5/extract_api_docs.tcl b/ext/fts5/extract_api_docs.tcl index 2320d70b7d..6ee71c262c 100644 --- a/ext/fts5/extract_api_docs.tcl +++ b/ext/fts5/extract_api_docs.tcl @@ -82,7 +82,7 @@ proc get_struct_docs {data names} { set current_doc "" } set subject n/a - regexp {^ *([[:alpha:]]*)} $line -> subject + regexp {^ *([[:alnum:]_]*)} $line -> subject if {[lsearch $names $subject]>=0} { set current_header $subject } else { @@ -108,8 +108,11 @@ proc get_tokenizer_docs {data} { append res "
    $line

    \n" continue } + if {[regexp {FTS5_TOKENIZER} $line]} { + set line

    + } if {[regexp {SYNONYM SUPPORT} $line]} { - set line "

    Synonym Support

    " + set line "

    Synonym Support

    " } if {[string trim $line] == ""} { append res "

    \n" @@ -223,10 +226,12 @@ proc main {data} { Fts5ExtensionApi { set struct [get_fts5_struct $data "^struct Fts5ExtensionApi" "^.;"] set map [list] + set lKey [list] foreach {k v} [get_struct_members $data] { if {[string match x* $k]==0} continue - lappend map $k "$k" + lappend lKey $k } + foreach k [lsort -decr $lKey] { lappend map $k "$k" } output [string map $map $struct] } diff --git a/ext/fts5/fts5.h b/ext/fts5/fts5.h index 081e534f3f..907ea232a4 100644 --- a/ext/fts5/fts5.h +++ b/ext/fts5/fts5.h @@ -55,8 +55,8 @@ struct Fts5PhraseIter { ** EXTENSION API FUNCTIONS ** ** xUserData(pFts): -** Return a copy of the context pointer the extension function was -** registered with. +** Return a copy of the pUserData pointer passed to the xCreateFunction() +** API when the extension function was registered. ** ** xColumnTotalSize(pFts, iCol, pnToken): ** If parameter iCol is less than zero, set output variable *pnToken @@ -88,8 +88,11 @@ struct Fts5PhraseIter { ** created with the "columnsize=0" option. ** ** xColumnText: -** This function attempts to retrieve the text of column iCol of the -** current document. If successful, (*pz) is set to point to a buffer +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the text of column iCol of +** the current document. If successful, (*pz) is set to point to a buffer ** containing the text in utf-8 encoding, (*pn) is set to the size in bytes ** (not characters) of the buffer and SQLITE_OK is returned. Otherwise, ** if an error occurs, an SQLite error code is returned and the final values @@ -99,8 +102,10 @@ struct Fts5PhraseIter { ** Returns the number of phrases in the current query expression. ** ** xPhraseSize: -** Returns the number of tokens in phrase iPhrase of the query. Phrases -** are numbered starting from zero. +** If parameter iCol is less than zero, or greater than or equal to the +** number of phrases in the current query, as returned by xPhraseCount, +** 0 is returned. Otherwise, this function returns the number of tokens in +** phrase iPhrase of the query. Phrases are numbered starting from zero. ** ** xInstCount: ** Set *pnInst to the total number of occurrences of all phrases within @@ -116,12 +121,13 @@ struct Fts5PhraseIter { ** Query for the details of phrase match iIdx within the current row. ** Phrase matches are numbered starting from zero, so the iIdx argument ** should be greater than or equal to zero and smaller than the value -** output by xInstCount(). +** output by xInstCount(). If iIdx is less than zero or greater than +** or equal to the value returned by xInstCount(), SQLITE_RANGE is returned. ** -** Usually, output parameter *piPhrase is set to the phrase number, *piCol +** Otherwise, output parameter *piPhrase is set to the phrase number, *piCol ** to the column in which it occurs and *piOff the token offset of the -** first token of the phrase. Returns SQLITE_OK if successful, or an error -** code (i.e. SQLITE_NOMEM) if an error occurs. +** first token of the phrase. SQLITE_OK is returned if successful, or an +** error code (i.e. SQLITE_NOMEM) if an error occurs. ** ** This API can be quite slow if used with an FTS5 table created with the ** "detail=none" or "detail=column" option. @@ -147,6 +153,10 @@ struct Fts5PhraseIter { ** Invoking Api.xUserData() returns a copy of the pointer passed as ** the third argument to pUserData. 
** +** If parameter iPhrase is less than zero, or greater than or equal to +** the number of phrases in the query, as returned by xPhraseCount(), +** this function returns SQLITE_RANGE. +** ** If the callback function returns any value other than SQLITE_OK, the ** query is abandoned and the xQueryPhrase function returns immediately. ** If the returned value is SQLITE_DONE, xQueryPhrase returns SQLITE_OK. @@ -228,6 +238,10 @@ struct Fts5PhraseIter { ** (i.e. if it is a contentless table), then this API always iterates ** through an empty set (all calls to xPhraseFirst() set iCol to -1). ** +** In all cases, matches are visited in (column ASC, offset ASC) order. +** i.e. all those in column 0, sorted by offset, followed by those in +** column 1, etc. +** ** xPhraseNext() ** See xPhraseFirst above. ** @@ -261,9 +275,80 @@ struct Fts5PhraseIter { ** ** xPhraseNextColumn() ** See xPhraseFirstColumn above. +** +** xQueryToken(pFts5, iPhrase, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase iPhrase of the current +** query. Before returning, output parameter *ppToken is set to point +** to a buffer containing the requested token, and *pnToken to the +** size of this buffer in bytes. +** +** If iPhrase or iToken are less than zero, or if iPhrase is greater than +** or equal to the number of phrases in the query as reported by +** xPhraseCount(), or if iToken is equal to or greater than the number of +** tokens in the phrase, SQLITE_RANGE is returned and *ppToken and *pnToken + are both zeroed. +** +** The output text is not a copy of the query text that specified the +** token. It is the output of the tokenizer module. For tokendata=1 +** tables, this includes any embedded 0x00 and trailing data. +** +** xInstToken(pFts5, iIdx, iToken, ppToken, pnToken) +** This is used to access token iToken of phrase hit iIdx within the +** current row. If iIdx is less than zero or greater than or equal to the +** value returned by xInstCount(), SQLITE_RANGE is returned. Otherwise, +** output variable (*ppToken) is set to point to a buffer containing the +** matching document token, and (*pnToken) to the size of that buffer in +** bytes. +** +** The output text is not a copy of the document text that was tokenized. +** It is the output of the tokenizer module. For tokendata=1 tables, this +** includes any embedded 0x00 and trailing data. +** +** This API may be slow in some cases if the token identified by parameters +** iIdx and iToken matched a prefix token in the query. In most cases, the +** first call to this API for each prefix token in the query is forced +** to scan the portion of the full-text index that matches the prefix +** token to collect the extra data required by this API. If the prefix +** token matches a large number of token instances in the document set, +** this may be a performance problem. +** +** If the user knows in advance that a query may use this API for a +** prefix token, FTS5 may be configured to collect all required data as part +** of the initial querying of the full-text index, avoiding the second scan +** entirely. This also causes prefix queries that do not use this API to +** run more slowly and use more memory. FTS5 may be configured in this way +** either on a per-table basis using the [FTS5 insttoken | 'insttoken'] +** option, or on a per-query basis using the +** [fts5_insttoken | fts5_insttoken()] user function. +** +** This API can be quite slow if used with an FTS5 table created with the +** "detail=none" or "detail=column" option. 
+** +** xColumnLocale(pFts5, iIdx, pzLocale, pnLocale) +** If parameter iCol is less than zero, or greater than or equal to the +** number of columns in the table, SQLITE_RANGE is returned. +** +** Otherwise, this function attempts to retrieve the locale associated +** with column iCol of the current row. Usually, there is no associated +** locale, and output parameters (*pzLocale) and (*pnLocale) are set +** to NULL and 0, respectively. However, if the fts5_locale() function +** was used to associate a locale with the value when it was inserted +** into the fts5 table, then (*pzLocale) is set to point to a nul-terminated +** buffer containing the name of the locale in utf-8 encoding. (*pnLocale) +** is set to the size in bytes of the buffer, not including the +** nul-terminator. +** +** If successful, SQLITE_OK is returned. Or, if an error occurs, an +** SQLite error code is returned. The final value of the output parameters +** is undefined in this case. +** +** xTokenize_v2: +** Tokenize text using the tokenizer belonging to the FTS5 table. This +** API is the same as the xTokenize() API, except that it allows a tokenizer +** locale to be specified. */ struct Fts5ExtensionApi { - int iVersion; /* Currently always set to 3 */ + int iVersion; /* Currently always set to 4 */ void *(*xUserData)(Fts5Context*); @@ -298,6 +383,22 @@ struct Fts5ExtensionApi { int (*xPhraseFirstColumn)(Fts5Context*, int iPhrase, Fts5PhraseIter*, int*); void (*xPhraseNextColumn)(Fts5Context*, Fts5PhraseIter*, int *piCol); + + /* Below this point are iVersion>=3 only */ + int (*xQueryToken)(Fts5Context*, + int iPhrase, int iToken, + const char **ppToken, int *pnToken + ); + int (*xInstToken)(Fts5Context*, int iIdx, int iToken, const char**, int*); + + /* Below this point are iVersion>=4 only */ + int (*xColumnLocale)(Fts5Context*, int iCol, const char **pz, int *pn); + int (*xTokenize_v2)(Fts5Context*, + const char *pText, int nText, /* Text to tokenize */ + const char *pLocale, int nLocale, /* Locale to pass to tokenizer */ + void *pCtx, /* Context passed to xToken() */ + int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ + ); }; /* @@ -318,7 +419,7 @@ struct Fts5ExtensionApi { ** A tokenizer instance is required to actually tokenize text. ** ** The first argument passed to this function is a copy of the (void*) -** pointer provided by the application when the fts5_tokenizer object +** pointer provided by the application when the fts5_tokenizer_v2 object ** was registered with FTS5 (the third argument to xCreateTokenizer()). ** The second and third arguments are an array of nul-terminated strings ** containing the tokenizer arguments, if any, specified following the @@ -342,7 +443,7 @@ struct Fts5ExtensionApi { ** argument passed to this function is a pointer to an Fts5Tokenizer object ** returned by an earlier call to xCreate(). ** -** The second argument indicates the reason that FTS5 is requesting +** The third argument indicates the reason that FTS5 is requesting ** tokenization of the supplied text. This is always one of the following ** four values: ** @@ -366,6 +467,13 @@ struct Fts5ExtensionApi { ** on a columnsize=0 database. ** ** +** The sixth and seventh arguments passed to xTokenize() - pLocale and +** nLocale - are a pointer to a buffer containing the locale to use for +** tokenization (e.g. "en_US") and its size in bytes, respectively. The +** pLocale buffer is not nul-terminated. 
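
The `xInstToken()` documentation above pairs naturally with `xInstCount()`: an auxiliary function first asks how many phrase hits the current row has, then requests token text by hit index. A hedged sketch of such an auxiliary function; the function name is hypothetical, it assumes the API object has `iVersion>=3` so `xInstToken` is present, and it ignores its trailing SQL arguments. It would be registered with `fts5_api.xCreateFunction()`.

```c
#include "fts5.h"

/* Return the text of token 0 of phrase hit 0 in the current row, or no
** result when the row contains no hits. Illustrative only. */
static void firstHitTokenFunc(
  const Fts5ExtensionApi *pApi,   /* API offered by current FTS version */
  Fts5Context *pFts,              /* First argument for pApi routines */
  sqlite3_context *pCtx,          /* Context for returning result/error */
  int nVal,                       /* Number of trailing SQL arguments */
  sqlite3_value **apVal           /* Array of trailing SQL arguments */
){
  int nInst = 0;
  int rc = pApi->xInstCount(pFts, &nInst);
  (void)nVal; (void)apVal;
  if( rc==SQLITE_OK && nInst>0 ){
    const char *pTok = 0;
    int nTok = 0;
    rc = pApi->xInstToken(pFts, 0, 0, &pTok, &nTok);
    if( rc==SQLITE_OK ){
      sqlite3_result_text(pCtx, pTok, nTok, SQLITE_TRANSIENT);
      return;
    }
  }
  if( rc!=SQLITE_OK ) sqlite3_result_error_code(pCtx, rc);
}
```
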
pLocale may be passed NULL (in +** which case nLocale is always 0) to indicate that the tokenizer should +** use its default locale. +** ** For each token in the input string, the supplied callback xToken() must ** be invoked. The first argument to it should be a copy of the pointer ** passed as the second argument to xTokenize(). The third and fourth @@ -389,6 +497,30 @@ struct Fts5ExtensionApi { ** may abandon the tokenization and return any error code other than ** SQLITE_OK or SQLITE_DONE. ** +** If the tokenizer is registered using an fts5_tokenizer_v2 object, +** then the xTokenize() method has two additional arguments - pLocale +** and nLocale. These specify the locale that the tokenizer should use +** for the current request. If pLocale and nLocale are both 0, then the +** tokenizer should use its default locale. Otherwise, pLocale points to +** an nLocale byte buffer containing the name of the locale to use as utf-8 +** text. pLocale is not nul-terminated. +** +** FTS5_TOKENIZER +** +** There is also an fts5_tokenizer object. This is an older, deprecated, +** version of fts5_tokenizer_v2. It is similar except that: +** +**

**    • There is no "iVersion" field, and
**    • The xTokenize() method does not take a locale argument.
    +** +** Legacy fts5_tokenizer tokenizers must be registered using the +** legacy xCreateTokenizer() function, instead of xCreateTokenizer_v2(). +** +** Tokenizer implementations registered using either API may be retrieved +** using both xFindTokenizer() and xFindTokenizer_v2(). +** ** SYNONYM SUPPORT ** ** Custom tokenizers may also support synonyms. Consider a case in which a @@ -492,11 +624,38 @@ struct Fts5ExtensionApi { ** as separate queries of the FTS index are required for each synonym. ** ** When using methods (2) or (3), it is important that the tokenizer only -** provide synonyms when tokenizing document text (method (2)) or query -** text (method (3)), not both. Doing so will not cause any errors, but is +** provide synonyms when tokenizing document text (method (3)) or query +** text (method (2)), not both. Doing so will not cause any errors, but is ** inefficient. */ typedef struct Fts5Tokenizer Fts5Tokenizer; +typedef struct fts5_tokenizer_v2 fts5_tokenizer_v2; +struct fts5_tokenizer_v2 { + int iVersion; /* Currently always 2 */ + + int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + void (*xDelete)(Fts5Tokenizer*); + int (*xTokenize)(Fts5Tokenizer*, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input text */ + ) + ); +}; + +/* +** New code should use the fts5_tokenizer_v2 type to define tokenizer +** implementations. The following type is included for legacy applications +** that still use it. 
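**
** Illustrative sketch (not part of this header): registering a v2
** tokenizer might look roughly as follows, where myCreate, myDelete and
** myTokenize are hypothetical application callbacks and pApi is an
** fts5_api pointer with iVersion>=3:
**
**     fts5_tokenizer_v2 tok = {2, myCreate, myDelete, myTokenize};
**     rc = pApi->xCreateTokenizer_v2(pApi, "mytok", pUserData, &tok, 0);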
+*/ typedef struct fts5_tokenizer fts5_tokenizer; struct fts5_tokenizer { int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); @@ -516,6 +675,7 @@ struct fts5_tokenizer { ); }; + /* Flags that may be passed as the third argument to xTokenize() */ #define FTS5_TOKENIZE_QUERY 0x0001 #define FTS5_TOKENIZE_PREFIX 0x0002 @@ -535,13 +695,13 @@ struct fts5_tokenizer { */ typedef struct fts5_api fts5_api; struct fts5_api { - int iVersion; /* Currently always set to 2 */ + int iVersion; /* Currently always set to 3 */ /* Create a new tokenizer */ int (*xCreateTokenizer)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_tokenizer *pTokenizer, void (*xDestroy)(void*) ); @@ -550,7 +710,7 @@ struct fts5_api { int (*xFindTokenizer)( fts5_api *pApi, const char *zName, - void **ppContext, + void **ppUserData, fts5_tokenizer *pTokenizer ); @@ -558,10 +718,29 @@ struct fts5_api { int (*xCreateFunction)( fts5_api *pApi, const char *zName, - void *pContext, + void *pUserData, fts5_extension_function xFunction, void (*xDestroy)(void*) ); + + /* APIs below this point are only available if iVersion>=3 */ + + /* Create a new tokenizer */ + int (*xCreateTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void *pUserData, + fts5_tokenizer_v2 *pTokenizer, + void (*xDestroy)(void*) + ); + + /* Find an existing tokenizer */ + int (*xFindTokenizer_v2)( + fts5_api *pApi, + const char *zName, + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer + ); }; /* diff --git a/ext/fts5/fts5Int.h b/ext/fts5/fts5Int.h index 754f28c67f..a13a65d3c2 100644 --- a/ext/fts5/fts5Int.h +++ b/ext/fts5/fts5Int.h @@ -20,6 +20,7 @@ SQLITE_EXTENSION_INIT1 #include #include +#include #ifndef SQLITE_AMALGAMATION @@ -59,6 +60,27 @@ typedef sqlite3_uint64 u64; # define LARGEST_INT64 (0xffffffff|(((i64)0x7fffffff)<<32)) # define SMALLEST_INT64 (((i64)-1) - LARGEST_INT64) +/* +** This macro is used in a single assert() within fts5 to check that an +** allocation is aligned to an 8-byte boundary. But it is a complicated +** macro to get right for multiple platforms without generating warnings. +** So instead of reproducing the entire definition from sqliteInt.h, we +** just do without this assert() for the rare non-amalgamation builds. +*/ +#define EIGHT_BYTE_ALIGNMENT(x) 1 + +/* +** Macros needed to provide flexible arrays in a portable way +*/ +#ifndef offsetof +# define offsetof(ST,M) ((size_t)((char*)&((ST*)0)->M - (char*)0)) +#endif +#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) +# define FLEXARRAY +#else +# define FLEXARRAY 1 +#endif + #endif /* Truncate very long tokens to this many bytes. Hard limit is @@ -131,10 +153,11 @@ typedef struct Fts5Colset Fts5Colset; */ struct Fts5Colset { int nCol; - int aiCol[1]; + int aiCol[FLEXARRAY]; }; - +/* Size (int bytes) of a complete Fts5Colset object with N columns. */ +#define SZ_FTS5COLSET(N) (sizeof(i64)*((N+2)/2)) /************************************************************************** ** Interface to code in fts5_config.c. 
fts5_config.c contains contains code @@ -142,6 +165,18 @@ struct Fts5Colset { */ typedef struct Fts5Config Fts5Config; +typedef struct Fts5TokenizerConfig Fts5TokenizerConfig; + +struct Fts5TokenizerConfig { + Fts5Tokenizer *pTok; + fts5_tokenizer_v2 *pApi2; + fts5_tokenizer *pApi1; + const char **azArg; + int nArg; + int ePattern; /* FTS_PATTERN_XXX constant */ + const char *pLocale; /* Current locale to use */ + int nLocale; /* Size of pLocale in bytes */ +}; /* ** An instance of the following structure encodes all information that can @@ -154,6 +189,10 @@ typedef struct Fts5Config Fts5Config; ** attempt to merge together. A value of 1 sets the object to use the ** compile time default. Zero disables auto-merge altogether. ** +** bContentlessDelete: +** True if the contentless_delete option was present in the CREATE +** VIRTUAL TABLE statement. +** ** zContent: ** ** zContentRowid: @@ -177,9 +216,12 @@ typedef struct Fts5Config Fts5Config; ** ** INSERT INTO tbl(tbl, rank) VALUES('prefix-index', $bPrefixIndex); ** +** bLocale: +** Set to true if locale=1 was specified when the table was created. */ struct Fts5Config { sqlite3 *db; /* Database handle */ + Fts5Global *pGlobal; /* Global fts5 object for handle db */ char *zDb; /* Database holding FTS index (e.g. "main") */ char *zName; /* Name of FTS index */ int nCol; /* Number of columns */ @@ -188,17 +230,21 @@ struct Fts5Config { int nPrefix; /* Number of prefix indexes */ int *aPrefix; /* Sizes in bytes of nPrefix prefix indexes */ int eContent; /* An FTS5_CONTENT value */ + int bContentlessDelete; /* "contentless_delete=" option (dflt==0) */ + int bContentlessUnindexed; /* "contentless_unindexed=" option (dflt=0) */ char *zContent; /* content table */ char *zContentRowid; /* "content_rowid=" option value */ int bColumnsize; /* "columnsize=" option value (dflt==1) */ + int bTokendata; /* "tokendata=" option value (dflt==0) */ + int bLocale; /* "locale=" option value (dflt==0) */ int eDetail; /* FTS5_DETAIL_XXX value */ char *zContentExprlist; - Fts5Tokenizer *pTok; - fts5_tokenizer *pTokApi; + Fts5TokenizerConfig t; int bLock; /* True when table is preparing statement */ - int ePattern; /* FTS_PATTERN_XXX constant */ + /* Values loaded from the %_config table */ + int iVersion; /* fts5 file format 'version' */ int iCookie; /* Incremented when %_config is modified */ int pgsz; /* Approximate page size used in %_data */ int nAutomerge; /* 'automerge' setting */ @@ -207,6 +253,9 @@ struct Fts5Config { int nHashSize; /* Bytes of memory for in-memory hash */ char *zRank; /* Name of rank function */ char *zRankArgs; /* Arguments to rank function */ + int bSecureDelete; /* 'secure-delete' */ + int nDeleteMerge; /* 'deletemerge' */ + int bPrefixInsttoken; /* 'prefix-insttoken' */ /* If non-NULL, points to sqlite3_vtab.base.zErrmsg. Often NULL. */ char **pzErrmsg; @@ -216,12 +265,16 @@ struct Fts5Config { #endif }; -/* Current expected value of %_config table 'version' field */ -#define FTS5_CURRENT_VERSION 4 +/* Current expected value of %_config table 'version' field. And +** the expected version if the 'secure-delete' option has ever been +** set on the table. 
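** In practice, a table whose stored version matches neither constant is
** rejected until it is rebuilt; for example, for a table named "ft"
** (name assumed for illustration):
**
**     INSERT INTO ft(ft) VALUES('rebuild');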
*/ +#define FTS5_CURRENT_VERSION 4 +#define FTS5_CURRENT_VERSION_SECUREDELETE 5 -#define FTS5_CONTENT_NORMAL 0 -#define FTS5_CONTENT_NONE 1 -#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_NORMAL 0 +#define FTS5_CONTENT_NONE 1 +#define FTS5_CONTENT_EXTERNAL 2 +#define FTS5_CONTENT_UNINDEXED 3 #define FTS5_DETAIL_FULL 0 #define FTS5_DETAIL_NONE 1 @@ -256,6 +309,8 @@ int sqlite3Fts5ConfigSetValue(Fts5Config*, const char*, sqlite3_value*, int*); int sqlite3Fts5ConfigParseRank(const char*, char**, char**); +void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...); + /* ** End of interface to code in fts5_config.c. **************************************************************************/ @@ -286,7 +341,7 @@ void sqlite3Fts5BufferAppendPrintf(int *, Fts5Buffer*, char *zFmt, ...); char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); #define fts5BufferZero(x) sqlite3Fts5BufferZero(x) -#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,c) +#define fts5BufferAppendVarint(a,b,c) sqlite3Fts5BufferAppendVarint(a,b,(i64)c) #define fts5BufferFree(a) sqlite3Fts5BufferFree(a) #define fts5BufferAppendBlob(a,b,c,d) sqlite3Fts5BufferAppendBlob(a,b,c,d) #define fts5BufferSet(a,b,c,d) sqlite3Fts5BufferSet(a,b,c,d) @@ -300,7 +355,7 @@ char *sqlite3Fts5Mprintf(int *pRc, const char *zFmt, ...); void sqlite3Fts5Put32(u8*, int); int sqlite3Fts5Get32(const u8*); -#define FTS5_POS2COLUMN(iPos) (int)(iPos >> 32) +#define FTS5_POS2COLUMN(iPos) (int)((iPos >> 32) & 0x7FFFFFFF) #define FTS5_POS2OFFSET(iPos) (int)(iPos & 0x7FFFFFFF) typedef struct Fts5PoslistReader Fts5PoslistReader; @@ -373,16 +428,19 @@ struct Fts5IndexIter { /* ** Values used as part of the flags argument passed to IndexQuery(). */ -#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ -#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ -#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ -#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ +#define FTS5INDEX_QUERY_PREFIX 0x0001 /* Prefix query */ +#define FTS5INDEX_QUERY_DESC 0x0002 /* Docs in descending rowid order */ +#define FTS5INDEX_QUERY_TEST_NOIDX 0x0004 /* Do not use prefix index */ +#define FTS5INDEX_QUERY_SCAN 0x0008 /* Scan query (fts5vocab) */ /* The following are used internally by the fts5_index.c module. They are ** defined here only to make it easier to avoid clashes with the flags ** above. */ -#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 -#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPEMPTY 0x0010 +#define FTS5INDEX_QUERY_NOOUTPUT 0x0020 +#define FTS5INDEX_QUERY_SKIPHASH 0x0040 +#define FTS5INDEX_QUERY_NOTOKENDATA 0x0080 +#define FTS5INDEX_QUERY_SCANONETERM 0x0100 /* ** Create/destroy an Fts5Index object. @@ -451,6 +509,17 @@ void *sqlite3Fts5StructureRef(Fts5Index*); void sqlite3Fts5StructureRelease(void*); int sqlite3Fts5StructureTest(Fts5Index*, void*); +/* +** Used by xInstToken(): +*/ +int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +); /* ** Insert or remove data to or from the index. 
Each time a document is @@ -525,6 +594,16 @@ int sqlite3Fts5IndexReset(Fts5Index *p); int sqlite3Fts5IndexLoadConfig(Fts5Index *p); +int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin); +int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid); + +void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter*); + +/* Used to populate hash tables for xInstToken in detail=none/column mode. */ +int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter*, const char*, int, i64 iRowid, int iCol, int iOff +); + /* ** End of interface to code in fts5_index.c. **************************************************************************/ @@ -537,7 +616,7 @@ int sqlite3Fts5GetVarintLen(u32 iVal); u8 sqlite3Fts5GetVarint(const unsigned char*, u64*); int sqlite3Fts5PutVarint(unsigned char *p, u64 v); -#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&b) +#define fts5GetVarint32(a,b) sqlite3Fts5GetVarint32(a,(u32*)&(b)) #define fts5GetVarint sqlite3Fts5GetVarint #define fts5FastGetVarint32(a, iOff, nVal) { \ @@ -568,18 +647,20 @@ struct Fts5Table { Fts5Index *pIndex; /* Full-text index */ }; -int sqlite3Fts5GetTokenizer( - Fts5Global*, - const char **azArg, - int nArg, - Fts5Config*, - char **pzErr -); +int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig); Fts5Table *sqlite3Fts5TableFromCsrid(Fts5Global*, i64); int sqlite3Fts5FlushToDisk(Fts5Table*); +void sqlite3Fts5ClearLocale(Fts5Config *pConfig); +void sqlite3Fts5SetLocale(Fts5Config *pConfig, const char *pLoc, int nLoc); + +int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal); +int sqlite3Fts5DecodeLocaleValue(sqlite3_value *pVal, + const char **ppText, int *pnText, const char **ppLoc, int *pnLoc +); + /* ** End of interface to code in fts5.c. **************************************************************************/ @@ -609,6 +690,11 @@ int sqlite3Fts5HashWrite( */ void sqlite3Fts5HashClear(Fts5Hash*); +/* +** Return true if the hash is empty, false otherwise. +*/ +int sqlite3Fts5HashIsEmpty(Fts5Hash*); + int sqlite3Fts5HashQuery( Fts5Hash*, /* Hash table to query */ int nPre, @@ -625,11 +711,13 @@ void sqlite3Fts5HashScanNext(Fts5Hash*); int sqlite3Fts5HashScanEof(Fts5Hash*); void sqlite3Fts5HashScanEntry(Fts5Hash *, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ); + /* ** End of interface to code in fts5_hash.c. **************************************************************************/ @@ -652,8 +740,8 @@ int sqlite3Fts5StorageRename(Fts5Storage*, const char *zName); int sqlite3Fts5DropAll(Fts5Config*); int sqlite3Fts5CreateTable(Fts5Config*, const char*, const char*, int, char **); -int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**); -int sqlite3Fts5StorageContentInsert(Fts5Storage *p, sqlite3_value**, i64*); +int sqlite3Fts5StorageDelete(Fts5Storage *p, i64, sqlite3_value**, int); +int sqlite3Fts5StorageContentInsert(Fts5Storage *p, int, sqlite3_value**, i64*); int sqlite3Fts5StorageIndexInsert(Fts5Storage *p, sqlite3_value**, i64); int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg); @@ -678,6 +766,9 @@ int sqlite3Fts5StorageOptimize(Fts5Storage *p); int sqlite3Fts5StorageMerge(Fts5Storage *p, int nMerge); int sqlite3Fts5StorageReset(Fts5Storage *p); +void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage*); +int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel); + /* ** End of interface to code in fts5_storage.c. 
**************************************************************************/ @@ -724,7 +815,7 @@ int sqlite3Fts5ExprPattern( ** i64 iRowid = sqlite3Fts5ExprRowid(pExpr); ** } */ -int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, int bDesc); +int sqlite3Fts5ExprFirst(Fts5Expr*, Fts5Index *pIdx, i64 iMin, i64, int bDesc); int sqlite3Fts5ExprNext(Fts5Expr*, i64 iMax); int sqlite3Fts5ExprEof(Fts5Expr*); i64 sqlite3Fts5ExprRowid(Fts5Expr*); @@ -750,6 +841,10 @@ int sqlite3Fts5ExprClonePhrase(Fts5Expr*, int, Fts5Expr**); int sqlite3Fts5ExprPhraseCollist(Fts5Expr *, int, const u8 **, int *); +int sqlite3Fts5ExprQueryToken(Fts5Expr*, int, int, const char**, int*); +int sqlite3Fts5ExprInstToken(Fts5Expr*, i64, int, int, int, int, const char**, int*); +void sqlite3Fts5ExprClearTokens(Fts5Expr*); + /******************************************* ** The fts5_expr.c API above this point is used by the other hand-written ** C code in this module. The interfaces below this point are called by @@ -826,6 +921,7 @@ int sqlite3Fts5TokenizerPattern( int (*xCreate)(void*, const char**, int, Fts5Tokenizer**), Fts5Tokenizer *pTok ); +int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig*); /* ** End of interface to code in fts5_tokenizer.c. **************************************************************************/ diff --git a/ext/fts5/fts5_aux.c b/ext/fts5/fts5_aux.c index 77f6d5baba..95b33ea318 100644 --- a/ext/fts5/fts5_aux.c +++ b/ext/fts5/fts5_aux.c @@ -110,15 +110,19 @@ static int fts5CInstIterInit( */ typedef struct HighlightContext HighlightContext; struct HighlightContext { - CInstIter iter; /* Coalesced Instance Iterator */ - int iPos; /* Current token offset in zIn[] */ + /* Constant parameters to fts5HighlightCb() */ int iRangeStart; /* First token to include */ int iRangeEnd; /* If non-zero, last token to include */ const char *zOpen; /* Opening highlight */ const char *zClose; /* Closing highlight */ const char *zIn; /* Input text */ int nIn; /* Size of input text in bytes */ - int iOff; /* Current offset within zIn[] */ + + /* Variables modified by fts5HighlightCb() */ + CInstIter iter; /* Coalesced Instance Iterator */ + int iPos; /* Current token offset in zIn[] */ + int iOff; /* Have copied up to this offset in zIn[] */ + int bOpen; /* True if highlight is open */ char *zOut; /* Output value */ }; @@ -151,8 +155,8 @@ static int fts5HighlightCb( int tflags, /* Mask of FTS5_TOKEN_* flags */ const char *pToken, /* Buffer containing token */ int nToken, /* Size of token in bytes */ - int iStartOff, /* Start offset of token */ - int iEndOff /* End offset of token */ + int iStartOff, /* Start byte offset of token */ + int iEndOff /* End byte offset of token */ ){ HighlightContext *p = (HighlightContext*)pContext; int rc = SQLITE_OK; @@ -163,40 +167,66 @@ static int fts5HighlightCb( if( tflags & FTS5_TOKEN_COLOCATED ) return SQLITE_OK; iPos = p->iPos++; - if( p->iRangeEnd>0 ){ + if( p->iRangeEnd>=0 ){ if( iPosiRangeStart || iPos>p->iRangeEnd ) return SQLITE_OK; if( p->iRangeStart && iPos==p->iRangeStart ) p->iOff = iStartOff; } - if( iPos==p->iter.iStart ){ + /* If the parenthesis is open, and this token is not part of the current + ** phrase, and the starting byte offset of this token is past the point + ** that has currently been copied into the output buffer, close the + ** parenthesis. 
*/ + if( p->bOpen + && (iPos<=p->iter.iStart || p->iter.iStart<0) + && iStartOff>p->iOff + ){ + fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; + } + + /* If this is the start of a new phrase, and the highlight is not open: + ** + ** * copy text from the input up to the start of the phrase, and + ** * open the highlight. + */ + if( iPos==p->iter.iStart && p->bOpen==0 ){ fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iStartOff - p->iOff); fts5HighlightAppend(&rc, p, p->zOpen, -1); p->iOff = iStartOff; + p->bOpen = 1; } if( iPos==p->iter.iEnd ){ - if( p->iRangeEnd && p->iter.iStartiRangeStart ){ + if( p->bOpen==0 ){ + assert( p->iRangeEnd>=0 ); fts5HighlightAppend(&rc, p, p->zOpen, -1); + p->bOpen = 1; } fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); - fts5HighlightAppend(&rc, p, p->zClose, -1); p->iOff = iEndOff; + if( rc==SQLITE_OK ){ rc = fts5CInstIterNext(&p->iter); } } - if( p->iRangeEnd>0 && iPos==p->iRangeEnd ){ - fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); - p->iOff = iEndOff; - if( iPos>=p->iter.iStart && iPositer.iEnd ){ + if( iPos==p->iRangeEnd ){ + if( p->bOpen ){ + if( p->iter.iStart>=0 && iPos>=p->iter.iStart ){ + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; + } fts5HighlightAppend(&rc, p, p->zClose, -1); + p->bOpen = 0; } + fts5HighlightAppend(&rc, p, &p->zIn[p->iOff], iEndOff - p->iOff); + p->iOff = iEndOff; } return rc; } + /* ** Implementation of highlight() function. */ @@ -221,15 +251,28 @@ static void fts5HighlightFunction( memset(&ctx, 0, sizeof(HighlightContext)); ctx.zOpen = (const char*)sqlite3_value_text(apVal[1]); ctx.zClose = (const char*)sqlite3_value_text(apVal[2]); + ctx.iRangeEnd = -1; rc = pApi->xColumnText(pFts, iCol, &ctx.zIn, &ctx.nIn); - - if( ctx.zIn ){ + if( rc==SQLITE_RANGE ){ + sqlite3_result_text(pCtx, "", -1, SQLITE_STATIC); + rc = SQLITE_OK; + }else if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iCol */ + int nLoc = 0; /* Size of pLoc in bytes */ if( rc==SQLITE_OK ){ rc = fts5CInstIterInit(pApi, pFts, iCol, &ctx.iter); } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx, fts5HighlightCb + ); + } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); } fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff); @@ -406,6 +449,7 @@ static void fts5SnippetFunction( iCol = sqlite3_value_int(apVal[0]); ctx.zOpen = fts5ValueToText(apVal[1]); ctx.zClose = fts5ValueToText(apVal[2]); + ctx.iRangeEnd = -1; zEllips = fts5ValueToText(apVal[3]); nToken = sqlite3_value_int(apVal[4]); @@ -422,6 +466,8 @@ static void fts5SnippetFunction( memset(&sFinder, 0, sizeof(Fts5SFinder)); for(i=0; ixColumnText(pFts, i, &sFinder.zDoc, &nDoc); if( rc!=SQLITE_OK ) break; - rc = pApi->xTokenize(pFts, - sFinder.zDoc, nDoc, (void*)&sFinder,fts5SentenceFinderCb + rc = pApi->xColumnLocale(pFts, i, &pLoc, &nLoc); + if( rc!=SQLITE_OK ) break; + rc = pApi->xTokenize_v2(pFts, + sFinder.zDoc, nDoc, pLoc, nLoc, (void*)&sFinder, fts5SentenceFinderCb ); if( rc!=SQLITE_OK ) break; rc = pApi->xColumnSize(pFts, i, &nDocsize); @@ -488,6 +536,9 @@ static void fts5SnippetFunction( rc = pApi->xColumnSize(pFts, iBestCol, &nColSize); } if( ctx.zIn ){ + const char *pLoc = 0; /* Locale of column iBestCol */ + int nLoc = 0; /* Bytes in pLoc */ + if( rc==SQLITE_OK ){ rc 
= fts5CInstIterInit(pApi, pFts, iBestCol, &ctx.iter); } @@ -506,7 +557,15 @@ static void fts5SnippetFunction( } if( rc==SQLITE_OK ){ - rc = pApi->xTokenize(pFts, ctx.zIn, ctx.nIn, (void*)&ctx,fts5HighlightCb); + rc = pApi->xColumnLocale(pFts, iBestCol, &pLoc, &nLoc); + } + if( rc==SQLITE_OK ){ + rc = pApi->xTokenize_v2( + pFts, ctx.zIn, ctx.nIn, pLoc, nLoc, (void*)&ctx,fts5HighlightCb + ); + } + if( ctx.bOpen ){ + fts5HighlightAppend(&rc, &ctx, ctx.zClose, -1); } if( ctx.iRangeEnd>=(nColSize-1) ){ fts5HighlightAppend(&rc, &ctx, &ctx.zIn[ctx.iOff], ctx.nIn - ctx.iOff); @@ -608,7 +667,7 @@ static int fts5Bm25GetData( ** under consideration. ** ** The problem with this is that if (N < 2*nHit), the IDF is - ** negative. Which is undesirable. So the mimimum allowable IDF is + ** negative. Which is undesirable. So the minimum allowable IDF is ** (1e-6) - roughly the same as a term that appears in just over ** half of set of 5,000,000 documents. */ double idf = log( (nRow - nHit + 0.5) / (nHit + 0.5) ); @@ -687,6 +746,53 @@ static void fts5Bm25Function( } } +/* +** Implementation of fts5_get_locale() function. +*/ +static void fts5GetLocaleFunction( + const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + Fts5Context *pFts, /* First arg to pass to pApi functions */ + sqlite3_context *pCtx, /* Context for returning result/error */ + int nVal, /* Number of values in apVal[] array */ + sqlite3_value **apVal /* Array of trailing arguments */ +){ + int iCol = 0; + int eType = 0; + int rc = SQLITE_OK; + const char *zLocale = 0; + int nLocale = 0; + + /* xColumnLocale() must be available */ + assert( pApi->iVersion>=4 ); + + if( nVal!=1 ){ + const char *z = "wrong number of arguments to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + eType = sqlite3_value_numeric_type(apVal[0]); + if( eType!=SQLITE_INTEGER ){ + const char *z = "non-integer argument passed to function fts5_get_locale()"; + sqlite3_result_error(pCtx, z, -1); + return; + } + + iCol = sqlite3_value_int(apVal[0]); + if( iCol<0 || iCol>=pApi->xColumnCount(pFts) ){ + sqlite3_result_error_code(pCtx, SQLITE_RANGE); + return; + } + + rc = pApi->xColumnLocale(pFts, iCol, &zLocale, &nLocale); + if( rc!=SQLITE_OK ){ + sqlite3_result_error_code(pCtx, rc); + return; + } + + sqlite3_result_text(pCtx, zLocale, nLocale, SQLITE_TRANSIENT); +} + int sqlite3Fts5AuxInit(fts5_api *pApi){ struct Builtin { const char *zFunc; /* Function name (nul-terminated) */ @@ -694,9 +800,10 @@ int sqlite3Fts5AuxInit(fts5_api *pApi){ fts5_extension_function xFunc;/* Callback function */ void (*xDestroy)(void*); /* Destructor function */ } aBuiltin [] = { - { "snippet", 0, fts5SnippetFunction, 0 }, - { "highlight", 0, fts5HighlightFunction, 0 }, - { "bm25", 0, fts5Bm25Function, 0 }, + { "snippet", 0, fts5SnippetFunction, 0 }, + { "highlight", 0, fts5HighlightFunction, 0 }, + { "bm25", 0, fts5Bm25Function, 0 }, + { "fts5_get_locale", 0, fts5GetLocaleFunction, 0 }, }; int rc = SQLITE_OK; /* Return code */ int i; /* To iterate through builtin functions */ diff --git a/ext/fts5/fts5_buffer.c b/ext/fts5/fts5_buffer.c index b9614e1290..afcd83b6ba 100644 --- a/ext/fts5/fts5_buffer.c +++ b/ext/fts5/fts5_buffer.c @@ -68,6 +68,7 @@ void sqlite3Fts5BufferAppendBlob( ){ if( nData ){ if( fts5BufferGrow(pRc, pBuf, nData) ) return; + assert( pBuf->p!=0 ); memcpy(&pBuf->p[pBuf->n], pData, nData); pBuf->n += nData; } @@ -169,6 +170,7 @@ int sqlite3Fts5PoslistNext64( i64 *piOff /* IN/OUT: Current offset */ ){ int i = *pi; + assert( a!=0 || 
i==0 ); if( i>=n ){ /* EOF */ *piOff = -1; @@ -176,6 +178,7 @@ int sqlite3Fts5PoslistNext64( }else{ i64 iOff = *piOff; u32 iVal; + assert( a!=0 ); fts5FastGetVarint32(a, i, iVal); if( iVal<=1 ){ if( iVal==0 ){ @@ -305,7 +308,7 @@ char *sqlite3Fts5Strndup(int *pRc, const char *pIn, int nIn){ ** * The 52 upper and lower case ASCII characters, and ** * The 10 integer ASCII characters. ** * The underscore character "_" (0x5F). -** * The unicode "subsitute" character (0x1A). +** * The unicode "substitute" character (0x1A). */ int sqlite3Fts5IsBareword(char t){ u8 aBareword[128] = { diff --git a/ext/fts5/fts5_config.c b/ext/fts5/fts5_config.c index ab1a846b12..eea82b046d 100644 --- a/ext/fts5/fts5_config.c +++ b/ext/fts5/fts5_config.c @@ -22,6 +22,8 @@ #define FTS5_DEFAULT_CRISISMERGE 16 #define FTS5_DEFAULT_HASHSIZE (1024*1024) +#define FTS5_DEFAULT_DELETE_AUTOMERGE 10 /* default 10% */ + /* Maximum allowed page size */ #define FTS5_MAX_PAGE_SIZE (64*1024) @@ -232,7 +234,6 @@ static int fts5ConfigSetEnum( ** eventually free any such error message using sqlite3_free(). */ static int fts5ConfigParseSpecial( - Fts5Global *pGlobal, Fts5Config *pConfig, /* Configuration object to update */ const char *zCmd, /* Special command to parse */ const char *zArg, /* Argument to parse */ @@ -240,6 +241,7 @@ static int fts5ConfigParseSpecial( ){ int rc = SQLITE_OK; int nCmd = (int)strlen(zCmd); + if( sqlite3_strnicmp("prefix", zCmd, nCmd)==0 ){ const int nByte = sizeof(int) * FTS5_MAX_PREFIX_INDEXES; const char *p; @@ -296,12 +298,11 @@ static int fts5ConfigParseSpecial( if( sqlite3_strnicmp("tokenize", zCmd, nCmd)==0 ){ const char *p = (const char*)zArg; sqlite3_int64 nArg = strlen(zArg) + 1; - char **azArg = sqlite3Fts5MallocZero(&rc, sizeof(char*) * nArg); - char *pDel = sqlite3Fts5MallocZero(&rc, nArg * 2); - char *pSpace = pDel; + char **azArg = sqlite3Fts5MallocZero(&rc, (sizeof(char*) + 2) * nArg); - if( azArg && pSpace ){ - if( pConfig->pTok ){ + if( azArg ){ + char *pSpace = (char*)&azArg[nArg]; + if( pConfig->t.azArg ){ *pzErr = sqlite3_mprintf("multiple tokenize=... directives"); rc = SQLITE_ERROR; }else{ @@ -324,16 +325,14 @@ static int fts5ConfigParseSpecial( *pzErr = sqlite3_mprintf("parse error in tokenize directive"); rc = SQLITE_ERROR; }else{ - rc = sqlite3Fts5GetTokenizer(pGlobal, - (const char**)azArg, (int)nArg, pConfig, - pzErr - ); + pConfig->t.azArg = (const char**)azArg; + pConfig->t.nArg = nArg; + azArg = 0; } } } - sqlite3_free(azArg); - sqlite3_free(pDel); + return rc; } @@ -352,6 +351,26 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("contentless_delete", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessDelete = (zArg[0]=='1'); + } + return rc; + } + + if( sqlite3_strnicmp("contentless_unindexed", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed contentless_delete=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bContentlessUnindexed = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("content_rowid", zCmd, nCmd)==0 ){ if( pConfig->zContentRowid ){ *pzErr = sqlite3_mprintf("multiple content_rowid=... 
directives"); @@ -372,6 +391,16 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("locale", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed locale=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bLocale = (zArg[0]=='1'); + } + return rc; + } + if( sqlite3_strnicmp("detail", zCmd, nCmd)==0 ){ const Fts5Enum aDetail[] = { { "none", FTS5_DETAIL_NONE }, @@ -386,20 +415,20 @@ static int fts5ConfigParseSpecial( return rc; } + if( sqlite3_strnicmp("tokendata", zCmd, nCmd)==0 ){ + if( (zArg[0]!='0' && zArg[0]!='1') || zArg[1]!='\0' ){ + *pzErr = sqlite3_mprintf("malformed tokendata=... directive"); + rc = SQLITE_ERROR; + }else{ + pConfig->bTokendata = (zArg[0]=='1'); + } + return rc; + } + *pzErr = sqlite3_mprintf("unrecognized option: \"%.*s\"", nCmd, zCmd); return SQLITE_ERROR; } -/* -** Allocate an instance of the default tokenizer ("simple") at -** Fts5Config.pTokenizer. Return SQLITE_OK if successful, or an SQLite error -** code if an error occurs. -*/ -static int fts5ConfigDefaultTokenizer(Fts5Global *pGlobal, Fts5Config *pConfig){ - assert( pConfig->pTok==0 && pConfig->pTokApi==0 ); - return sqlite3Fts5GetTokenizer(pGlobal, 0, 0, pConfig, 0); -} - /* ** Gobble up the first bareword or quoted word from the input buffer zIn. ** Return a pointer to the character immediately following the last in @@ -459,7 +488,8 @@ static int fts5ConfigParseColumn( Fts5Config *p, char *zCol, char *zArg, - char **pzErr + char **pzErr, + int *pbUnindexed ){ int rc = SQLITE_OK; if( 0==sqlite3_stricmp(zCol, FTS5_RANK_NAME) @@ -470,6 +500,7 @@ static int fts5ConfigParseColumn( }else if( zArg ){ if( 0==sqlite3_stricmp(zArg, "unindexed") ){ p->abUnindexed[p->nCol] = 1; + *pbUnindexed = 1; }else{ *pzErr = sqlite3_mprintf("unrecognized column option: %s", zArg); rc = SQLITE_ERROR; @@ -490,11 +521,26 @@ static int fts5ConfigMakeExprlist(Fts5Config *p){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, "T.%Q", p->zContentRowid); if( p->eContent!=FTS5_CONTENT_NONE ){ + assert( p->eContent==FTS5_CONTENT_EXTERNAL + || p->eContent==FTS5_CONTENT_NORMAL + || p->eContent==FTS5_CONTENT_UNINDEXED + ); for(i=0; inCol; i++){ if( p->eContent==FTS5_CONTENT_EXTERNAL ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.%Q", p->azCol[i]); - }else{ + }else if( p->eContent==FTS5_CONTENT_NORMAL || p->abUnindexed[i] ){ sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.c%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); + } + } + } + if( p->eContent==FTS5_CONTENT_NORMAL && p->bLocale ){ + for(i=0; inCol; i++){ + if( p->abUnindexed[i]==0 ){ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", T.l%d", i); + }else{ + sqlite3Fts5BufferAppendPrintf(&rc, &buf, ", NULL"); } } } @@ -528,10 +574,12 @@ int sqlite3Fts5ConfigParse( Fts5Config *pRet; /* New object to return */ int i; sqlite3_int64 nByte; + int bUnindexed = 0; /* True if there are one or more UNINDEXED */ *ppOut = pRet = (Fts5Config*)sqlite3_malloc(sizeof(Fts5Config)); if( pRet==0 ) return SQLITE_NOMEM; memset(pRet, 0, sizeof(Fts5Config)); + pRet->pGlobal = pGlobal; pRet->db = db; pRet->iCookie = -1; @@ -550,6 +598,7 @@ int sqlite3Fts5ConfigParse( rc = SQLITE_ERROR; } + assert( (pRet->abUnindexed && pRet->azCol) || rc!=SQLITE_OK ); for(i=3; rc==SQLITE_OK && ipTok==0 ){ - rc = fts5ConfigDefaultTokenizer(pGlobal, pRet); + /* We only allow contentless_delete=1 if the table is indeed contentless. 
*/ + if( rc==SQLITE_OK + && pRet->bContentlessDelete + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 requires a contentless table" + ); + rc = SQLITE_ERROR; + } + + /* We only allow contentless_delete=1 if columnsize=0 is not present. + ** + ** This restriction may be removed at some point. + */ + if( rc==SQLITE_OK && pRet->bContentlessDelete && pRet->bColumnsize==0 ){ + *pzErr = sqlite3_mprintf( + "contentless_delete=1 is incompatible with columnsize=0" + ); + rc = SQLITE_ERROR; + } + + /* We only allow contentless_unindexed=1 if the table is actually a + ** contentless one. + */ + if( rc==SQLITE_OK + && pRet->bContentlessUnindexed + && pRet->eContent!=FTS5_CONTENT_NONE + ){ + *pzErr = sqlite3_mprintf( + "contentless_unindexed=1 requires a contentless table" + ); + rc = SQLITE_ERROR; } /* If no zContent option was specified, fill in the default values. */ if( rc==SQLITE_OK && pRet->zContent==0 ){ const char *zTail = 0; - assert( pRet->eContent==FTS5_CONTENT_NORMAL - || pRet->eContent==FTS5_CONTENT_NONE + assert( pRet->eContent==FTS5_CONTENT_NORMAL + || pRet->eContent==FTS5_CONTENT_NONE ); if( pRet->eContent==FTS5_CONTENT_NORMAL ){ zTail = "content"; + }else if( bUnindexed && pRet->bContentlessUnindexed ){ + pRet->eContent = FTS5_CONTENT_UNINDEXED; + zTail = "content"; }else if( pRet->bColumnsize ){ zTail = "docsize"; } @@ -643,9 +723,14 @@ int sqlite3Fts5ConfigParse( void sqlite3Fts5ConfigFree(Fts5Config *pConfig){ if( pConfig ){ int i; - if( pConfig->pTok ){ - pConfig->pTokApi->xDelete(pConfig->pTok); + if( pConfig->t.pTok ){ + if( pConfig->t.pApi1 ){ + pConfig->t.pApi1->xDelete(pConfig->t.pTok); + }else{ + pConfig->t.pApi2->xDelete(pConfig->t.pTok); + } } + sqlite3_free((char*)pConfig->t.azArg); sqlite3_free(pConfig->zDb); sqlite3_free(pConfig->zName); for(i=0; inCol; i++){ @@ -720,10 +805,24 @@ int sqlite3Fts5Tokenize( void *pCtx, /* Context passed to xToken() */ int (*xToken)(void*, int, const char*, int, int, int) /* Callback */ ){ - if( pText==0 ) return SQLITE_OK; - return pConfig->pTokApi->xTokenize( - pConfig->pTok, pCtx, flags, pText, nText, xToken - ); + int rc = SQLITE_OK; + if( pText ){ + if( pConfig->t.pTok==0 ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } + if( rc==SQLITE_OK ){ + if( pConfig->t.pApi1 ){ + rc = pConfig->t.pApi1->xTokenize( + pConfig->t.pTok, pCtx, flags, pText, nText, xToken + ); + }else{ + rc = pConfig->t.pApi2->xTokenize(pConfig->t.pTok, pCtx, flags, + pText, nText, pConfig->t.pLocale, pConfig->t.nLocale, xToken + ); + } + } + } + return rc; } /* @@ -889,6 +988,18 @@ int sqlite3Fts5ConfigSetValue( } } + else if( 0==sqlite3_stricmp(zKey, "deletemerge") ){ + int nVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + nVal = sqlite3_value_int(pVal); + }else{ + *pbBadkey = 1; + } + if( nVal<0 ) nVal = FTS5_DEFAULT_DELETE_AUTOMERGE; + if( nVal>100 ) nVal = 0; + pConfig->nDeleteMerge = nVal; + } + else if( 0==sqlite3_stricmp(zKey, "rank") ){ const char *zIn = (const char*)sqlite3_value_text(pVal); char *zRank; @@ -903,6 +1014,31 @@ int sqlite3Fts5ConfigSetValue( rc = SQLITE_OK; *pbBadkey = 1; } + } + + else if( 0==sqlite3_stricmp(zKey, "secure-delete") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bSecureDelete = (bVal ? 
1 : 0); + } + } + + else if( 0==sqlite3_stricmp(zKey, "insttoken") ){ + int bVal = -1; + if( SQLITE_INTEGER==sqlite3_value_numeric_type(pVal) ){ + bVal = sqlite3_value_int(pVal); + } + if( bVal<0 ){ + *pbBadkey = 1; + }else{ + pConfig->bPrefixInsttoken = (bVal ? 1 : 0); + } + }else{ *pbBadkey = 1; } @@ -925,6 +1061,7 @@ int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ pConfig->nUsermerge = FTS5_DEFAULT_USERMERGE; pConfig->nCrisisMerge = FTS5_DEFAULT_CRISISMERGE; pConfig->nHashSize = FTS5_DEFAULT_HASHSIZE; + pConfig->nDeleteMerge = FTS5_DEFAULT_DELETE_AUTOMERGE; zSql = sqlite3Fts5Mprintf(&rc, zSelect, pConfig->zDb, pConfig->zName); if( zSql ){ @@ -947,15 +1084,17 @@ int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ rc = sqlite3_finalize(p); } - if( rc==SQLITE_OK && iVersion!=FTS5_CURRENT_VERSION ){ + if( rc==SQLITE_OK + && iVersion!=FTS5_CURRENT_VERSION + && iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE + ){ rc = SQLITE_ERROR; - if( pConfig->pzErrmsg ){ - assert( 0==*pConfig->pzErrmsg ); - *pConfig->pzErrmsg = sqlite3_mprintf( - "invalid fts5 file format (found %d, expected %d) - run 'rebuild'", - iVersion, FTS5_CURRENT_VERSION - ); - } + sqlite3Fts5ConfigErrmsg(pConfig, "invalid fts5 file format " + "(found %d, expected %d or %d) - run 'rebuild'", + iVersion, FTS5_CURRENT_VERSION, FTS5_CURRENT_VERSION_SECUREDELETE + ); + }else{ + pConfig->iVersion = iVersion; } if( rc==SQLITE_OK ){ @@ -963,3 +1102,26 @@ int sqlite3Fts5ConfigLoad(Fts5Config *pConfig, int iCookie){ } return rc; } + +/* +** Set (*pConfig->pzErrmsg) to point to an sqlite3_malloc()ed buffer +** containing the error message created using printf() style formatting +** string zFmt and its trailing arguments. +*/ +void sqlite3Fts5ConfigErrmsg(Fts5Config *pConfig, const char *zFmt, ...){ + va_list ap; /* ... printf arguments */ + char *zMsg = 0; + + va_start(ap, zFmt); + zMsg = sqlite3_vmprintf(zFmt, ap); + if( pConfig->pzErrmsg ){ + assert( *pConfig->pzErrmsg==0 ); + *pConfig->pzErrmsg = zMsg; + }else{ + sqlite3_free(zMsg); + } + + va_end(ap); +} + + diff --git a/ext/fts5/fts5_expr.c b/ext/fts5/fts5_expr.c index d9c1dd0fd9..352df81f4f 100644 --- a/ext/fts5/fts5_expr.c +++ b/ext/fts5/fts5_expr.c @@ -17,6 +17,10 @@ #include "fts5Int.h" #include "fts5parse.h" +#ifndef SQLITE_FTS5_MAX_EXPR_DEPTH +# define SQLITE_FTS5_MAX_EXPR_DEPTH 256 +#endif + /* ** All token types in the generated fts5parse.h file are greater than 0. */ @@ -50,18 +54,28 @@ struct Fts5Expr { /* ** eType: -** Expression node type. Always one of: +** Expression node type. Usually one of: ** ** FTS5_AND (nChild, apChild valid) ** FTS5_OR (nChild, apChild valid) ** FTS5_NOT (nChild, apChild valid) ** FTS5_STRING (pNear valid) ** FTS5_TERM (pNear valid) +** +** An expression node with eType==0 may also exist. It always matches zero +** rows. This is created when a phrase containing no tokens is parsed. +** e.g. "". +** +** iHeight: +** Distance from this node to furthest leaf. This is always 0 for nodes +** of type FTS5_STRING and FTS5_TERM. For all other nodes it is one +** greater than the largest child value. */ struct Fts5ExprNode { int eType; /* Node type */ int bEof; /* True at EOF */ int bNomatch; /* True if entry is not a match */ + int iHeight; /* Distance to tree leaf nodes */ /* Next method for this node. */ int (*xNext)(Fts5Expr*, Fts5ExprNode*, int, i64); @@ -72,9 +86,13 @@ struct Fts5ExprNode { /* Child nodes. For a NOT node, this array always contains 2 entries. For ** AND or OR nodes, it contains 2 or more entries. 
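  ** Illustrative note (not from the original comment): because apChild[]
  ** is declared with FLEXARRAY, objects are sized with the
  ** SZ_FTS5EXPRNODE() macro defined after this struct rather than with
  ** sizeof(). For example, a node with two children is allocated as:
  **
  **     pNode = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRNODE(2));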
*/ int nChild; /* Number of child nodes */ - Fts5ExprNode *apChild[1]; /* Array of child nodes */ + Fts5ExprNode *apChild[FLEXARRAY]; /* Array of child nodes */ }; +/* Size (in bytes) of an Fts5ExprNode object that holds up to N children */ +#define SZ_FTS5EXPRNODE(N) \ + (offsetof(Fts5ExprNode,apChild) + (N)*sizeof(Fts5ExprNode*)) + #define Fts5NodeIsString(p) ((p)->eType==FTS5_TERM || (p)->eType==FTS5_STRING) /* @@ -90,7 +108,9 @@ struct Fts5ExprNode { struct Fts5ExprTerm { u8 bPrefix; /* True for a prefix term */ u8 bFirst; /* True if token must be first in column */ - char *zTerm; /* nul-terminated term */ + char *pTerm; /* Term data */ + int nQueryTerm; /* Effective size of term in bytes */ + int nFullTerm; /* Size of term in bytes incl. tokendata */ Fts5IndexIter *pIter; /* Iterator for this term */ Fts5ExprTerm *pSynonym; /* Pointer to first in list of synonyms */ }; @@ -103,9 +123,13 @@ struct Fts5ExprPhrase { Fts5ExprNode *pNode; /* FTS5_STRING node this phrase is part of */ Fts5Buffer poslist; /* Current position list */ int nTerm; /* Number of entries in aTerm[] */ - Fts5ExprTerm aTerm[1]; /* Terms that make up this phrase */ + Fts5ExprTerm aTerm[FLEXARRAY]; /* Terms that make up this phrase */ }; +/* Size (in bytes) of an Fts5ExprPhrase object that holds up to N terms */ +#define SZ_FTS5EXPRPHRASE(N) \ + (offsetof(Fts5ExprPhrase,aTerm) + (N)*sizeof(Fts5ExprTerm)) + /* ** One or more phrases that must appear within a certain token distance of ** each other within each matching document. @@ -114,9 +138,12 @@ struct Fts5ExprNearset { int nNear; /* NEAR parameter */ Fts5Colset *pColset; /* Columns to search (NULL -> all columns) */ int nPhrase; /* Number of entries in aPhrase[] array */ - Fts5ExprPhrase *apPhrase[1]; /* Array of phrase pointers */ + Fts5ExprPhrase *apPhrase[FLEXARRAY]; /* Array of phrase pointers */ }; +/* Size (in bytes) of an Fts5ExprNearset object covering up to N phrases */ +#define SZ_FTS5EXPRNEARSET(N) \ + (offsetof(Fts5ExprNearset,apPhrase)+(N)*sizeof(Fts5ExprPhrase*)) /* ** Parse context. @@ -131,6 +158,31 @@ struct Fts5Parse { int bPhraseToAnd; /* Convert "a+b" to "a AND b" */ }; +/* +** Check that the Fts5ExprNode.iHeight variables are set correctly in +** the expression tree passed as the only argument. +*/ +#ifndef NDEBUG +static void assert_expr_depth_ok(int rc, Fts5ExprNode *p){ + if( rc==SQLITE_OK ){ + if( p->eType==FTS5_TERM || p->eType==FTS5_STRING || p->eType==0 ){ + assert( p->iHeight==0 ); + }else{ + int ii; + int iMaxChild = 0; + for(ii=0; iinChild; ii++){ + Fts5ExprNode *pChild = p->apChild[ii]; + iMaxChild = MAX(iMaxChild, pChild->iHeight); + assert_expr_depth_ok(SQLITE_OK, pChild); + } + assert( p->iHeight==iMaxChild+1 ); + } + } +} +#else +# define assert_expr_depth_ok(rc, p) +#endif + void sqlite3Fts5ParseError(Fts5Parse *pParse, const char *zFmt, ...){ va_list ap; va_start(ap, zFmt); @@ -245,10 +297,13 @@ int sqlite3Fts5ExprNew( }while( sParse.rc==SQLITE_OK && t!=FTS5_EOF ); sqlite3Fts5ParserFree(pEngine, fts5ParseFree); + assert( sParse.pExpr || sParse.rc!=SQLITE_OK ); + assert_expr_depth_ok(sParse.rc, sParse.pExpr); + /* If the LHS of the MATCH expression was a user column, apply the ** implicit column-filter. 
*/ - if( iColnCol && sParse.pExpr && sParse.rc==SQLITE_OK ){ - int n = sizeof(Fts5Colset); + if( sParse.rc==SQLITE_OK && iColnCol ){ + int n = SZ_FTS5COLSET(1); Fts5Colset *pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&sParse.rc, n); if( pColset ){ pColset->nCol = 1; @@ -264,15 +319,7 @@ int sqlite3Fts5ExprNew( sParse.rc = SQLITE_NOMEM; sqlite3Fts5ParseNodeFree(sParse.pExpr); }else{ - if( !sParse.pExpr ){ - const int nByte = sizeof(Fts5ExprNode); - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&sParse.rc, nByte); - if( pNew->pRoot ){ - pNew->pRoot->bEof = 1; - } - }else{ - pNew->pRoot = sParse.pExpr; - } + pNew->pRoot = sParse.pExpr; pNew->pIndex = 0; pNew->pConfig = pConfig; pNew->apExprPhrase = sParse.apPhrase; @@ -285,10 +332,27 @@ int sqlite3Fts5ExprNew( } sqlite3_free(sParse.apPhrase); - *pzErr = sParse.zErr; + if( 0==*pzErr ){ + *pzErr = sParse.zErr; + }else{ + sqlite3_free(sParse.zErr); + } return sParse.rc; } +/* +** Assuming that buffer z is at least nByte bytes in size and contains a +** valid utf-8 string, return the number of characters in the string. +*/ +static int fts5ExprCountChar(const char *z, int nByte){ + int nRet = 0; + int ii; + for(ii=0; ii=3 ){ + + if( fts5ExprCountChar(&zText[iFirst], i-iFirst)>=3 ){ int jj; zExpr[iOut++] = '"'; for(jj=iFirst; jjnPhrase + p2->nPhrase; @@ -418,7 +483,7 @@ int sqlite3Fts5ExprAnd(Fts5Expr **pp1, Fts5Expr *p2){ } sqlite3_free(p2->apExprPhrase); sqlite3_free(p2); - }else{ + }else if( p2 ){ *pp1 = p2; } @@ -916,7 +981,7 @@ static int fts5ExprNearInitAll( p->pIter = 0; } rc = sqlite3Fts5IndexQuery( - pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm), + pExpr->pIndex, p->pTerm, p->nQueryTerm, (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) | (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0), pNear->pColset, @@ -1072,7 +1137,7 @@ static int fts5ExprNodeTest_STRING( } }else{ Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter; - if( pIter->iRowid==iLast || pIter->bEof ) continue; + if( pIter->iRowid==iLast ) continue; bMatch = 0; if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){ return rc; @@ -1484,7 +1549,13 @@ static int fts5ExprNodeFirst(Fts5Expr *pExpr, Fts5ExprNode *pNode){ ** Return SQLITE_OK if successful, or an SQLite error code otherwise. It ** is not considered an error if the query does not match any documents. 
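**
** Note (added for clarity, not upstream text): this version of the
** function also takes an upper rowid bound. The expected call shape is
**
**     rc = sqlite3Fts5ExprFirst(pExpr, pIdx, iFirst, iLast, bDesc);
**
** and any match whose rowid lies past iLast (in the current sort order)
** is reported as EOF rather than returned.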
*/ -int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){ +int sqlite3Fts5ExprFirst( + Fts5Expr *p, + Fts5Index *pIdx, + i64 iFirst, + i64 iLast, + int bDesc +){ Fts5ExprNode *pRoot = p->pRoot; int rc; /* Return code */ @@ -1506,6 +1577,9 @@ int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bDesc){ assert( pRoot->bEof==0 ); rc = fts5ExprNodeNext(p, pRoot, 0, 0); } + if( fts5RowidCmp(p, pRoot->iRowid, iLast)>0 ){ + pRoot->bEof = 1; + } return rc; } @@ -1553,7 +1627,7 @@ static void fts5ExprPhraseFree(Fts5ExprPhrase *pPhrase){ Fts5ExprTerm *pSyn; Fts5ExprTerm *pNext; Fts5ExprTerm *pTerm = &pPhrase->aTerm[i]; - sqlite3_free(pTerm->zTerm); + sqlite3_free(pTerm->pTerm); sqlite3Fts5IterClose(pTerm->pIter); for(pSyn=pTerm->pSynonym; pSyn; pSyn=pNext){ pNext = pSyn->pSynonym; @@ -1594,12 +1668,9 @@ Fts5ExprNearset *sqlite3Fts5ParseNearset( Fts5ExprNearset *pRet = 0; if( pParse->rc==SQLITE_OK ){ - if( pPhrase==0 ){ - return pNear; - } if( pNear==0 ){ sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + SZALLOC * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(SZALLOC+1); pRet = sqlite3_malloc64(nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -1610,7 +1681,7 @@ Fts5ExprNearset *sqlite3Fts5ParseNearset( int nNew = pNear->nPhrase + SZALLOC; sqlite3_int64 nByte; - nByte = sizeof(Fts5ExprNearset) + nNew * sizeof(Fts5ExprPhrase*); + nByte = SZ_FTS5EXPRNEARSET(nNew+1); pRet = (Fts5ExprNearset*)sqlite3_realloc64(pNear, nByte); if( pRet==0 ){ pParse->rc = SQLITE_NOMEM; @@ -1627,6 +1698,9 @@ Fts5ExprNearset *sqlite3Fts5ParseNearset( }else{ if( pRet->nPhrase>0 ){ Fts5ExprPhrase *pLast = pRet->apPhrase[pRet->nPhrase-1]; + assert( pParse!=0 ); + assert( pParse->apPhrase!=0 ); + assert( pParse->nPhrase>=2 ); assert( pLast==pParse->apPhrase[pParse->nPhrase-2] ); if( pPhrase->nTerm==0 ){ fts5ExprPhraseFree(pPhrase); @@ -1648,6 +1722,7 @@ Fts5ExprNearset *sqlite3Fts5ParseNearset( typedef struct TokenCtx TokenCtx; struct TokenCtx { Fts5ExprPhrase *pPhrase; + Fts5Config *pConfig; int rc; }; @@ -1681,8 +1756,12 @@ static int fts5ParseTokenize( rc = SQLITE_NOMEM; }else{ memset(pSyn, 0, (size_t)nByte); - pSyn->zTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); - memcpy(pSyn->zTerm, pToken, nToken); + pSyn->pTerm = ((char*)pSyn) + sizeof(Fts5ExprTerm) + sizeof(Fts5Buffer); + pSyn->nFullTerm = pSyn->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata ){ + pSyn->nQueryTerm = (int)strlen(pSyn->pTerm); + } + memcpy(pSyn->pTerm, pToken, nToken); pSyn->pSynonym = pPhrase->aTerm[pPhrase->nTerm-1].pSynonym; pPhrase->aTerm[pPhrase->nTerm-1].pSynonym = pSyn; } @@ -1693,12 +1772,12 @@ static int fts5ParseTokenize( int nNew = SZALLOC + (pPhrase ? 
pPhrase->nTerm : 0); pNew = (Fts5ExprPhrase*)sqlite3_realloc64(pPhrase, - sizeof(Fts5ExprPhrase) + sizeof(Fts5ExprTerm) * nNew + SZ_FTS5EXPRPHRASE(nNew+1) ); if( pNew==0 ){ rc = SQLITE_NOMEM; }else{ - if( pPhrase==0 ) memset(pNew, 0, sizeof(Fts5ExprPhrase)); + if( pPhrase==0 ) memset(pNew, 0, SZ_FTS5EXPRPHRASE(1)); pCtx->pPhrase = pPhrase = pNew; pNew->nTerm = nNew - SZALLOC; } @@ -1707,7 +1786,11 @@ static int fts5ParseTokenize( if( rc==SQLITE_OK ){ pTerm = &pPhrase->aTerm[pPhrase->nTerm++]; memset(pTerm, 0, sizeof(Fts5ExprTerm)); - pTerm->zTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->pTerm = sqlite3Fts5Strndup(&rc, pToken, nToken); + pTerm->nFullTerm = pTerm->nQueryTerm = nToken; + if( pCtx->pConfig->bTokendata && rc==SQLITE_OK ){ + pTerm->nQueryTerm = (int)strlen(pTerm->pTerm); + } } } @@ -1774,6 +1857,7 @@ Fts5ExprPhrase *sqlite3Fts5ParseTerm( memset(&sCtx, 0, sizeof(TokenCtx)); sCtx.pPhrase = pAppend; + sCtx.pConfig = pConfig; rc = fts5ParseStringFromToken(pToken, &z); if( rc==SQLITE_OK ){ @@ -1801,10 +1885,11 @@ Fts5ExprPhrase *sqlite3Fts5ParseTerm( if( sCtx.pPhrase==0 ){ /* This happens when parsing a token or quoted phrase that contains ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, sizeof(Fts5ExprPhrase)); + sCtx.pPhrase = sqlite3Fts5MallocZero(&pParse->rc, SZ_FTS5EXPRPHRASE(1)); }else if( sCtx.pPhrase->nTerm ){ sCtx.pPhrase->aTerm[sCtx.pPhrase->nTerm-1].bPrefix = (u8)bPrefix; } + assert( pParse->apPhrase!=0 ); pParse->apPhrase[pParse->nPhrase-1] = sCtx.pPhrase; } @@ -1821,30 +1906,32 @@ int sqlite3Fts5ExprClonePhrase( Fts5Expr **ppNew ){ int rc = SQLITE_OK; /* Return code */ - Fts5ExprPhrase *pOrig; /* The phrase extracted from pExpr */ + Fts5ExprPhrase *pOrig = 0; /* The phrase extracted from pExpr */ Fts5Expr *pNew = 0; /* Expression to return via *ppNew */ - TokenCtx sCtx = {0,0}; /* Context object for fts5ParseTokenize */ - - pOrig = pExpr->apExprPhrase[iPhrase]; - pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + TokenCtx sCtx = {0,0,0}; /* Context object for fts5ParseTokenize */ + if( !pExpr || iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + rc = SQLITE_RANGE; + }else{ + pOrig = pExpr->apExprPhrase[iPhrase]; + pNew = (Fts5Expr*)sqlite3Fts5MallocZero(&rc, sizeof(Fts5Expr)); + } if( rc==SQLITE_OK ){ pNew->apExprPhrase = (Fts5ExprPhrase**)sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase*)); } if( rc==SQLITE_OK ){ - pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNode)); + pNew->pRoot = (Fts5ExprNode*)sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRNODE(1)); } if( rc==SQLITE_OK ){ - pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, - sizeof(Fts5ExprNearset) + sizeof(Fts5ExprPhrase*)); + pNew->pRoot->pNear = (Fts5ExprNearset*)sqlite3Fts5MallocZero(&rc, + SZ_FTS5EXPRNEARSET(2)); } - if( rc==SQLITE_OK ){ + if( rc==SQLITE_OK && ALWAYS(pOrig!=0) ){ Fts5Colset *pColsetOrig = pOrig->pNode->pNear->pColset; if( pColsetOrig ){ sqlite3_int64 nByte; Fts5Colset *pColset; - nByte = sizeof(Fts5Colset) + (pColsetOrig->nCol-1) * sizeof(int); + nByte = SZ_FTS5COLSET(pColsetOrig->nCol); pColset = (Fts5Colset*)sqlite3Fts5MallocZero(&rc, nByte); if( pColset ){ memcpy(pColset, pColsetOrig, (size_t)nByte); @@ -1853,26 +1940,27 @@ int sqlite3Fts5ExprClonePhrase( } } - if( pOrig->nTerm ){ - int i; /* Used to iterate through phrase terms */ - for(i=0; rc==SQLITE_OK && inTerm; i++){ - int tflags = 0; - Fts5ExprTerm *p; - for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ - const char 
*zTerm = p->zTerm; - rc = fts5ParseTokenize((void*)&sCtx, tflags, zTerm, (int)strlen(zTerm), - 0, 0); - tflags = FTS5_TOKEN_COLOCATED; - } - if( rc==SQLITE_OK ){ - sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; - sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + if( rc==SQLITE_OK ){ + if( pOrig->nTerm ){ + int i; /* Used to iterate through phrase terms */ + sCtx.pConfig = pExpr->pConfig; + for(i=0; rc==SQLITE_OK && inTerm; i++){ + int tflags = 0; + Fts5ExprTerm *p; + for(p=&pOrig->aTerm[i]; p && rc==SQLITE_OK; p=p->pSynonym){ + rc = fts5ParseTokenize((void*)&sCtx,tflags,p->pTerm,p->nFullTerm,0,0); + tflags = FTS5_TOKEN_COLOCATED; + } + if( rc==SQLITE_OK ){ + sCtx.pPhrase->aTerm[i].bPrefix = pOrig->aTerm[i].bPrefix; + sCtx.pPhrase->aTerm[i].bFirst = pOrig->aTerm[i].bFirst; + } } + }else{ + /* This happens when parsing a token or quoted phrase that contains + ** no token characters at all. (e.g ... MATCH '""'). */ + sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, SZ_FTS5EXPRPHRASE(1)); } - }else{ - /* This happens when parsing a token or quoted phrase that contains - ** no token characters at all. (e.g ... MATCH '""'). */ - sCtx.pPhrase = sqlite3Fts5MallocZero(&rc, sizeof(Fts5ExprPhrase)); } if( rc==SQLITE_OK && ALWAYS(sCtx.pPhrase) ){ @@ -1936,7 +2024,8 @@ void sqlite3Fts5ParseSetDistance( ); return; } - nNear = nNear * 10 + (p->p[i] - '0'); + if( nNear<214748363 ) nNear = nNear * 10 + (p->p[i] - '0'); + /* ^^^^^^^^^^^^^^^--- Prevent integer overflow */ } }else{ nNear = FTS5_DEFAULT_NEARDIST; @@ -1965,7 +2054,7 @@ static Fts5Colset *fts5ParseColset( assert( pParse->rc==SQLITE_OK ); assert( iCol>=0 && iColpConfig->nCol ); - pNew = sqlite3_realloc64(p, sizeof(Fts5Colset) + sizeof(int)*nCol); + pNew = sqlite3_realloc64(p, SZ_FTS5COLSET(nCol+1)); if( pNew==0 ){ pParse->rc = SQLITE_NOMEM; }else{ @@ -2000,7 +2089,7 @@ Fts5Colset *sqlite3Fts5ParseColsetInvert(Fts5Parse *pParse, Fts5Colset *p){ int nCol = pParse->pConfig->nCol; pRet = (Fts5Colset*)sqlite3Fts5MallocZero(&pParse->rc, - sizeof(Fts5Colset) + sizeof(int)*nCol + SZ_FTS5COLSET(nCol+1) ); if( pRet ){ int i; @@ -2061,7 +2150,7 @@ Fts5Colset *sqlite3Fts5ParseColset( static Fts5Colset *fts5CloneColset(int *pRc, Fts5Colset *pOrig){ Fts5Colset *pRet; if( pOrig ){ - sqlite3_int64 nByte = sizeof(Fts5Colset) + (pOrig->nCol-1) * sizeof(int); + sqlite3_int64 nByte = SZ_FTS5COLSET(pOrig->nCol); pRet = (Fts5Colset*)sqlite3Fts5MallocZero(pRc, nByte); if( pRet ){ memcpy(pRet, pOrig, (size_t)nByte); @@ -2188,7 +2277,11 @@ static void fts5ExprAssignXNext(Fts5ExprNode *pNode){ } } +/* +** Add pSub as a child of p. 
+*/ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ + int ii = p->nChild; if( p->eType!=FTS5_NOT && pSub->eType==p->eType ){ int nByte = sizeof(Fts5ExprNode*) * pSub->nChild; memcpy(&p->apChild[p->nChild], pSub->apChild, nByte); @@ -2197,6 +2290,9 @@ static void fts5ExprAddChildren(Fts5ExprNode *p, Fts5ExprNode *pSub){ }else{ p->apChild[p->nChild++] = pSub; } + for( ; iinChild; ii++){ + p->iHeight = MAX(p->iHeight, p->apChild[ii]->iHeight + 1); + } } /* @@ -2222,26 +2318,29 @@ static Fts5ExprNode *fts5ParsePhraseToAnd( assert( pNear->nPhrase==1 ); assert( pParse->bPhraseToAnd ); - nByte = sizeof(Fts5ExprNode) + nTerm*sizeof(Fts5ExprNode*); + nByte = SZ_FTS5EXPRNODE(nTerm+1); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ pRet->eType = FTS5_AND; pRet->nChild = nTerm; + pRet->iHeight = 1; fts5ExprAssignXNext(pRet); pParse->nPhrase--; for(ii=0; iirc, sizeof(Fts5ExprPhrase) + &pParse->rc, SZ_FTS5EXPRPHRASE(1) ); if( pPhrase ){ if( parseGrowPhraseArray(pParse) ){ fts5ExprPhraseFree(pPhrase); }else{ + Fts5ExprTerm *p = &pNear->apPhrase[0]->aTerm[ii]; + Fts5ExprTerm *pTo = &pPhrase->aTerm[0]; pParse->apPhrase[pParse->nPhrase++] = pPhrase; pPhrase->nTerm = 1; - pPhrase->aTerm[0].zTerm = sqlite3Fts5Strndup( - &pParse->rc, pNear->apPhrase[0]->aTerm[ii].zTerm, -1 - ); + pTo->pTerm = sqlite3Fts5Strndup(&pParse->rc, p->pTerm, p->nFullTerm); + pTo->nQueryTerm = p->nQueryTerm; + pTo->nFullTerm = p->nFullTerm; pRet->apChild[ii] = sqlite3Fts5ParseNode(pParse, FTS5_STRING, 0, 0, sqlite3Fts5ParseNearset(pParse, 0, pPhrase) ); @@ -2298,7 +2397,7 @@ Fts5ExprNode *sqlite3Fts5ParseNode( if( pRight->eType==eType ) nChild += pRight->nChild-1; } - nByte = sizeof(Fts5ExprNode) + sizeof(Fts5ExprNode*)*(nChild-1); + nByte = SZ_FTS5EXPRNODE(nChild); pRet = (Fts5ExprNode*)sqlite3Fts5MallocZero(&pParse->rc, nByte); if( pRet ){ @@ -2325,13 +2424,25 @@ Fts5ExprNode *sqlite3Fts5ParseNode( "fts5: %s queries are not supported (detail!=full)", pNear->nPhrase==1 ? "phrase": "NEAR" ); - sqlite3_free(pRet); + sqlite3Fts5ParseNodeFree(pRet); pRet = 0; + pNear = 0; + assert( pLeft==0 && pRight==0 ); } } }else{ + assert( pNear==0 ); fts5ExprAddChildren(pRet, pLeft); fts5ExprAddChildren(pRet, pRight); + pLeft = pRight = 0; + if( pRet->iHeight>SQLITE_FTS5_MAX_EXPR_DEPTH ){ + sqlite3Fts5ParseError(pParse, + "fts5 expression tree is too large (maximum depth %d)", + SQLITE_FTS5_MAX_EXPR_DEPTH + ); + sqlite3Fts5ParseNodeFree(pRet); + pRet = 0; + } } } } @@ -2367,6 +2478,7 @@ Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( assert( pRight->eType==FTS5_STRING || pRight->eType==FTS5_TERM || pRight->eType==FTS5_EOF + || (pRight->eType==FTS5_AND && pParse->bPhraseToAnd) ); if( pLeft->eType==FTS5_AND ){ @@ -2380,6 +2492,8 @@ Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( ); if( pRight->eType==FTS5_EOF ){ + assert( pParse->apPhrase!=0 ); + assert( pParse->nPhrase>0 ); assert( pParse->apPhrase[pParse->nPhrase-1]==pRight->pNear->apPhrase[0] ); sqlite3Fts5ParseNodeFree(pRight); pRet = pLeft; @@ -2410,7 +2524,7 @@ Fts5ExprNode *sqlite3Fts5ParseImplicitAnd( return pRet; } -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ sqlite3_int64 nByte = 0; Fts5ExprTerm *p; @@ -2418,16 +2532,17 @@ static char *fts5ExprTermPrint(Fts5ExprTerm *pTerm){ /* Determine the maximum amount of space required. 
*/ for(p=pTerm; p; p=p->pSynonym){ - nByte += (int)strlen(pTerm->zTerm) * 2 + 3 + 2; + nByte += pTerm->nQueryTerm * 2 + 3 + 2; } zQuoted = sqlite3_malloc64(nByte); if( zQuoted ){ int i = 0; for(p=pTerm; p; p=p->pSynonym){ - char *zIn = p->zTerm; + char *zIn = p->pTerm; + char *zEnd = &zIn[p->nQueryTerm]; zQuoted[i++] = '"'; - while( *zIn ){ + while( zInnTerm; iTerm++){ - char *zTerm = pPhrase->aTerm[iTerm].zTerm; - zRet = fts5PrintfAppend(zRet, "%s%s", iTerm==0?"":" ", zTerm); + Fts5ExprTerm *p = &pPhrase->aTerm[iTerm]; + zRet = fts5PrintfAppend(zRet, "%s%.*s", iTerm==0?"":" ", + p->nQueryTerm, p->pTerm + ); if( pPhrase->aTerm[iTerm].bPrefix ){ zRet = fts5PrintfAppend(zRet, "*"); } @@ -2516,6 +2633,8 @@ static char *fts5ExprPrintTcl( if( zRet==0 ) return 0; } + }else if( pExpr->eType==0 ){ + zRet = sqlite3_mprintf("{}"); }else{ char const *zOp = 0; int i; @@ -2777,14 +2896,14 @@ static void fts5ExprFold( sqlite3_result_int(pCtx, sqlite3Fts5UnicodeFold(iCode, bRemoveDiacritics)); } } -#endif /* ifdef SQLITE_TEST */ +#endif /* if SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called during initialization to register the fts5_expr() scalar ** UDF with the SQLite handle passed as the only argument. */ int sqlite3Fts5ExprInit(Fts5Global *pGlobal, sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) struct Fts5ExprFunc { const char *z; void (*x)(sqlite3_context*,int,sqlite3_value**); @@ -2905,6 +3024,17 @@ static int fts5ExprColsetTest(Fts5Colset *pColset, int iCol){ return 0; } +/* +** pToken is a buffer nToken bytes in size that may or may not contain +** an embedded 0x00 byte. If it does, return the number of bytes in +** the buffer before the 0x00. If it does not, return nToken. +*/ +static int fts5QueryTerm(const char *pToken, int nToken){ + int ii; + for(ii=0; iipExpr; int i; + int nQuery = nToken; + i64 iRowid = pExpr->pRoot->iRowid; UNUSED_PARAM2(iUnused1, iUnused2); - if( nToken>FTS5_MAX_TOKEN_SIZE ) nToken = FTS5_MAX_TOKEN_SIZE; + if( nQuery>FTS5_MAX_TOKEN_SIZE ) nQuery = FTS5_MAX_TOKEN_SIZE; + if( pExpr->pConfig->bTokendata ){ + nQuery = fts5QueryTerm(pToken, nQuery); + } if( (tflags & FTS5_TOKEN_COLOCATED)==0 ) p->iOff++; for(i=0; inPhrase; i++){ - Fts5ExprTerm *pTerm; + Fts5ExprTerm *pT; if( p->aPopulator[i].bOk==0 ) continue; - for(pTerm=&pExpr->apExprPhrase[i]->aTerm[0]; pTerm; pTerm=pTerm->pSynonym){ - int nTerm = (int)strlen(pTerm->zTerm); - if( (nTerm==nToken || (nTermbPrefix)) - && memcmp(pTerm->zTerm, pToken, nTerm)==0 + for(pT=&pExpr->apExprPhrase[i]->aTerm[0]; pT; pT=pT->pSynonym){ + if( (pT->nQueryTerm==nQuery || (pT->nQueryTermbPrefix)) + && memcmp(pT->pTerm, pToken, pT->nQueryTerm)==0 ){ int rc = sqlite3Fts5PoslistWriterAppend( &pExpr->apExprPhrase[i]->poslist, &p->aPopulator[i].writer, p->iOff ); + if( rc==SQLITE_OK && (pExpr->pConfig->bTokendata || pT->bPrefix) ){ + int iCol = p->iOff>>32; + int iTokOff = p->iOff & 0x7FFFFFFF; + rc = sqlite3Fts5IndexIterWriteTokendata( + pT->pIter, pToken, nToken, iRowid, iCol, iTokOff + ); + } if( rc ) return rc; break; } @@ -2985,6 +3126,7 @@ static int fts5ExprCheckPoslists(Fts5ExprNode *pNode, i64 iRowid){ pNode->iRowid = iRowid; pNode->bEof = 0; switch( pNode->eType ){ + case 0: case FTS5_TERM: case FTS5_STRING: return (pNode->pNear->apPhrase[0]->poslist.n>0); @@ -3066,3 +3208,79 @@ int sqlite3Fts5ExprPhraseCollist( return rc; } + +/* +** Does the work of the fts5_api.xQueryToken() API method. 
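+**
+** iPhrase identifies one of the pExpr->nPhrase phrases and iToken one of
+** its terms. On success, *ppOut and *pnOut are set to the full text of that
+** term as it appeared in the query and SQLITE_OK is returned. If either
+** index is out of range, SQLITE_RANGE is returned instead.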
+*/ +int sqlite3Fts5ExprQueryToken( + Fts5Expr *pExpr, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + + *ppOut = pPhrase->aTerm[iToken].pTerm; + *pnOut = pPhrase->aTerm[iToken].nFullTerm; + return SQLITE_OK; +} + +/* +** Does the work of the fts5_api.xInstToken() API method. +*/ +int sqlite3Fts5ExprInstToken( + Fts5Expr *pExpr, + i64 iRowid, + int iPhrase, + int iCol, + int iOff, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5ExprPhrase *pPhrase = 0; + Fts5ExprTerm *pTerm = 0; + int rc = SQLITE_OK; + + if( iPhrase<0 || iPhrase>=pExpr->nPhrase ){ + return SQLITE_RANGE; + } + pPhrase = pExpr->apExprPhrase[iPhrase]; + if( iToken<0 || iToken>=pPhrase->nTerm ){ + return SQLITE_RANGE; + } + pTerm = &pPhrase->aTerm[iToken]; + if( pExpr->pConfig->bTokendata || pTerm->bPrefix ){ + rc = sqlite3Fts5IterToken( + pTerm->pIter, pTerm->pTerm, pTerm->nQueryTerm, + iRowid, iCol, iOff+iToken, ppOut, pnOut + ); + }else{ + *ppOut = pTerm->pTerm; + *pnOut = pTerm->nFullTerm; + } + return rc; +} + +/* +** Clear the token mappings for all Fts5IndexIter objects managed by +** the expression passed as the only argument. +*/ +void sqlite3Fts5ExprClearTokens(Fts5Expr *pExpr){ + int ii; + for(ii=0; iinPhrase; ii++){ + Fts5ExprTerm *pT; + for(pT=&pExpr->apExprPhrase[ii]->aTerm[0]; pT; pT=pT->pSynonym){ + sqlite3Fts5IndexIterClearTokendata(pT->pIter); + } + } +} diff --git a/ext/fts5/fts5_hash.c b/ext/fts5/fts5_hash.c index bc9244fc01..a33dec9a92 100644 --- a/ext/fts5/fts5_hash.c +++ b/ext/fts5/fts5_hash.c @@ -20,7 +20,7 @@ typedef struct Fts5HashEntry Fts5HashEntry; /* ** This file contains the implementation of an in-memory hash table used -** to accumuluate "term -> doclist" content before it is flused to a level-0 +** to accumulate "term -> doclist" content before it is flushed to a level-0 ** segment. */ @@ -36,10 +36,15 @@ struct Fts5Hash { /* ** Each entry in the hash table is represented by an object of the -** following type. Each object, its key (a nul-terminated string) and -** its current data are stored in a single memory allocation. The -** key immediately follows the object in memory. The position list -** data immediately follows the key data in memory. +** following type. Each object, its key, and its current data are stored +** in a single memory allocation. The key immediately follows the object +** in memory. The position list data immediately follows the key data +** in memory. +** +** The key is Fts5HashEntry.nKey bytes in size. It consists of a single +** byte identifying the index (either the main term index or a prefix-index), +** followed by the term data. For example: "0token". There is no +** nul-terminator - in this case nKey=6. ** ** The data that follows the key is in a similar, but not identical format ** to the doclist data stored in the database. 
It is: @@ -72,7 +77,7 @@ struct Fts5HashEntry { }; /* -** Eqivalent to: +** Equivalent to: ** ** char *fts5EntryKey(Fts5HashEntry *pEntry){ return zKey; } */ @@ -174,8 +179,7 @@ static int fts5HashResize(Fts5Hash *pHash){ unsigned int iHash; Fts5HashEntry *p = apOld[i]; apOld[i] = p->pHashNext; - iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), - (int)strlen(fts5EntryKey(p))); + iHash = fts5HashKey(nNew, (u8*)fts5EntryKey(p), p->nKey); p->pHashNext = apNew[iHash]; apNew[iHash] = p; } @@ -259,7 +263,7 @@ int sqlite3Fts5HashWrite( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ char *zKey = fts5EntryKey(p); if( zKey[0]==bByte - && p->nKey==nToken + && p->nKey==nToken+1 && memcmp(&zKey[1], pToken, nToken)==0 ){ break; @@ -289,9 +293,9 @@ int sqlite3Fts5HashWrite( zKey[0] = bByte; memcpy(&zKey[1], pToken, nToken); assert( iHash==fts5HashKey(pHash->nSlot, (u8*)zKey, nToken+1) ); - p->nKey = nToken; + p->nKey = nToken+1; zKey[nToken+1] = '\0'; - p->nData = nToken+1 + 1 + sizeof(Fts5HashEntry); + p->nData = nToken+1 + sizeof(Fts5HashEntry); p->pHashNext = pHash->aSlot[iHash]; pHash->aSlot[iHash] = p; pHash->nEntry++; @@ -408,12 +412,17 @@ static Fts5HashEntry *fts5HashEntryMerge( *ppOut = p1; p1 = 0; }else{ - int i = 0; char *zKey1 = fts5EntryKey(p1); char *zKey2 = fts5EntryKey(p2); - while( zKey1[i]==zKey2[i] ) i++; + int nMin = MIN(p1->nKey, p2->nKey); + + int cmp = memcmp(zKey1, zKey2, nMin); + if( cmp==0 ){ + cmp = p1->nKey - p2->nKey; + } + assert( cmp!=0 ); - if( ((u8)zKey1[i])>((u8)zKey2[i]) ){ + if( cmp>0 ){ /* p2 is smaller */ *ppOut = p2; ppOut = &p2->pScanNext; @@ -432,10 +441,8 @@ static Fts5HashEntry *fts5HashEntryMerge( } /* -** Extract all tokens from hash table iHash and link them into a list -** in sorted order. The hash table is cleared before returning. It is -** the responsibility of the caller to free the elements of the returned -** list. +** Link all tokens from hash table iHash into a list in sorted order. The +** tokens are not removed from the hash table. */ static int fts5HashEntrySort( Fts5Hash *pHash, @@ -457,7 +464,7 @@ static int fts5HashEntrySort( Fts5HashEntry *pIter; for(pIter=pHash->aSlot[iSlot]; pIter; pIter=pIter->pHashNext){ if( pTerm==0 - || (pIter->nKey+1>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) + || (pIter->nKey>=nTerm && 0==memcmp(fts5EntryKey(pIter), pTerm, nTerm)) ){ Fts5HashEntry *pEntry = pIter; pEntry->pScanNext = 0; @@ -475,7 +482,6 @@ static int fts5HashEntrySort( pList = fts5HashEntryMerge(pList, ap[i]); } - pHash->nEntry = 0; sqlite3_free(ap); *ppSorted = pList; return SQLITE_OK; @@ -497,12 +503,11 @@ int sqlite3Fts5HashQuery( for(p=pHash->aSlot[iHash]; p; p=p->pHashNext){ zKey = fts5EntryKey(p); - assert( p->nKey+1==(int)strlen(zKey) ); - if( nTerm==p->nKey+1 && memcmp(zKey, pTerm, nTerm)==0 ) break; + if( nTerm==p->nKey && memcmp(zKey, pTerm, nTerm)==0 ) break; } if( p ){ - int nHashPre = sizeof(Fts5HashEntry) + nTerm + 1; + int nHashPre = sizeof(Fts5HashEntry) + nTerm; int nList = p->nData - nHashPre; u8 *pRet = (u8*)(*ppOut = sqlite3_malloc64(nPre + nList + 10)); if( pRet ){ @@ -529,6 +534,28 @@ int sqlite3Fts5HashScanInit( return fts5HashEntrySort(p, pTerm, nTerm, &p->pScan); } +#ifdef SQLITE_DEBUG +static int fts5HashCount(Fts5Hash *pHash){ + int nEntry = 0; + int ii; + for(ii=0; iinSlot; ii++){ + Fts5HashEntry *p = 0; + for(p=pHash->aSlot[ii]; p; p=p->pHashNext){ + nEntry++; + } + } + return nEntry; +} +#endif + +/* +** Return true if the hash table is empty, false otherwise. 
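+** In SQLITE_DEBUG builds the assert() below also verifies that the
+** pHash->nEntry counter matches a full count of the entries in the table
+** (see fts5HashCount() above).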
+*/ +int sqlite3Fts5HashIsEmpty(Fts5Hash *pHash){ + assert( pHash->nEntry==fts5HashCount(pHash) ); + return pHash->nEntry==0; +} + void sqlite3Fts5HashScanNext(Fts5Hash *p){ assert( !sqlite3Fts5HashScanEof(p) ); p->pScan = p->pScan->pScanNext; @@ -541,19 +568,22 @@ int sqlite3Fts5HashScanEof(Fts5Hash *p){ void sqlite3Fts5HashScanEntry( Fts5Hash *pHash, const char **pzTerm, /* OUT: term (nul-terminated) */ + int *pnTerm, /* OUT: Size of term in bytes */ const u8 **ppDoclist, /* OUT: pointer to doclist */ int *pnDoclist /* OUT: size of doclist in bytes */ ){ Fts5HashEntry *p; if( (p = pHash->pScan) ){ char *zKey = fts5EntryKey(p); - int nTerm = (int)strlen(zKey); + int nTerm = p->nKey; fts5HashAddPoslistSize(pHash, p, 0); *pzTerm = zKey; - *ppDoclist = (const u8*)&zKey[nTerm+1]; - *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm + 1); + *pnTerm = nTerm; + *ppDoclist = (const u8*)&zKey[nTerm]; + *pnDoclist = p->nData - (sizeof(Fts5HashEntry) + nTerm); }else{ *pzTerm = 0; + *pnTerm = 0; *ppDoclist = 0; *pnDoclist = 0; } diff --git a/ext/fts5/fts5_index.c b/ext/fts5/fts5_index.c index fe253984b3..7e25731ed5 100644 --- a/ext/fts5/fts5_index.c +++ b/ext/fts5/fts5_index.c @@ -54,6 +54,26 @@ # error "FTS5_MAX_PREFIX_INDEXES is too large" #endif +#define FTS5_MAX_LEVEL 64 + +/* +** There are two versions of the format used for the structure record: +** +** 1. the legacy format, that may be read by all fts5 versions, and +** +** 2. the V2 format, which is used by contentless_delete=1 databases. +** +** Both begin with a 4-byte "configuration cookie" value. Then, a legacy +** format structure record contains a varint - the number of levels in +** the structure. Whereas a V2 structure record contains the constant +** 4 bytes [0xff 0x00 0x00 0x01]. This is unambiguous as the value of a +** varint has to be at least 16256 to begin with "0xFF". And the default +** maximum number of levels is 64. +** +** See below for more on structure record formats. +*/ +#define FTS5_STRUCTURE_V2 "\xFF\x00\x00\x01" + /* ** Details: ** @@ -61,7 +81,7 @@ ** ** CREATE TABLE %_data(id INTEGER PRIMARY KEY, block BLOB); ** -** , contains the following 5 types of records. See the comments surrounding +** , contains the following 6 types of records. See the comments surrounding ** the FTS5_*_ROWID macros below for a description of how %_data rowids are ** assigned to each fo them. ** @@ -69,13 +89,13 @@ ** ** The set of segments that make up an index - the index structure - are ** recorded in a single record within the %_data table. The record consists -** of a single 32-bit configuration cookie value followed by a list of -** SQLite varints. If the FTS table features more than one index (because -** there are one or more prefix indexes), it is guaranteed that all share -** the same cookie value. +** of a single 32-bit configuration cookie value followed by a list of +** SQLite varints. ** -** Immediately following the configuration cookie, the record begins with -** three varints: +** If the structure record is a V2 record, the configuration cookie is +** followed by the following 4 bytes: [0xFF 0x00 0x00 0x01]. +** +** Next, the record continues with three varints: ** ** + number of levels, ** + total number of segments on all levels, @@ -90,6 +110,12 @@ ** + first leaf page number (often 1, always greater than 0) ** + final leaf page number ** +** Then, for V2 structures only: +** +** + lower origin counter value, +** + upper origin counter value, +** + the number of tombstone hash pages. +** ** 2. 
The Averages Record: ** ** A single record within the %_data table. The data is a list of varints. @@ -205,6 +231,38 @@ ** * A list of delta-encoded varints - the first rowid on each subsequent ** child page. ** +** 6. Tombstone Hash Page +** +** These records are only ever present in contentless_delete=1 tables. +** There are zero or more of these associated with each segment. They +** are used to store the tombstone rowids for rows contained in the +** associated segments. +** +** The set of nHashPg tombstone hash pages associated with a single +** segment together form a single hash table containing tombstone rowids. +** To find the page of the hash on which a key might be stored: +** +** iPg = (rowid % nHashPg) +** +** Then, within page iPg, which has nSlot slots: +** +** iSlot = (rowid / nHashPg) % nSlot +** +** Each tombstone hash page begins with an 8 byte header: +** +** 1-byte: Key-size (the size in bytes of each slot). Either 4 or 8. +** 1-byte: rowid-0-tombstone flag. This flag is only valid on the +** first tombstone hash page for each segment (iPg=0). If set, +** the hash table contains rowid 0. If clear, it does not. +** Rowid 0 is handled specially. +** 2-bytes: unused. +** 4-bytes: Big-endian integer containing number of entries on page. +** +** Following this are nSlot 4 or 8 byte slots (depending on the key-size +** in the first byte of the page header). The number of slots may be +** determined based on the size of the page record and the key-size: +** +** nSlot = (nByte - 8) / key-size */ /* @@ -238,6 +296,7 @@ #define FTS5_SEGMENT_ROWID(segid, pgno) fts5_dri(segid, 0, 0, pgno) #define FTS5_DLIDX_ROWID(segid, height, pgno) fts5_dri(segid, 1, height, pgno) +#define FTS5_TOMBSTONE_ROWID(segid,ipg) fts5_dri(segid+(1<<16), 0, 0, ipg) #ifdef SQLITE_DEBUG int sqlite3Fts5Corrupt() { return SQLITE_CORRUPT_VTAB; } @@ -264,6 +323,9 @@ typedef struct Fts5SegWriter Fts5SegWriter; typedef struct Fts5Structure Fts5Structure; typedef struct Fts5StructureLevel Fts5StructureLevel; typedef struct Fts5StructureSegment Fts5StructureSegment; +typedef struct Fts5TokenDataIter Fts5TokenDataIter; +typedef struct Fts5TokenDataMap Fts5TokenDataMap; +typedef struct Fts5TombstoneArray Fts5TombstoneArray; struct Fts5Data { u8 *p; /* Pointer to buffer containing record */ @@ -273,6 +335,12 @@ struct Fts5Data { /* ** One object per %_data table. +** +** nContentlessDelete: +** The number of contentless delete operations since the most recent +** call to fts5IndexFlush() or fts5IndexDiscardData(). This is tracked +** so that extra auto-merge work can be done by fts5IndexFlush() to +** account for the delete operations. */ struct Fts5Index { Fts5Config *pConfig; /* Virtual table configuration */ @@ -287,19 +355,25 @@ struct Fts5Index { int nPendingData; /* Current bytes of pending data */ i64 iWriteRowid; /* Rowid for current doc being written */ int bDelete; /* Current write is a delete */ + int nContentlessDelete; /* Number of contentless delete ops */ + int nPendingRow; /* Number of INSERT in hash table */ /* Error state. */ int rc; /* Current error code */ + int flushRc; /* State used by the fts5DataXXX() functions. */ sqlite3_blob *pReader; /* RO incr-blob open on %_data table */ sqlite3_stmt *pWriter; /* "INSERT ... %_data VALUES(?,?)" */ sqlite3_stmt *pDeleter; /* "DELETE FROM %_data ... id>=? AND id<=?" */ sqlite3_stmt *pIdxWriter; /* "INSERT ... %_idx VALUES(?,?,?,?)" */ - sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=? 
*/ + sqlite3_stmt *pIdxDeleter; /* "DELETE FROM %_idx WHERE segid=?" */ sqlite3_stmt *pIdxSelect; + sqlite3_stmt *pIdxNextSelect; int nRead; /* Total number of blocks read */ + sqlite3_stmt *pDeleteFromIdx; + sqlite3_stmt *pDataVersion; i64 iStructVersion; /* data_version when pStruct read */ Fts5Structure *pStruct; /* Current db structure (or NULL) */ @@ -319,11 +393,23 @@ struct Fts5DoclistIter { ** The contents of the "structure" record for each index are represented ** using an Fts5Structure record in memory. Which uses instances of the ** other Fts5StructureXXX types as components. +** +** nOriginCntr: +** This value is set to non-zero for structure records created for +** contentlessdelete=1 tables only. In that case it represents the +** origin value to apply to the next top-level segment created. */ struct Fts5StructureSegment { int iSegid; /* Segment id */ int pgnoFirst; /* First leaf page number in segment */ int pgnoLast; /* Last leaf page number in segment */ + + /* contentlessdelete=1 tables only: */ + u64 iOrigin1; + u64 iOrigin2; + int nPgTombstone; /* Number of tombstone hash table pages */ + u64 nEntryTombstone; /* Number of tombstone entries that "count" */ + u64 nEntry; /* Number of rows in this segment */ }; struct Fts5StructureLevel { int nMerge; /* Number of segments in incr-merge */ @@ -333,11 +419,16 @@ struct Fts5StructureLevel { struct Fts5Structure { int nRef; /* Object reference count */ u64 nWriteCounter; /* Total leaves written to level 0 */ + u64 nOriginCntr; /* Origin value for next top-level segment */ int nSegment; /* Total segments in this structure */ int nLevel; /* Number of levels in this index */ - Fts5StructureLevel aLevel[1]; /* Array of nLevel level objects */ + Fts5StructureLevel aLevel[FLEXARRAY]; /* Array of nLevel level objects */ }; +/* Size (in bytes) of an Fts5Structure object holding up to N levels */ +#define SZ_FTS5STRUCTURE(N) \ + (offsetof(Fts5Structure,aLevel) + (N)*sizeof(Fts5StructureLevel)) + /* ** An object of type Fts5SegWriter is used to write to segments. */ @@ -392,9 +483,6 @@ struct Fts5CResult { ** iLeafOffset: ** Byte offset within the current leaf that is the first byte of the ** position list data (one byte passed the position-list size field). -** rowid field of the current entry. Usually this is the size field of the -** position list data. The exception is if the rowid for the current entry -** is the last thing on the leaf page. ** ** pLeaf: ** Buffer containing current leaf page data. Set to NULL at EOF. @@ -424,6 +512,13 @@ struct Fts5CResult { ** ** iTermIdx: ** Index of current term on iTermLeafPgno. +** +** apTombstone/nTombstone: +** These are used for contentless_delete=1 tables only. When the cursor +** is first allocated, the apTombstone[] array is allocated so that it +** is large enough for all tombstones hash pages associated with the +** segment. The pages themselves are loaded lazily from the database as +** they are required. 
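+** For example, when fts5MultiIterIsDeleted() needs to test a rowid against
+** a segment that has N tombstone pages, it computes iPg = (rowid % N) and
+** reads that page from the %_data table only if apTombstone[iPg] is still
+** NULL.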
*/ struct Fts5SegIter { Fts5StructureSegment *pSeg; /* Segment to iterate through */ @@ -432,6 +527,7 @@ struct Fts5SegIter { Fts5Data *pLeaf; /* Current leaf data */ Fts5Data *pNextLeaf; /* Leaf page (iLeafPgno+1) */ i64 iLeafOffset; /* Byte offset within current leaf */ + Fts5TombstoneArray *pTombArray; /* Array of tombstone pages */ /* Next method */ void (*xNext)(Fts5Index*, Fts5SegIter*, int*); @@ -458,6 +554,49 @@ struct Fts5SegIter { u8 bDel; /* True if the delete flag is set */ }; +static int fts5IndexCorruptRowid(Fts5Index *pIdx, i64 iRowid){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption found reading blob %lld from table \"%s\"", + iRowid, pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_ROWID(pIdx, iRowid) fts5IndexCorruptRowid(pIdx, iRowid) + +static int fts5IndexCorruptIter(Fts5Index *pIdx, Fts5SegIter *pIter){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption on page %d, segment %d, table \"%s\"", + pIter->iLeafPgno, pIter->pSeg->iSegid, pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_ITER(pIdx, pIter) fts5IndexCorruptIter(pIdx, pIter) + +static int fts5IndexCorruptIdx(Fts5Index *pIdx){ + pIdx->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(pIdx->pConfig, + "fts5: corruption in table \"%s\"", pIdx->pConfig->zName + ); + return SQLITE_CORRUPT_VTAB; +} +#define FTS5_CORRUPT_IDX(pIdx) fts5IndexCorruptIdx(pIdx) + + +/* +** Array of tombstone pages. Reference counted. +*/ +struct Fts5TombstoneArray { + int nRef; /* Number of pointers to this object */ + int nTombstone; + Fts5Data *apTombstone[FLEXARRAY]; /* Array of tombstone pages */ +}; + +/* Size (in bytes) of an Fts5TombstoneArray holding up to N tombstones */ +#define SZ_FTS5TOMBSTONEARRAY(N) \ + (offsetof(Fts5TombstoneArray,apTombstone)+(N)*sizeof(Fts5Data*)) + /* ** Argument is a pointer to an Fts5Data structure that contains a ** leaf page. @@ -502,9 +641,16 @@ struct Fts5SegIter { ** poslist: ** Used by sqlite3Fts5IterPoslist() when the poslist needs to be buffered. ** There is no way to tell if this is populated or not. +** +** pColset: +** If not NULL, points to an object containing a set of column indices. +** Only matches that occur in one of these columns will be returned. +** The Fts5Iter does not own the Fts5Colset object, and so it is not +** freed when the iterator is closed - it is owned by the upper layer. 
*/ struct Fts5Iter { Fts5IndexIter base; /* Base class containing output vars */ + Fts5TokenDataIter *pTokenDataIter; Fts5Index *pIndex; /* Index that owns this iterator */ Fts5Buffer poslist; /* Buffer containing current poslist */ @@ -519,9 +665,11 @@ struct Fts5Iter { i64 iSwitchRowid; /* Firstest rowid of other than aFirst[1] */ Fts5CResult *aFirst; /* Current merge state (see above) */ - Fts5SegIter aSeg[1]; /* Array of segment iterators */ + Fts5SegIter aSeg[FLEXARRAY]; /* Array of segment iterators */ }; +/* Size (in bytes) of an Fts5Iter object holding up to N segment iterators */ +#define SZ_FTS5ITER(N) (offsetof(Fts5Iter,aSeg)+(N)*sizeof(Fts5SegIter)) /* ** An instance of the following type is used to iterate through the contents @@ -549,9 +697,13 @@ struct Fts5DlidxLvl { struct Fts5DlidxIter { int nLvl; int iSegid; - Fts5DlidxLvl aLvl[1]; + Fts5DlidxLvl aLvl[FLEXARRAY]; }; +/* Size (in bytes) of an Fts5DlidxIter object with up to N levels */ +#define SZ_FTS5DLIDXITER(N) \ + (offsetof(Fts5DlidxIter,aLvl)+(N)*sizeof(Fts5DlidxLvl)) + static void fts5PutU16(u8 *aOut, u16 iVal){ aOut[0] = (iVal>>8); aOut[1] = (iVal&0xFF); @@ -561,6 +713,60 @@ static u16 fts5GetU16(const u8 *aIn){ return ((u16)aIn[0] << 8) + aIn[1]; } +/* +** The only argument points to a buffer at least 8 bytes in size. This +** function interprets the first 8 bytes of the buffer as a 64-bit big-endian +** unsigned integer and returns the result. +*/ +static u64 fts5GetU64(u8 *a){ + return ((u64)a[0] << 56) + + ((u64)a[1] << 48) + + ((u64)a[2] << 40) + + ((u64)a[3] << 32) + + ((u64)a[4] << 24) + + ((u64)a[5] << 16) + + ((u64)a[6] << 8) + + ((u64)a[7] << 0); +} + +/* +** The only argument points to a buffer at least 4 bytes in size. This +** function interprets the first 4 bytes of the buffer as a 32-bit big-endian +** unsigned integer and returns the result. +*/ +static u32 fts5GetU32(const u8 *a){ + return ((u32)a[0] << 24) + + ((u32)a[1] << 16) + + ((u32)a[2] << 8) + + ((u32)a[3] << 0); +} + +/* +** Write iVal, formated as a 64-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU64(u8 *a, u64 iVal){ + a[0] = ((iVal >> 56) & 0xFF); + a[1] = ((iVal >> 48) & 0xFF); + a[2] = ((iVal >> 40) & 0xFF); + a[3] = ((iVal >> 32) & 0xFF); + a[4] = ((iVal >> 24) & 0xFF); + a[5] = ((iVal >> 16) & 0xFF); + a[6] = ((iVal >> 8) & 0xFF); + a[7] = ((iVal >> 0) & 0xFF); +} + +/* +** Write iVal, formated as a 32-bit big-endian unsigned integer, to the +** buffer indicated by the first argument. +*/ +static void fts5PutU32(u8 *a, u32 iVal){ + a[0] = ((iVal >> 24) & 0xFF); + a[1] = ((iVal >> 16) & 0xFF); + a[2] = ((iVal >> 8) & 0xFF); + a[3] = ((iVal >> 0) & 0xFF); +} + /* ** Allocate and return a buffer at least nByte bytes in size. ** @@ -617,11 +823,13 @@ static int fts5LeafFirstTermOff(Fts5Data *pLeaf){ /* ** Close the read-only blob handle, if it is open. 
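+** Any error code returned by sqlite3_blob_close() is saved in Fts5Index.rc,
+** unless an earlier error is already stored there.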
*/ -void sqlite3Fts5IndexCloseReader(Fts5Index *p){ +static void fts5IndexCloseReader(Fts5Index *p){ if( p->pReader ){ + int rc; sqlite3_blob *pReader = p->pReader; p->pReader = 0; - sqlite3_blob_close(pReader); + rc = sqlite3_blob_close(pReader); + if( p->rc==SQLITE_OK ) p->rc = rc; } } @@ -646,7 +854,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ assert( p->pReader==0 ); p->pReader = pBlob; if( rc!=SQLITE_OK ){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); } if( rc==SQLITE_ABORT ) rc = SQLITE_OK; } @@ -665,16 +873,17 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ ** All the reasons those functions might return SQLITE_ERROR - missing ** table, missing row, non-blob/text in block column - indicate ** backing store corruption. */ - if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT; + if( rc==SQLITE_ERROR ) rc = FTS5_CORRUPT_ROWID(p, iRowid); if( rc==SQLITE_OK ){ u8 *aOut = 0; /* Read blob data into this buffer */ - int nByte = sqlite3_blob_bytes(p->pReader); - sqlite3_int64 nAlloc = sizeof(Fts5Data) + nByte + FTS5_DATA_PADDING; + i64 nByte = sqlite3_blob_bytes(p->pReader); + i64 szData = (sizeof(Fts5Data) + 7) & ~7; + i64 nAlloc = szData + nByte + FTS5_DATA_PADDING; pRet = (Fts5Data*)sqlite3_malloc64(nAlloc); if( pRet ){ pRet->nn = nByte; - aOut = pRet->p = (u8*)&pRet[1]; + aOut = pRet->p = (u8*)pRet + szData; }else{ rc = SQLITE_NOMEM; } @@ -697,6 +906,7 @@ static Fts5Data *fts5DataRead(Fts5Index *p, i64 iRowid){ } assert( (pRet==0)==(p->rc!=SQLITE_OK) ); + assert( pRet==0 || EIGHT_BYTE_ALIGNMENT( pRet->p ) ); return pRet; } @@ -713,7 +923,7 @@ static Fts5Data *fts5LeafRead(Fts5Index *p, i64 iRowid){ Fts5Data *pRet = fts5DataRead(p, iRowid); if( pRet ){ if( pRet->nn<4 || pRet->szLeaf>pRet->nn ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); fts5DataRelease(pRet); pRet = 0; } @@ -728,9 +938,13 @@ static int fts5IndexPrepareStmt( ){ if( p->rc==SQLITE_OK ){ if( zSql ){ - p->rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, + int rc = sqlite3_prepare_v3(p->pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT|SQLITE_PREPARE_NO_VTAB, ppStmt, 0); + /* If this prepare() call fails with SQLITE_ERROR, then one of the + ** %_idx or %_data tables has been removed or modified. Call this + ** corruption. */ + p->rc = (rc==SQLITE_ERROR ? SQLITE_CORRUPT : rc); }else{ p->rc = SQLITE_NOMEM; } @@ -788,10 +1002,17 @@ static void fts5DataDelete(Fts5Index *p, i64 iFirst, i64 iLast){ /* ** Remove all records associated with segment iSegid. 
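+** If the segment has tombstone hash pages, those are removed as well - they
+** are stored under rowids generated by FTS5_TOMBSTONE_ROWID().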
*/ -static void fts5DataRemoveSegment(Fts5Index *p, int iSegid){ +static void fts5DataRemoveSegment(Fts5Index *p, Fts5StructureSegment *pSeg){ + int iSegid = pSeg->iSegid; i64 iFirst = FTS5_SEGMENT_ROWID(iSegid, 0); i64 iLast = FTS5_SEGMENT_ROWID(iSegid+1, 0)-1; fts5DataDelete(p, iFirst, iLast); + + if( pSeg->nPgTombstone ){ + i64 iTomb1 = FTS5_TOMBSTONE_ROWID(iSegid, 0); + i64 iTomb2 = FTS5_TOMBSTONE_ROWID(iSegid, pSeg->nPgTombstone-1); + fts5DataDelete(p, iTomb1, iTomb2); + } if( p->pIdxDeleter==0 ){ Fts5Config *pConfig = p->pConfig; fts5IndexPrepareStmt(p, &p->pIdxDeleter, sqlite3_mprintf( @@ -850,7 +1071,7 @@ int sqlite3Fts5StructureTest(Fts5Index *p, void *pStruct){ static void fts5StructureMakeWritable(int *pRc, Fts5Structure **pp){ Fts5Structure *p = *pp; if( *pRc==SQLITE_OK && p->nRef>1 ){ - i64 nByte = sizeof(Fts5Structure)+(p->nLevel-1)*sizeof(Fts5StructureLevel); + i64 nByte = SZ_FTS5STRUCTURE(p->nLevel); Fts5Structure *pNew; pNew = (Fts5Structure*)sqlite3Fts5MallocZero(pRc, nByte); if( pNew ){ @@ -902,11 +1123,19 @@ static int fts5StructureDecode( int nSegment = 0; sqlite3_int64 nByte; /* Bytes of space to allocate at pRet */ Fts5Structure *pRet = 0; /* Structure object to return */ + int bStructureV2 = 0; /* True for FTS5_STRUCTURE_V2 */ + u64 nOriginCntr = 0; /* Largest origin value seen so far */ /* Grab the cookie value */ if( piCookie ) *piCookie = sqlite3Fts5Get32(pData); i = 4; + /* Check if this is a V2 structure record. Set bStructureV2 if it is. */ + if( 0==memcmp(&pData[i], FTS5_STRUCTURE_V2, 4) ){ + i += 4; + bStructureV2 = 1; + } + /* Read the total number of levels and segments from the start of the ** structure record. */ i += fts5GetVarint32(&pData[i], nLevel); @@ -916,10 +1145,7 @@ static int fts5StructureDecode( ){ return FTS5_CORRUPT; } - nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel-1) /* aLevel[] array */ - ); + nByte = SZ_FTS5STRUCTURE(nLevel); pRet = (Fts5Structure*)sqlite3Fts5MallocZero(&rc, nByte); if( pRet ){ @@ -953,9 +1179,18 @@ static int fts5StructureDecode( rc = FTS5_CORRUPT; break; } + assert( pSeg!=0 ); i += fts5GetVarint32(&pData[i], pSeg->iSegid); i += fts5GetVarint32(&pData[i], pSeg->pgnoFirst); i += fts5GetVarint32(&pData[i], pSeg->pgnoLast); + if( bStructureV2 ){ + i += fts5GetVarint(&pData[i], &pSeg->iOrigin1); + i += fts5GetVarint(&pData[i], &pSeg->iOrigin2); + i += fts5GetVarint32(&pData[i], pSeg->nPgTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntryTombstone); + i += fts5GetVarint(&pData[i], &pSeg->nEntry); + nOriginCntr = MAX(nOriginCntr, pSeg->iOrigin2); + } if( pSeg->pgnoLastpgnoFirst ){ rc = FTS5_CORRUPT; break; @@ -966,6 +1201,9 @@ static int fts5StructureDecode( } } if( nSegment!=0 && rc==SQLITE_OK ) rc = FTS5_CORRUPT; + if( bStructureV2 ){ + pRet->nOriginCntr = nOriginCntr+1; + } if( rc!=SQLITE_OK ){ fts5StructureRelease(pRet); @@ -983,13 +1221,11 @@ static int fts5StructureDecode( */ static void fts5StructureAddLevel(int *pRc, Fts5Structure **ppStruct){ fts5StructureMakeWritable(pRc, ppStruct); + assert( (ppStruct!=0 && (*ppStruct)!=0) || (*pRc)!=SQLITE_OK ); if( *pRc==SQLITE_OK ){ Fts5Structure *pStruct = *ppStruct; int nLevel = pStruct->nLevel; - sqlite3_int64 nByte = ( - sizeof(Fts5Structure) + /* Main structure */ - sizeof(Fts5StructureLevel) * (nLevel+1) /* aLevel[] array */ - ); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(nLevel+2); pStruct = sqlite3_realloc64(pStruct, nByte); if( pStruct ){ @@ -1046,8 +1282,14 @@ static Fts5Structure 
*fts5StructureReadUncached(Fts5Index *p){ /* TODO: Do we need this if the leaf-index is appended? Probably... */ memset(&pData->p[pData->nn], 0, FTS5_DATA_PADDING); p->rc = fts5StructureDecode(pData->p, pData->nn, &iCookie, &pRet); - if( p->rc==SQLITE_OK && (pConfig->pgsz==0 || pConfig->iCookie!=iCookie) ){ - p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie); + if( p->rc==SQLITE_OK ){ + if( (pConfig->pgsz==0 || pConfig->iCookie!=iCookie) ){ + p->rc = sqlite3Fts5ConfigLoad(pConfig, iCookie); + } + }else if( p->rc==SQLITE_CORRUPT_VTAB ){ + sqlite3Fts5ConfigErrmsg(p->pConfig, + "fts5: corrupt structure record for table \"%s\"", p->pConfig->zName + ); } fts5DataRelease(pData); if( p->rc!=SQLITE_OK ){ @@ -1177,6 +1419,7 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ Fts5Buffer buf; /* Buffer to serialize record into */ int iLvl; /* Used to iterate through levels */ int iCookie; /* Cookie value to store */ + int nHdr = (pStruct->nOriginCntr>0 ? (4+4+9+9+9) : (4+9+9)); assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); memset(&buf, 0, sizeof(Fts5Buffer)); @@ -1185,9 +1428,12 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ iCookie = p->pConfig->iCookie; if( iCookie<0 ) iCookie = 0; - if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, 4+9+9+9) ){ + if( 0==sqlite3Fts5BufferSize(&p->rc, &buf, nHdr) ){ sqlite3Fts5Put32(buf.p, iCookie); buf.n = 4; + if( pStruct->nOriginCntr>0 ){ + fts5BufferSafeAppendBlob(&buf, FTS5_STRUCTURE_V2, 4); + } fts5BufferSafeAppendVarint(&buf, pStruct->nLevel); fts5BufferSafeAppendVarint(&buf, pStruct->nSegment); fts5BufferSafeAppendVarint(&buf, (i64)pStruct->nWriteCounter); @@ -1201,9 +1447,17 @@ static void fts5StructureWrite(Fts5Index *p, Fts5Structure *pStruct){ assert( pLvl->nMerge<=pLvl->nSeg ); for(iSeg=0; iSegnSeg; iSeg++){ - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].iSegid); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoFirst); - fts5BufferAppendVarint(&p->rc, &buf, pLvl->aSeg[iSeg].pgnoLast); + Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iSegid); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoFirst); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->pgnoLast); + if( pStruct->nOriginCntr>0 ){ + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin1); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->iOrigin2); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nPgTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntryTombstone); + fts5BufferAppendVarint(&p->rc, &buf, pSeg->nEntry); + } } } @@ -1346,9 +1600,9 @@ static int fts5DlidxLvlNext(Fts5DlidxLvl *pLvl){ } if( iOffnn ){ - i64 iVal; + u64 iVal; pLvl->iLeafPgno += (iOff - pLvl->iOff) + 1; - iOff += fts5GetVarint(&pData->p[iOff], (u64*)&iVal); + iOff += fts5GetVarint(&pData->p[iOff], &iVal); pLvl->iRowid += iVal; pLvl->iOff = iOff; }else{ @@ -1441,42 +1695,25 @@ static int fts5DlidxLvlPrev(Fts5DlidxLvl *pLvl){ pLvl->bEof = 1; }else{ u8 *a = pLvl->pData->p; - i64 iVal; - int iLimit; - int ii; - int nZero = 0; - - /* Currently iOff points to the first byte of a varint. This block - ** decrements iOff until it points to the first byte of the previous - ** varint. Taking care not to read any memory locations that occur - ** before the buffer in memory. */ - iLimit = (iOff>9 ? 
iOff-9 : 0); - for(iOff--; iOff>iLimit; iOff--){ - if( (a[iOff-1] & 0x80)==0 ) break; - } - fts5GetVarint(&a[iOff], (u64*)&iVal); - pLvl->iRowid -= iVal; - pLvl->iLeafPgno--; + pLvl->iOff = 0; + fts5DlidxLvlNext(pLvl); + while( 1 ){ + int nZero = 0; + int ii = pLvl->iOff; + u64 delta = 0; - /* Skip backwards past any 0x00 varints. */ - for(ii=iOff-1; ii>=pLvl->iFirstOff && a[ii]==0x00; ii--){ - nZero++; - } - if( ii>=pLvl->iFirstOff && (a[ii] & 0x80) ){ - /* The byte immediately before the last 0x00 byte has the 0x80 bit - ** set. So the last 0x00 is only a varint 0 if there are 8 more 0x80 - ** bytes before a[ii]. */ - int bZero = 0; /* True if last 0x00 counts */ - if( (ii-8)>=pLvl->iFirstOff ){ - int j; - for(j=1; j<=8 && (a[ii-j] & 0x80); j++); - bZero = (j>8); + while( a[ii]==0 ){ + nZero++; + ii++; } - if( bZero==0 ) nZero--; + ii += sqlite3Fts5GetVarint(&a[ii], &delta); + + if( ii>=iOff ) break; + pLvl->iLeafPgno += nZero+1; + pLvl->iRowid += delta; + pLvl->iOff = ii; } - pLvl->iLeafPgno -= nZero; - pLvl->iOff = iOff - nZero; } return pLvl->bEof; @@ -1533,7 +1770,7 @@ static Fts5DlidxIter *fts5DlidxIterInit( int bDone = 0; for(i=0; p->rc==SQLITE_OK && bDone==0; i++){ - sqlite3_int64 nByte = sizeof(Fts5DlidxIter) + i * sizeof(Fts5DlidxLvl); + sqlite3_int64 nByte = SZ_FTS5DLIDXITER(i+1); Fts5DlidxIter *pNew; pNew = (Fts5DlidxIter*)sqlite3_realloc64(pIter, nByte); @@ -1672,10 +1909,10 @@ static void fts5SegIterLoadRowid(Fts5Index *p, Fts5SegIter *pIter){ i64 iOff = pIter->iLeafOffset; ASSERT_SZLEAF_OK(pIter->pLeaf); - if( iOff>=pIter->pLeaf->szLeaf ){ + while( iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( pIter->pLeaf==0 ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + if( p->rc==SQLITE_OK ) FTS5_CORRUPT_ITER(p, pIter); return; } iOff = 4; @@ -1707,7 +1944,7 @@ static void fts5SegIterLoadTerm(Fts5Index *p, Fts5SegIter *pIter, int nKeep){ iOff += fts5GetVarint32(&a[iOff], nNew); if( iOff+nNew>pIter->pLeaf->szLeaf || nKeep>pIter->term.n || nNew==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } pIter->term.n = nKeep; @@ -1743,6 +1980,25 @@ static void fts5SegIterSetNext(Fts5Index *p, Fts5SegIter *pIter){ } } +/* +** Allocate a tombstone hash page array object (pIter->pTombArray) for +** the iterator passed as the second argument. If an OOM error occurs, +** leave an error in the Fts5Index object. +*/ +static void fts5SegIterAllocTombstone(Fts5Index *p, Fts5SegIter *pIter){ + const i64 nTomb = (i64)pIter->pSeg->nPgTombstone; + if( nTomb>0 ){ + i64 nByte = SZ_FTS5TOMBSTONEARRAY(nTomb+1); + Fts5TombstoneArray *pNew; + pNew = (Fts5TombstoneArray*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( pNew ){ + pNew->nTombstone = nTomb; + pNew->nRef = 1; + pIter->pTombArray = pNew; + } + } +} + /* ** Initialize the iterator object pIter to iterate through the entries in ** segment pSeg. 
The iterator is left pointing to the first entry when @@ -1771,10 +2027,12 @@ static void fts5SegIterInit( fts5SegIterSetNext(p, pIter); pIter->pSeg = pSeg; pIter->iLeafPgno = pSeg->pgnoFirst-1; - fts5SegIterNextPage(p, pIter); + do { + fts5SegIterNextPage(p, pIter); + }while( p->rc==SQLITE_OK && pIter->pLeaf && pIter->pLeaf->nn==4 ); } - if( p->rc==SQLITE_OK ){ + if( p->rc==SQLITE_OK && pIter->pLeaf ){ pIter->iLeafOffset = 4; assert( pIter->pLeaf!=0 ); assert_nc( pIter->pLeaf->nn>4 ); @@ -1782,6 +2040,7 @@ static void fts5SegIterInit( pIter->iPgidxOff = pIter->pLeaf->szLeaf+1; fts5SegIterLoadTerm(p, pIter, 0); fts5SegIterLoadNPos(p, pIter); + fts5SegIterAllocTombstone(p, pIter); } } @@ -1815,6 +2074,7 @@ static void fts5SegIterReverseInitPage(Fts5Index *p, Fts5SegIter *pIter){ while( 1 ){ u64 iDelta = 0; + if( i>=n ) break; if( eDetail==FTS5_DETAIL_NONE ){ /* todo */ if( i=pNew->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); }else{ pIter->pLeaf = pNew; pIter->iLeafOffset = iRowidOff; @@ -1968,7 +2228,7 @@ static void fts5SegIterNext_None( iOff = pIter->iLeafOffset; /* Next entry is on the next page */ - if( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ + while( pIter->pSeg && iOff>=pIter->pLeaf->szLeaf ){ fts5SegIterNextPage(p, pIter); if( p->rc || pIter->pLeaf==0 ) return; pIter->iRowid = 0; @@ -1977,7 +2237,7 @@ static void fts5SegIterNext_None( if( iOffiEndofDoclist ){ /* Next entry is on the current page */ - i64 iDelta; + u64 iDelta; iOff += sqlite3Fts5GetVarint(&pIter->pLeaf->p[iOff], (u64*)&iDelta); pIter->iLeafOffset = iOff; pIter->iRowid += iDelta; @@ -1992,15 +2252,16 @@ static void fts5SegIterNext_None( }else{ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList; sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); if( pList==0 ) goto next_none_eof; pIter->pLeaf->p = (u8*)pList; pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList; - sqlite3Fts5BufferSet(&p->rc,&pIter->term, (int)strlen(zTerm), (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc,&pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); } @@ -2066,11 +2327,12 @@ static void fts5SegIterNext( }else if( pIter->pSeg==0 ){ const u8 *pList = 0; const char *zTerm = 0; + int nTerm = 0; int nList = 0; assert( (pIter->flags & FTS5_SEGITER_ONETERM) || pbNewTerm ); if( 0==(pIter->flags & FTS5_SEGITER_ONETERM) ){ sqlite3Fts5HashScanNext(p->pHash); - sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &pList, &nList); + sqlite3Fts5HashScanEntry(p->pHash, &zTerm, &nTerm, &pList, &nList); } if( pList==0 ){ fts5DataRelease(pIter->pLeaf); @@ -2080,8 +2342,7 @@ static void fts5SegIterNext( pIter->pLeaf->nn = nList; pIter->pLeaf->szLeaf = nList; pIter->iEndofDoclist = nList+1; - sqlite3Fts5BufferSet(&p->rc, &pIter->term, (int)strlen(zTerm), - (u8*)zTerm); + sqlite3Fts5BufferSet(&p->rc, &pIter->term, nTerm, (u8*)zTerm); pIter->iLeafOffset = fts5GetVarint(pList, (u64*)&pIter->iRowid); *pbNewTerm = 1; } @@ -2113,7 +2374,7 @@ static void fts5SegIterNext( } assert_nc( iOffszLeaf ); if( iOff>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } } @@ -2161,7 +2422,7 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ Fts5Data *pLast = 0; int pgnoLast = 0; - if( pDlidx ){ + if( pDlidx && p->pConfig->iVersion==FTS5_CURRENT_VERSION ){ int iSegid = pIter->pSeg->iSegid; pgnoLast = 
fts5DlidxIterPgno(pDlidx); pLast = fts5LeafRead(p, FTS5_SEGMENT_ROWID(iSegid, pgnoLast)); @@ -2221,18 +2482,20 @@ static void fts5SegIterReverse(Fts5Index *p, Fts5SegIter *pIter){ fts5DataRelease(pIter->pLeaf); pIter->pLeaf = pLast; pIter->iLeafPgno = pgnoLast; - iOff = fts5LeafFirstRowidOff(pLast); - if( iOff>pLast->szLeaf ){ - p->rc = FTS5_CORRUPT; - return; - } - iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; + if( p->rc==SQLITE_OK ){ + iOff = fts5LeafFirstRowidOff(pLast); + if( iOff>pLast->szLeaf ){ + FTS5_CORRUPT_ITER(p, pIter); + return; + } + iOff += fts5GetVarint(&pLast->p[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; - if( fts5LeafIsTermless(pLast) ){ - pIter->iEndofDoclist = pLast->nn+1; - }else{ - pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast); + if( fts5LeafIsTermless(pLast) ){ + pIter->iEndofDoclist = pLast->nn+1; + }else{ + pIter->iEndofDoclist = fts5LeafFirstTermOff(pLast); + } } } @@ -2302,7 +2565,7 @@ static void fts5LeafSeek( iPgidx += fts5GetVarint32(&a[iPgidx], iTermOff); iOff = iTermOff; if( iOff>n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } @@ -2345,7 +2608,7 @@ static void fts5LeafSeek( iOff = iTermOff; if( iOff>=n ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } @@ -2367,7 +2630,7 @@ static void fts5LeafSeek( iPgidx = (u32)pIter->pLeaf->szLeaf; iPgidx += fts5GetVarint32(&pIter->pLeaf->p[iPgidx], iOff); if( iOff<4 || (i64)iOff>=pIter->pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; }else{ nKeep = 0; @@ -2382,7 +2645,7 @@ static void fts5LeafSeek( search_success: if( (i64)iOff+nNew>n || nNew<1 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ITER(p, pIter); return; } pIter->iLeafOffset = iOff + nNew; @@ -2467,7 +2730,7 @@ static void fts5SegIterSeekInit( fts5LeafSeek(p, bGe, pIter, pTerm, nTerm); } - if( p->rc==SQLITE_OK && bGe==0 ){ + if( p->rc==SQLITE_OK && (bGe==0 || (flags & FTS5INDEX_QUERY_SCANONETERM)) ){ pIter->flags |= FTS5_SEGITER_ONETERM; if( pIter->pLeaf ){ if( flags & FTS5INDEX_QUERY_DESC ){ @@ -2483,6 +2746,9 @@ static void fts5SegIterSeekInit( } fts5SegIterSetNext(p, pIter); + if( 0==(flags & FTS5INDEX_QUERY_SCANONETERM) ){ + fts5SegIterAllocTombstone(p, pIter); + } /* Either: ** @@ -2499,6 +2765,79 @@ static void fts5SegIterSeekInit( ); } + +/* +** SQL used by fts5SegIterNextInit() to find the page to open. +*/ +static sqlite3_stmt *fts5IdxNextStmt(Fts5Index *p){ + if( p->pIdxNextSelect==0 ){ + Fts5Config *pConfig = p->pConfig; + fts5IndexPrepareStmt(p, &p->pIdxNextSelect, sqlite3_mprintf( + "SELECT pgno FROM '%q'.'%q_idx' WHERE " + "segid=? AND term>? ORDER BY term ASC LIMIT 1", + pConfig->zDb, pConfig->zName + )); + + } + return p->pIdxNextSelect; +} + +/* +** This is similar to fts5SegIterSeekInit(), except that it initializes +** the segment iterator to point to the first term following the page +** with pToken/nToken on it. 
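+**
+** The page to open is found using the statement built by fts5IdxNextStmt()
+** - a query against the %_idx table for the first entry in this segment
+** with a term greater than pTerm/nTerm. If there is no such entry, the
+** iterator is left with pIter->pLeaf==0, which callers treat as EOF.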
+*/ +static void fts5SegIterNextInit( + Fts5Index *p, + const char *pTerm, int nTerm, + Fts5StructureSegment *pSeg, /* Description of segment */ + Fts5SegIter *pIter /* Object to populate */ +){ + int iPg = -1; /* Page of segment to open */ + int bDlidx = 0; + sqlite3_stmt *pSel = 0; /* SELECT to find iPg */ + + pSel = fts5IdxNextStmt(p); + if( pSel ){ + assert( p->rc==SQLITE_OK ); + sqlite3_bind_int(pSel, 1, pSeg->iSegid); + sqlite3_bind_blob(pSel, 2, pTerm, nTerm, SQLITE_STATIC); + + if( sqlite3_step(pSel)==SQLITE_ROW ){ + i64 val = sqlite3_column_int64(pSel, 0); + iPg = (int)(val>>1); + bDlidx = (val & 0x0001); + } + p->rc = sqlite3_reset(pSel); + sqlite3_bind_null(pSel, 2); + if( p->rc ) return; + } + + memset(pIter, 0, sizeof(*pIter)); + pIter->pSeg = pSeg; + pIter->flags |= FTS5_SEGITER_ONETERM; + if( iPg>=0 ){ + pIter->iLeafPgno = iPg - 1; + fts5SegIterNextPage(p, pIter); + fts5SegIterSetNext(p, pIter); + } + if( pIter->pLeaf ){ + const u8 *a = pIter->pLeaf->p; + int iTermOff = 0; + + pIter->iPgidxOff = pIter->pLeaf->szLeaf; + pIter->iPgidxOff += fts5GetVarint32(&a[pIter->iPgidxOff], iTermOff); + pIter->iLeafOffset = iTermOff; + fts5SegIterLoadTerm(p, pIter, 0); + fts5SegIterLoadNPos(p, pIter); + if( bDlidx ) fts5SegIterLoadDlidx(p, pIter); + + assert( p->rc!=SQLITE_OK || + fts5BufferCompareBlob(&pIter->term, (const u8*)pTerm, nTerm)>0 + ); + } +} + /* ** Initialize the object pIter to point to term pTerm/nTerm within the ** in-memory hash table. If there is no such term in the hash-table, the @@ -2525,14 +2864,21 @@ static void fts5SegIterHashInit( const u8 *pList = 0; p->rc = sqlite3Fts5HashScanInit(p->pHash, (const char*)pTerm, nTerm); - sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &pList, &nList); - n = (z ? (int)strlen((const char*)z) : 0); + sqlite3Fts5HashScanEntry(p->pHash, (const char**)&z, &n, &pList, &nList); if( pList ){ pLeaf = fts5IdxMalloc(p, sizeof(Fts5Data)); if( pLeaf ){ pLeaf->p = (u8*)pList; } } + + /* The call to sqlite3Fts5HashScanInit() causes the hash table to + ** fill the size field of all existing position lists. This means they + ** can no longer be appended to. Since the only scenario in which they + ** can be appended to is if the previous operation on this table was + ** a DELETE, by clearing the Fts5Index.bDelete flag we can avoid this + ** possibility altogether. */ + p->bDelete = 0; }else{ p->rc = sqlite3Fts5HashQuery(p->pHash, sizeof(Fts5Data), (const char*)pTerm, nTerm, (void**)&pLeaf, &nList @@ -2563,6 +2909,37 @@ static void fts5SegIterHashInit( fts5SegIterSetNext(p, pIter); } +/* +** Array ap[] contains n elements. Release each of these elements using +** fts5DataRelease(). Then free the array itself using sqlite3_free(). +*/ +static void fts5IndexFreeArray(Fts5Data **ap, int n){ + if( ap ){ + int ii; + for(ii=0; iinRef--; + if( p->nRef<=0 ){ + int ii; + for(ii=0; iinTombstone; ii++){ + fts5DataRelease(p->apTombstone[ii]); + } + sqlite3_free(p); + } + } +} + /* ** Zero the iterator passed as the only argument. 
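+** As well as the term buffer and leaf data, this releases the iterator's
+** reference to its tombstone page array (pTombArray), if any.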
*/ @@ -2570,6 +2947,7 @@ static void fts5SegIterClear(Fts5SegIter *pIter){ fts5BufferFree(&pIter->term); fts5DataRelease(pIter->pLeaf); fts5DataRelease(pIter->pNextLeaf); + fts5TombstoneArrayDelete(pIter->pTombArray); fts5DlidxIterFree(pIter->pDlidx); sqlite3_free(pIter->aRowidOffset); memset(pIter, 0, sizeof(Fts5SegIter)); @@ -2703,7 +3081,6 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){ assert_nc( i2!=0 ); pRes->bTermEq = 1; if( p1->iRowid==p2->iRowid ){ - p1->bDel = p2->bDel; return i2; } res = ((p1->iRowid > p2->iRowid)==pIter->bRev) ? -1 : +1; @@ -2722,7 +3099,8 @@ static int fts5MultiIterDoCompare(Fts5Iter *pIter, int iOut){ /* ** Move the seg-iter so that it points to the first rowid on page iLeafPgno. -** It is an error if leaf iLeafPgno does not exist or contains no rowids. +** It is an error if leaf iLeafPgno does not exist. Unless the db is +** a 'secure-delete' db, if it contains no rowids then this is also an error. */ static void fts5SegIterGotoPage( Fts5Index *p, /* FTS5 backend object */ @@ -2732,26 +3110,28 @@ static void fts5SegIterGotoPage( assert( iLeafPgno>pIter->iLeafPgno ); if( iLeafPgno>pIter->pSeg->pgnoLast ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); }else{ fts5DataRelease(pIter->pNextLeaf); pIter->pNextLeaf = 0; pIter->iLeafPgno = iLeafPgno-1; - fts5SegIterNextPage(p, pIter); - assert( p->rc!=SQLITE_OK || pIter->iLeafPgno==iLeafPgno ); - if( p->rc==SQLITE_OK && ALWAYS(pIter->pLeaf!=0) ){ + while( p->rc==SQLITE_OK ){ int iOff; - u8 *a = pIter->pLeaf->p; - int n = pIter->pLeaf->szLeaf; - + fts5SegIterNextPage(p, pIter); + if( pIter->pLeaf==0 ) break; iOff = fts5LeafFirstRowidOff(pIter->pLeaf); - if( iOff<4 || iOff>=n ){ - p->rc = FTS5_CORRUPT; - }else{ - iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); - pIter->iLeafOffset = iOff; - fts5SegIterLoadNPos(p, pIter); + if( iOff>0 ){ + u8 *a = pIter->pLeaf->p; + int n = pIter->pLeaf->szLeaf; + if( iOff<4 || iOff>=n ){ + FTS5_CORRUPT_IDX(p); + }else{ + iOff += fts5GetVarint(&a[iOff], (u64*)&pIter->iRowid); + pIter->iLeafOffset = iOff; + fts5SegIterLoadNPos(p, pIter); + } + break; } } } @@ -2812,7 +3192,6 @@ static void fts5SegIterNextFrom( }while( p->rc==SQLITE_OK ); } - /* ** Free the iterator object passed as the second argument. */ @@ -2904,6 +3283,85 @@ static void fts5MultiIterSetEof(Fts5Iter *pIter){ pIter->iSwitchRowid = pSeg->iRowid; } +/* +** The argument to this macro must be an Fts5Data structure containing a +** tombstone hash page. This macro returns the key-size of the hash-page. +*/ +#define TOMBSTONE_KEYSIZE(pPg) (pPg->p[0]==4 ? 4 : 8) + +#define TOMBSTONE_NSLOT(pPg) \ + ((pPg->nn > 16) ? ((pPg->nn-8) / TOMBSTONE_KEYSIZE(pPg)) : 1) + +/* +** Query a single tombstone hash table for rowid iRowid. Return true if +** it is found or false otherwise. The tombstone hash table is one of +** nHashTable tables. 
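+**
+** For example (illustrative figures only): if the segment has nHashTable=4
+** tombstone pages and this page has nSlot=100 slots, then rowid 1234 is
+** looked up starting at slot iSlot = (1234/4) % 100 = 8, probing linearly
+** (wrapping around) until either a matching key or an empty slot is found.
+** Rowid 0 never occupies a slot - it is recorded by the flag byte in the
+** page header.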
+*/ +static int fts5IndexTombstoneQuery( + Fts5Data *pHash, /* Hash table page to query */ + int nHashTable, /* Number of pages attached to segment */ + u64 iRowid /* Rowid to query hash for */ +){ + const int szKey = TOMBSTONE_KEYSIZE(pHash); + const int nSlot = TOMBSTONE_NSLOT(pHash); + int iSlot = (iRowid / nHashTable) % nSlot; + int nCollide = nSlot; + + if( iRowid==0 ){ + return pHash->p[1]; + }else if( szKey==4 ){ + u32 *aSlot = (u32*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU32((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + }else{ + u64 *aSlot = (u64*)&pHash->p[8]; + while( aSlot[iSlot] ){ + if( fts5GetU64((u8*)&aSlot[iSlot])==iRowid ) return 1; + if( nCollide--==0 ) break; + iSlot = (iSlot+1)%nSlot; + } + } + + return 0; +} + +/* +** Return true if the iterator passed as the only argument points +** to an segment entry for which there is a tombstone. Return false +** if there is no tombstone or if the iterator is already at EOF. +*/ +static int fts5MultiIterIsDeleted(Fts5Iter *pIter){ + int iFirst = pIter->aFirst[1].iFirst; + Fts5SegIter *pSeg = &pIter->aSeg[iFirst]; + Fts5TombstoneArray *pArray = pSeg->pTombArray; + + if( pSeg->pLeaf && pArray ){ + /* Figure out which page the rowid might be present on. */ + int iPg = ((u64)pSeg->iRowid) % pArray->nTombstone; + assert( iPg>=0 ); + + /* If tombstone hash page iPg has not yet been loaded from the + ** database, load it now. */ + if( pArray->apTombstone[iPg]==0 ){ + pArray->apTombstone[iPg] = fts5DataRead(pIter->pIndex, + FTS5_TOMBSTONE_ROWID(pSeg->pSeg->iSegid, iPg) + ); + if( pArray->apTombstone[iPg]==0 ) return 0; + } + + return fts5IndexTombstoneQuery( + pArray->apTombstone[iPg], + pArray->nTombstone, + pSeg->iRowid + ); + } + + return 0; +} + /* ** Move the iterator to the next entry. ** @@ -2941,7 +3399,9 @@ static void fts5MultiIterNext( fts5AssertMultiIterSetup(p, pIter); assert( pSeg==&pIter->aSeg[pIter->aFirst[1].iFirst] && pSeg->pLeaf ); - if( pIter->bSkipEmpty==0 || pSeg->nPos ){ + if( (pIter->bSkipEmpty==0 || pSeg->nPos) + && 0==fts5MultiIterIsDeleted(pIter) + ){ pIter->xSetOutputs(pIter, pSeg); return; } @@ -2973,7 +3433,9 @@ static void fts5MultiIterNext2( } fts5AssertMultiIterSetup(p, pIter); - }while( fts5MultiIterIsEmpty(p, pIter) ); + }while( (fts5MultiIterIsEmpty(p, pIter) || fts5MultiIterIsDeleted(pIter)) + && (p->rc==SQLITE_OK) + ); } } @@ -2986,12 +3448,11 @@ static Fts5Iter *fts5MultiIterAlloc( int nSeg ){ Fts5Iter *pNew; - int nSlot; /* Power of two >= nSeg */ + i64 nSlot; /* Power of two >= nSeg */ for(nSlot=2; nSlotaSeg[] */ + SZ_FTS5ITER(nSlot) + /* pNew + pNew->aSeg[] */ sizeof(Fts5CResult) * nSlot /* pNew->aFirst[] */ ); if( pNew ){ @@ -3143,7 +3604,7 @@ static void fts5ChunkIterate( if( nRem<=0 ){ break; }else if( pSeg->pSeg==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); return; }else{ pgno++; @@ -3431,6 +3892,32 @@ static void fts5IterSetOutputCb(int *pRc, Fts5Iter *pIter){ } } +/* +** All the component segment-iterators of pIter have been set up. This +** functions finishes setup for iterator pIter itself. 
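+**
+** That means running the initial aFirst[] comparisons, advancing any
+** component iterators positioned on duplicate entries, setting the EOF
+** flag, and - if the winning entry is empty or has a tombstone - stepping
+** the iterator forward to the first visible entry.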
+*/ +static void fts5MultiIterFinishSetup(Fts5Index *p, Fts5Iter *pIter){ + int iIter; + for(iIter=pIter->nSeg-1; iIter>0; iIter--){ + int iEq; + if( (iEq = fts5MultiIterDoCompare(pIter, iIter)) ){ + Fts5SegIter *pSeg = &pIter->aSeg[iEq]; + if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); + fts5MultiIterAdvanced(p, pIter, iEq, iIter); + } + } + fts5MultiIterSetEof(pIter); + fts5AssertMultiIterSetup(p, pIter); + + if( (pIter->bSkipEmpty && fts5MultiIterIsEmpty(p, pIter)) + || fts5MultiIterIsDeleted(pIter) + ){ + fts5MultiIterNext(p, pIter, 0, 0); + }else if( pIter->base.bEof==0 ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + pIter->xSetOutputs(pIter, pSeg); + } +} /* ** Allocate a new Fts5Iter object. @@ -3466,7 +3953,7 @@ static void fts5MultiIterNew( if( iLevel<0 ){ assert( pStruct->nSegment==fts5StructureCountSegments(pStruct) ); nSeg = pStruct->nSegment; - nSeg += (p->pHash ? 1 : 0); + nSeg += (p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH)); }else{ nSeg = MIN(pStruct->aLevel[iLevel].nSeg, nSegment); } @@ -3487,7 +3974,7 @@ static void fts5MultiIterNew( if( p->rc==SQLITE_OK ){ if( iLevel<0 ){ Fts5StructureLevel *pEnd = &pStruct->aLevel[pStruct->nLevel]; - if( p->pHash ){ + if( p->pHash && 0==(flags & FTS5INDEX_QUERY_SKIPHASH) ){ /* Add a segment iterator for the current contents of the hash table. */ Fts5SegIter *pIter = &pNew->aSeg[iIter++]; fts5SegIterHashInit(p, pTerm, nTerm, flags, pIter); @@ -3512,29 +3999,12 @@ static void fts5MultiIterNew( assert( iIter==nSeg ); } - /* If the above was successful, each component iterators now points + /* If the above was successful, each component iterator now points ** to the first entry in its segment. In this case initialize the ** aFirst[] array. Or, if an error has occurred, free the iterator ** object and set the output variable to NULL. */ if( p->rc==SQLITE_OK ){ - for(iIter=pNew->nSeg-1; iIter>0; iIter--){ - int iEq; - if( (iEq = fts5MultiIterDoCompare(pNew, iIter)) ){ - Fts5SegIter *pSeg = &pNew->aSeg[iEq]; - if( p->rc==SQLITE_OK ) pSeg->xNext(p, pSeg, 0); - fts5MultiIterAdvanced(p, pNew, iEq, iIter); - } - } - fts5MultiIterSetEof(pNew); - fts5AssertMultiIterSetup(p, pNew); - - if( pNew->bSkipEmpty && fts5MultiIterIsEmpty(p, pNew) ){ - fts5MultiIterNext(p, pNew, 0, 0); - }else if( pNew->base.bEof==0 ){ - Fts5SegIter *pSeg = &pNew->aSeg[pNew->aFirst[1].iFirst]; - pNew->xSetOutputs(pNew, pSeg); - } - + fts5MultiIterFinishSetup(p, pNew); }else{ fts5MultiIterFree(pNew); *ppOut = 0; @@ -3559,7 +4029,6 @@ static void fts5MultiIterNew2( pNew = fts5MultiIterAlloc(p, 2); if( pNew ){ Fts5SegIter *pIter = &pNew->aSeg[1]; - pIter->flags = FTS5_SEGITER_ONETERM; if( pData->szLeaf>0 ){ pIter->pLeaf = pData; @@ -3706,7 +4175,10 @@ static void fts5IndexDiscardData(Fts5Index *p){ if( p->pHash ){ sqlite3Fts5HashClear(p->pHash); p->nPendingData = 0; + p->nPendingRow = 0; + p->flushRc = SQLITE_OK; } + p->nContentlessDelete = 0; } /* @@ -3920,7 +4392,7 @@ static void fts5WriteDlidxAppend( } if( pDlidx->bPrevValid ){ - iVal = iRowid - pDlidx->iPrev; + iVal = (u64)iRowid - (u64)pDlidx->iPrev; }else{ i64 iPgno = (i==0 ? 
pWriter->writer.pgno : pDlidx[-1].pgno); assert( pDlidx->buf.n==0 ); @@ -4087,7 +4559,9 @@ static void fts5WriteAppendRowid( fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid); }else{ assert_nc( p->rc || iRowid>pWriter->iPrevRowid ); - fts5BufferAppendVarint(&p->rc, &pPage->buf, iRowid - pWriter->iPrevRowid); + fts5BufferAppendVarint(&p->rc, &pPage->buf, + (u64)iRowid - (u64)pWriter->iPrevRowid + ); } pWriter->iPrevRowid = iRowid; pWriter->bFirstRowidInDoclist = 0; @@ -4105,7 +4579,7 @@ static void fts5WriteAppendPoslistData( const u8 *a = aData; int n = nData; - assert( p->pConfig->pgsz>0 ); + assert( p->pConfig->pgsz>0 || p->rc!=SQLITE_OK ); while( p->rc==SQLITE_OK && (pPage->buf.n + pPage->pgidx.n + n)>=p->pConfig->pgsz ){ @@ -4233,14 +4707,14 @@ static void fts5TrimSegments(Fts5Index *p, Fts5Iter *pIter){ ** a single page has been assigned to more than one segment. In ** this case a prior iteration of this loop may have corrupted the ** segment currently being trimmed. */ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iLeafRowid); }else{ fts5BufferZero(&buf); fts5BufferGrow(&p->rc, &buf, pData->nn); fts5BufferAppendBlob(&p->rc, &buf, sizeof(aHdr), aHdr); fts5BufferAppendVarint(&p->rc, &buf, pSeg->term.n); fts5BufferAppendBlob(&p->rc, &buf, pSeg->term.n, pSeg->term.p); - fts5BufferAppendBlob(&p->rc, &buf, pData->szLeaf-iOff,&pData->p[iOff]); + fts5BufferAppendBlob(&p->rc, &buf,pData->szLeaf-iOff,&pData->p[iOff]); if( p->rc==SQLITE_OK ){ /* Set the szLeaf field */ fts5PutU16(&buf.p[2], (u16)buf.n); @@ -4341,6 +4815,12 @@ static void fts5IndexMergeLevel( /* Read input from all segments in the input level */ nInput = pLvl->nSeg; + + /* Set the range of origins that will go into the output segment. */ + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pLvl->aSeg[0].iOrigin1; + pSeg->iOrigin2 = pLvl->aSeg[pLvl->nSeg-1].iOrigin2; + } } bOldest = (pLvlOut->nSeg==1 && pStruct->nLevel==iLvl+2); @@ -4400,8 +4880,11 @@ static void fts5IndexMergeLevel( int i; /* Remove the redundant segments from the %_data table */ + assert( pSeg->nEntry==0 ); for(i=0; iaSeg[i].iSegid); + Fts5StructureSegment *pOld = &pLvl->aSeg[i]; + pSeg->nEntry += (pOld->nEntry - pOld->nEntryTombstone); + fts5DataRemoveSegment(p, pOld); } /* Remove the redundant segments from the input level */ @@ -4427,6 +4910,48 @@ static void fts5IndexMergeLevel( if( pnRem ) *pnRem -= writer.nLeafWritten; } +/* +** If this is not a contentless_delete=1 table, or if the 'deletemerge' +** configuration option is set to 0, then this function always returns -1. +** Otherwise, it searches the structure object passed as the second argument +** for a level suitable for merging due to having a large number of +** tombstones in the tombstone hash. If one is found, its index is returned. +** Otherwise, if there is no suitable level, -1. 
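+**
+** For example (illustrative figures only): if the segments on a level
+** contain a total of nEntry=1000 rows, 300 of which have tombstones, the
+** level qualifies when (300*100)/1000 = 30 is greater than or equal to the
+** 'deletemerge' percentage, and it is selected if no other level has a
+** higher percentage.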
+*/ +static int fts5IndexFindDeleteMerge(Fts5Index *p, Fts5Structure *pStruct){ + Fts5Config *pConfig = p->pConfig; + int iRet = -1; + if( pConfig->bContentlessDelete && pConfig->nDeleteMerge>0 ){ + int ii; + int nBest = 0; + + for(ii=0; iinLevel; ii++){ + Fts5StructureLevel *pLvl = &pStruct->aLevel[ii]; + i64 nEntry = 0; + i64 nTomb = 0; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + nEntry += pLvl->aSeg[iSeg].nEntry; + nTomb += pLvl->aSeg[iSeg].nEntryTombstone; + } + assert_nc( nEntry>0 || pLvl->nSeg==0 ); + if( nEntry>0 ){ + int nPercent = (nTomb * 100) / nEntry; + if( nPercent>=pConfig->nDeleteMerge && nPercent>nBest ){ + iRet = ii; + nBest = nPercent; + } + } + + /* If pLvl is already the input level to an ongoing merge, look no + ** further for a merge candidate. The caller should be allowed to + ** continue merging from pLvl first. */ + if( pLvl->nMerge ) break; + } + } + return iRet; +} + /* ** Do up to nPg pages of automerge work on the index. ** @@ -4446,14 +4971,15 @@ static int fts5IndexMerge( int iBestLvl = 0; /* Level offering the most input segments */ int nBest = 0; /* Number of input segments on best level */ - /* Set iBestLvl to the level to read input segments from. */ + /* Set iBestLvl to the level to read input segments from. Or to -1 if + ** there is no level suitable to merge segments from. */ assert( pStruct->nLevel>0 ); for(iLvl=0; iLvlnLevel; iLvl++){ Fts5StructureLevel *pLvl = &pStruct->aLevel[iLvl]; if( pLvl->nMerge ){ if( pLvl->nMerge>nBest ){ iBestLvl = iLvl; - nBest = pLvl->nMerge; + nBest = nMin; } break; } @@ -4462,22 +4988,18 @@ static int fts5IndexMerge( iBestLvl = iLvl; } } - - /* If nBest is still 0, then the index must be empty. */ -#ifdef SQLITE_DEBUG - for(iLvl=0; nBest==0 && iLvlnLevel; iLvl++){ - assert( pStruct->aLevel[iLvl].nSeg==0 ); + if( nBestaLevel[iBestLvl].nMerge==0 ){ - break; - } + if( iBestLvl<0 ) break; bRet = 1; fts5IndexMergeLevel(p, &pStruct, iBestLvl, &nRem); if( p->rc==SQLITE_OK && pStruct->aLevel[iBestLvl].nMerge==0 ){ fts5StructurePromote(p, iBestLvl+1, pStruct); } + + if( nMin==1 ) nMin = 2; } *ppStruct = pStruct; return bRet; @@ -4518,16 +5040,16 @@ static void fts5IndexCrisismerge( ){ const int nCrisis = p->pConfig->nCrisisMerge; Fts5Structure *pStruct = *ppStruct; - int iLvl = 0; - - assert( p->rc!=SQLITE_OK || pStruct->nLevel>0 ); - while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ - fts5IndexMergeLevel(p, &pStruct, iLvl, 0); - assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); - fts5StructurePromote(p, iLvl+1, pStruct); - iLvl++; + if( pStruct && pStruct->nLevel>0 ){ + int iLvl = 0; + while( p->rc==SQLITE_OK && pStruct->aLevel[iLvl].nSeg>=nCrisis ){ + fts5IndexMergeLevel(p, &pStruct, iLvl, 0); + assert( p->rc!=SQLITE_OK || pStruct->nLevel>(iLvl+1) ); + fts5StructurePromote(p, iLvl+1, pStruct); + iLvl++; + } + *ppStruct = pStruct; } - *ppStruct = pStruct; } static int fts5IndexReturn(Fts5Index *p){ @@ -4536,6 +5058,14 @@ static int fts5IndexReturn(Fts5Index *p){ return rc; } +/* +** Close the read-only blob handle, if it is open. +*/ +void sqlite3Fts5IndexCloseReader(Fts5Index *p){ + fts5IndexCloseReader(p); + fts5IndexReturn(p); +} + typedef struct Fts5FlushCtx Fts5FlushCtx; struct Fts5FlushCtx { Fts5Index *pIdx; @@ -4562,10 +5092,495 @@ static int fts5PoslistPrefix(const u8 *aBuf, int nMax){ } /* -** Flush the contents of in-memory hash table iHash to a new level-0 -** segment on disk. Also update the corresponding structure record. 
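The new fts5IndexFindDeleteMerge() above picks a merge level by computing nPercent = (nTomb*100)/nEntry per level and requiring it to reach pConfig->nDeleteMerge. For context, here is a hedged application-side sketch of the configuration that feeds those fields; the `contentless_delete` table option and the `'deletemerge'` config command are assumed from the FTS5 documentation for this baseline and are not themselves part of this hunk.

```c
#include <stdio.h>
#include <sqlite3.h>

/* Hedged sketch (not part of the patch): create a contentless-delete FTS5
** table and raise the tombstone threshold that fts5IndexFindDeleteMerge()
** compares each level's nPercent against. Option and command names are
** taken from the FTS5 documentation; the table name is made up. */
static int setup_deletemerge(sqlite3 *db){
  int rc = sqlite3_exec(db,
      "CREATE VIRTUAL TABLE ft USING fts5(body, content='', contentless_delete=1);",
      0, 0, 0);
  if( rc==SQLITE_OK ){
    /* Consider a level for a delete-merge only once at least 25% of its
    ** entries are tombstones (stored in pConfig->nDeleteMerge). */
    rc = sqlite3_exec(db,
        "INSERT INTO ft(ft, rank) VALUES('deletemerge', 25);", 0, 0, 0);
  }
  if( rc!=SQLITE_OK ) fprintf(stderr, "fts5 setup failed: %d\n", rc);
  return rc;
}
```

As far as can be read from the hunk above, the level returned here is used as the merge input when no level otherwise qualifies on segment count alone.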
+** Execute the SQL statement: ** -** If an error occurs, set the Fts5Index.rc error code. If an error has +** DELETE FROM %_idx WHERE (segid, (pgno/2)) = ($iSegid, $iPgno); +** +** This is used when a secure-delete operation removes the last term +** from a segment leaf page. In that case the %_idx entry is removed +** too. This is done to ensure that if all instances of a token are +** removed from an fts5 database in secure-delete mode, no trace of +** the token itself remains in the database. +*/ +static void fts5SecureDeleteIdxEntry( + Fts5Index *p, /* FTS5 backend object */ + int iSegid, /* Id of segment to delete entry for */ + int iPgno /* Page number within segment */ +){ + if( iPgno!=1 ){ + assert( p->pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE ); + if( p->pDeleteFromIdx==0 ){ + fts5IndexPrepareStmt(p, &p->pDeleteFromIdx, sqlite3_mprintf( + "DELETE FROM '%q'.'%q_idx' WHERE (segid, (pgno/2)) = (?1, ?2)", + p->pConfig->zDb, p->pConfig->zName + )); + } + if( p->rc==SQLITE_OK ){ + sqlite3_bind_int(p->pDeleteFromIdx, 1, iSegid); + sqlite3_bind_int(p->pDeleteFromIdx, 2, iPgno); + sqlite3_step(p->pDeleteFromIdx); + p->rc = sqlite3_reset(p->pDeleteFromIdx); + } + } +} + +/* +** This is called when a secure-delete operation removes a position-list +** that overflows onto segment page iPgno of segment pSeg. This function +** rewrites node iPgno, and possibly one or more of its right-hand peers, +** to remove this portion of the position list. +** +** Output variable (*pbLastInDoclist) is set to true if the position-list +** removed is followed by a new term or the end-of-segment, or false if +** it is followed by another rowid/position list. +*/ +static void fts5SecureDeleteOverflow( + Fts5Index *p, + Fts5StructureSegment *pSeg, + int iPgno, + int *pbLastInDoclist +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int pgno; + Fts5Data *pLeaf = 0; + assert( iPgno!=1 ); + + *pbLastInDoclist = 1; + for(pgno=iPgno; p->rc==SQLITE_OK && pgno<=pSeg->pgnoLast; pgno++){ + i64 iRowid = FTS5_SEGMENT_ROWID(pSeg->iSegid, pgno); + int iNext = 0; + u8 *aPg = 0; + + pLeaf = fts5DataRead(p, iRowid); + if( pLeaf==0 ) break; + aPg = pLeaf->p; + + iNext = fts5GetU16(&aPg[0]); + if( iNext!=0 ){ + *pbLastInDoclist = 0; + } + if( iNext==0 && pLeaf->szLeaf!=pLeaf->nn ){ + fts5GetVarint32(&aPg[pLeaf->szLeaf], iNext); + } + + if( iNext==0 ){ + /* The page contains no terms or rowids. Replace it with an empty + ** page and move on to the right-hand peer. */ + const u8 aEmpty[] = {0x00, 0x00, 0x00, 0x04}; + assert_nc( bDetailNone==0 || pLeaf->nn==4 ); + if( bDetailNone==0 ) fts5DataWrite(p, iRowid, aEmpty, sizeof(aEmpty)); + fts5DataRelease(pLeaf); + pLeaf = 0; + }else if( bDetailNone ){ + break; + }else if( iNext>=pLeaf->szLeaf || pLeaf->nnszLeaf || iNext<4 ){ + FTS5_CORRUPT_ROWID(p, iRowid); + break; + }else{ + int nShift = iNext - 4; + int nPg; + + int nIdx = 0; + u8 *aIdx = 0; + + /* Unless the current page footer is 0 bytes in size (in which case + ** the new page footer will be as well), allocate and populate a + ** buffer containing the new page footer. Set stack variables aIdx + ** and nIdx accordingly. 
*/ + if( pLeaf->nn>pLeaf->szLeaf ){ + int iFirst = 0; + int i1 = pLeaf->szLeaf; + int i2 = 0; + + i1 += fts5GetVarint32(&aPg[i1], iFirst); + if( iFirstrc, (pLeaf->nn-pLeaf->szLeaf)+2); + if( aIdx==0 ) break; + i2 = sqlite3Fts5PutVarint(aIdx, iFirst-nShift); + if( i1nn ){ + memcpy(&aIdx[i2], &aPg[i1], pLeaf->nn-i1); + i2 += (pLeaf->nn-i1); + } + nIdx = i2; + } + + /* Modify the contents of buffer aPg[]. Set nPg to the new size + ** in bytes. The new page is always smaller than the old. */ + nPg = pLeaf->szLeaf - nShift; + memmove(&aPg[4], &aPg[4+nShift], nPg-4); + fts5PutU16(&aPg[2], nPg); + if( fts5GetU16(&aPg[0]) ) fts5PutU16(&aPg[0], 4); + if( nIdx>0 ){ + memcpy(&aPg[nPg], aIdx, nIdx); + nPg += nIdx; + } + sqlite3_free(aIdx); + + /* Write the new page to disk and exit the loop */ + assert( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, iRowid, aPg, nPg); + break; + } + } + fts5DataRelease(pLeaf); +} + +/* +** Completely remove the entry that pSeg currently points to from +** the database. +*/ +static void fts5DoSecureDelete( + Fts5Index *p, + Fts5SegIter *pSeg +){ + const int bDetailNone = (p->pConfig->eDetail==FTS5_DETAIL_NONE); + int iSegid = pSeg->pSeg->iSegid; + u8 *aPg = pSeg->pLeaf->p; + int nPg = pSeg->pLeaf->nn; + int iPgIdx = pSeg->pLeaf->szLeaf; + + u64 iDelta = 0; + int iNextOff = 0; + int iOff = 0; + int nIdx = 0; + u8 *aIdx = 0; + int bLastInDoclist = 0; + int iIdx = 0; + int iStart = 0; + int iDelKeyOff = 0; /* Offset of deleted key, if any */ + + nIdx = nPg-iPgIdx; + aIdx = sqlite3Fts5MallocZero(&p->rc, ((i64)nIdx)+16); + if( p->rc ) return; + memcpy(aIdx, &aPg[iPgIdx], nIdx); + + /* At this point segment iterator pSeg points to the entry + ** this function should remove from the b-tree segment. + ** + ** In detail=full or detail=column mode, pSeg->iLeafOffset is the + ** offset of the first byte in the position-list for the entry to + ** remove. Immediately before this comes two varints that will also + ** need to be removed: + ** + ** + the rowid or delta rowid value for the entry, and + ** + the size of the position list in bytes. + ** + ** Or, in detail=none mode, there is a single varint prior to + ** pSeg->iLeafOffset - the rowid or delta rowid value. + ** + ** This block sets the following variables: + ** + ** iStart: + ** The offset of the first byte of the rowid or delta-rowid + ** value for the doclist entry being removed. + ** + ** iDelta: + ** The value of the rowid or delta-rowid value for the doclist + ** entry being removed. + ** + ** iNextOff: + ** The offset of the next entry following the position list + ** for the one being removed. If the position list for this + ** entry overflows onto the next leaf page, this value will be + ** greater than pLeaf->szLeaf. 
+ */ + { + int iSOP; /* Start-Of-Position-list */ + if( pSeg->iLeafPgno==pSeg->iTermLeafPgno ){ + iStart = pSeg->iTermLeafOffset; + }else{ + iStart = fts5GetU16(&aPg[0]); + } + + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + assert_nc( iSOP<=pSeg->iLeafOffset ); + + if( bDetailNone ){ + while( iSOPiLeafOffset ){ + if( aPg[iSOP]==0x00 ) iSOP++; + if( aPg[iSOP]==0x00 ) iSOP++; + iStart = iSOP; + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + } + + iNextOff = iSOP; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + if( iNextOffiEndofDoclist && aPg[iNextOff]==0x00 ) iNextOff++; + + }else{ + int nPos = 0; + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + while( iSOPiLeafOffset ){ + iStart = iSOP + (nPos/2); + iSOP = iStart + fts5GetVarint(&aPg[iStart], &iDelta); + iSOP += fts5GetVarint32(&aPg[iSOP], nPos); + } + assert_nc( iSOP==pSeg->iLeafOffset ); + iNextOff = pSeg->iLeafOffset + pSeg->nPos; + } + } + + iOff = iStart; + + /* If the position-list for the entry being removed flows over past + ** the end of this page, delete the portion of the position-list on the + ** next page and beyond. + ** + ** Set variable bLastInDoclist to true if this entry happens + ** to be the last rowid in the doclist for its term. */ + if( iNextOff>=iPgIdx ){ + int pgno = pSeg->iLeafPgno+1; + fts5SecureDeleteOverflow(p, pSeg->pSeg, pgno, &bLastInDoclist); + iNextOff = iPgIdx; + } + + if( pSeg->bDel==0 ){ + if( iNextOff!=iPgIdx ){ + /* Loop through the page-footer. If iNextOff (offset of the + ** entry following the one we are removing) is equal to the + ** offset of a key on this page, then the entry is the last + ** in its doclist. */ + int iKeyOff = 0; + for(iIdx=0; iIdxbDel ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta); + aPg[iOff++] = 0x01; + }else if( bLastInDoclist==0 ){ + if( iNextOff!=iPgIdx ){ + u64 iNextDelta = 0; + iNextOff += fts5GetVarint(&aPg[iNextOff], &iNextDelta); + iOff += sqlite3Fts5PutVarint(&aPg[iOff], iDelta + iNextDelta); + } + }else if( + pSeg->iLeafPgno==pSeg->iTermLeafPgno + && iStart==pSeg->iTermLeafOffset + ){ + /* The entry being removed was the only position list in its + ** doclist. Therefore the term needs to be removed as well. */ + int iKey = 0; + int iKeyOff = 0; + + /* Set iKeyOff to the offset of the term that will be removed - the + ** last offset in the footer that is not greater than iStart. */ + for(iIdx=0; iIdx(u32)iStart ) break; + iKeyOff += iVal; + } + assert_nc( iKey>=1 ); + + /* Set iDelKeyOff to the value of the footer entry to remove from + ** the page. */ + iDelKeyOff = iOff = iKeyOff; + + if( iNextOff!=iPgIdx ){ + /* This is the only position-list associated with the term, and there + ** is another term following it on this page. So the subsequent term + ** needs to be moved to replace the term associated with the entry + ** being removed. 
*/ + int nPrefix = 0; + int nSuffix = 0; + int nPrefix2 = 0; + int nSuffix2 = 0; + + iDelKeyOff = iNextOff; + iNextOff += fts5GetVarint32(&aPg[iNextOff], nPrefix2); + iNextOff += fts5GetVarint32(&aPg[iNextOff], nSuffix2); + + if( iKey!=1 ){ + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nPrefix); + } + iKeyOff += fts5GetVarint32(&aPg[iKeyOff], nSuffix); + + nPrefix = MIN(nPrefix, nPrefix2); + nSuffix = (nPrefix2 + nSuffix2) - nPrefix; + + if( (iKeyOff+nSuffix)>iPgIdx || (iNextOff+nSuffix2)>iPgIdx ){ + FTS5_CORRUPT_IDX(p); + }else{ + if( iKey!=1 ){ + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nPrefix); + } + iOff += sqlite3Fts5PutVarint(&aPg[iOff], nSuffix); + if( nPrefix2>pSeg->term.n ){ + FTS5_CORRUPT_IDX(p); + }else if( nPrefix2>nPrefix ){ + memcpy(&aPg[iOff], &pSeg->term.p[nPrefix], nPrefix2-nPrefix); + iOff += (nPrefix2-nPrefix); + } + memmove(&aPg[iOff], &aPg[iNextOff], nSuffix2); + iOff += nSuffix2; + iNextOff += nSuffix2; + } + } + }else if( iStart==4 ){ + int iPgno; + + assert_nc( pSeg->iLeafPgno>pSeg->iTermLeafPgno ); + /* The entry being removed may be the only position list in + ** its doclist. */ + for(iPgno=pSeg->iLeafPgno-1; iPgno>pSeg->iTermLeafPgno; iPgno-- ){ + Fts5Data *pPg = fts5DataRead(p, FTS5_SEGMENT_ROWID(iSegid, iPgno)); + int bEmpty = (pPg && pPg->nn==4); + fts5DataRelease(pPg); + if( bEmpty==0 ) break; + } + + if( iPgno==pSeg->iTermLeafPgno ){ + i64 iId = FTS5_SEGMENT_ROWID(iSegid, pSeg->iTermLeafPgno); + Fts5Data *pTerm = fts5DataRead(p, iId); + if( pTerm && pTerm->szLeaf==pSeg->iTermLeafOffset ){ + u8 *aTermIdx = &pTerm->p[pTerm->szLeaf]; + int nTermIdx = pTerm->nn - pTerm->szLeaf; + int iTermIdx = 0; + int iTermOff = 0; + + while( 1 ){ + u32 iVal = 0; + int nByte = fts5GetVarint32(&aTermIdx[iTermIdx], iVal); + iTermOff += iVal; + if( (iTermIdx+nByte)>=nTermIdx ) break; + iTermIdx += nByte; + } + nTermIdx = iTermIdx; + + memmove(&pTerm->p[iTermOff], &pTerm->p[pTerm->szLeaf], nTermIdx); + fts5PutU16(&pTerm->p[2], iTermOff); + + fts5DataWrite(p, iId, pTerm->p, iTermOff+nTermIdx); + if( nTermIdx==0 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iTermLeafPgno); + } + } + fts5DataRelease(pTerm); + } + } + + /* Assuming no error has occurred, this block does final edits to the + ** leaf page before writing it back to disk. Input variables are: + ** + ** nPg: Total initial size of leaf page. + ** iPgIdx: Initial offset of page footer. + ** + ** iOff: Offset to move data to + ** iNextOff: Offset to move data from + */ + if( p->rc==SQLITE_OK ){ + const int nMove = nPg - iNextOff; /* Number of bytes to move */ + int nShift = iNextOff - iOff; /* Distance to move them */ + + int iPrevKeyOut = 0; + int iKeyIn = 0; + + memmove(&aPg[iOff], &aPg[iNextOff], nMove); + iPgIdx -= nShift; + nPg = iPgIdx; + fts5PutU16(&aPg[2], iPgIdx); + + for(iIdx=0; iIdxiOff ? nShift : 0)); + nPg += sqlite3Fts5PutVarint(&aPg[nPg], iKeyOut - iPrevKeyOut); + iPrevKeyOut = iKeyOut; + } + } + + if( iPgIdx==nPg && nIdx>0 && pSeg->iLeafPgno!=1 ){ + fts5SecureDeleteIdxEntry(p, iSegid, pSeg->iLeafPgno); + } + + assert_nc( nPg>4 || fts5GetU16(aPg)==0 ); + fts5DataWrite(p, FTS5_SEGMENT_ROWID(iSegid,pSeg->iLeafPgno), aPg, nPg); + } + sqlite3_free(aIdx); +} + +/* +** This is called as part of flushing a delete to disk in 'secure-delete' +** mode. It edits the segments within the database described by argument +** pStruct to remove the entries for term zTerm, rowid iRowid. +** +** Return SQLITE_OK if successful, or an SQLite error code if an error +** has occurred. 
Any error code is also stored in the Fts5Index handle. +*/ +static int fts5FlushSecureDelete( + Fts5Index *p, + Fts5Structure *pStruct, + const char *zTerm, + int nTerm, + i64 iRowid +){ + const int f = FTS5INDEX_QUERY_SKIPHASH; + Fts5Iter *pIter = 0; /* Used to find term instance */ + + /* If the version number has not been set to SECUREDELETE, do so now. */ + if( p->pConfig->iVersion!=FTS5_CURRENT_VERSION_SECUREDELETE ){ + Fts5Config *pConfig = p->pConfig; + sqlite3_stmt *pStmt = 0; + fts5IndexPrepareStmt(p, &pStmt, sqlite3_mprintf( + "REPLACE INTO %Q.'%q_config' VALUES ('version', %d)", + pConfig->zDb, pConfig->zName, FTS5_CURRENT_VERSION_SECUREDELETE + )); + if( p->rc==SQLITE_OK ){ + int rc; + sqlite3_step(pStmt); + rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK ) p->rc = rc; + pConfig->iCookie++; + pConfig->iVersion = FTS5_CURRENT_VERSION_SECUREDELETE; + } + } + + fts5MultiIterNew(p, pStruct, f, 0, (const u8*)zTerm, nTerm, -1, 0, &pIter); + if( fts5MultiIterEof(p, pIter)==0 ){ + i64 iThis = fts5MultiIterRowid(pIter); + if( iThisrc==SQLITE_OK + && fts5MultiIterEof(p, pIter)==0 + && iRowid==fts5MultiIterRowid(pIter) + ){ + Fts5SegIter *pSeg = &pIter->aSeg[pIter->aFirst[1].iFirst]; + fts5DoSecureDelete(p, pSeg); + } + } + + fts5MultiIterFree(pIter); + return p->rc; +} + + +/* +** Flush the contents of in-memory hash table iHash to a new level-0 +** segment on disk. Also update the corresponding structure record. +** +** If an error occurs, set the Fts5Index.rc error code. If an error has ** already occurred, this function is a no-op. */ static void fts5FlushOneHash(Fts5Index *p){ @@ -4577,143 +5592,199 @@ static void fts5FlushOneHash(Fts5Index *p){ /* Obtain a reference to the index structure and allocate a new segment-id ** for the new level-0 segment. */ pStruct = fts5StructureRead(p); - iSegid = fts5AllocateSegid(p, pStruct); fts5StructureInvalidate(p); - if( iSegid ){ - const int pgsz = p->pConfig->pgsz; - int eDetail = p->pConfig->eDetail; - Fts5StructureSegment *pSeg; /* New segment within pStruct */ - Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ - Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ - - Fts5SegWriter writer; - fts5WriteInit(p, &writer, iSegid); - - pBuf = &writer.writer.buf; - pPgidx = &writer.writer.pgidx; - - /* fts5WriteInit() should have initialized the buffers to (most likely) - ** the maximum space required. */ - assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); - assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); - - /* Begin scanning through hash table entries. This loop runs once for each - ** term/doclist currently stored within the hash table. */ - if( p->rc==SQLITE_OK ){ - p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); - } - while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ - const char *zTerm; /* Buffer containing term */ - const u8 *pDoclist; /* Pointer to doclist for this term */ - int nDoclist; /* Size of doclist in bytes */ - - /* Write the term for this entry to disk. */ - sqlite3Fts5HashScanEntry(pHash, &zTerm, &pDoclist, &nDoclist); - fts5WriteAppendTerm(p, &writer, (int)strlen(zTerm), (const u8*)zTerm); - if( p->rc!=SQLITE_OK ) break; - - assert( writer.bFirstRowidInPage==0 ); - if( pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ - /* The entire doclist will fit on the current leaf. */ - fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); - }else{ - i64 iRowid = 0; - u64 iDelta = 0; - int iOff = 0; - - /* The entire doclist will not fit on this leaf. 
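fts5FlushSecureDelete() above first persists the secure-delete format version with the `REPLACE INTO '%q_config'` statement, then positions a SKIPHASH iterator on the (term, rowid) pair and hands the matching entry to fts5DoSecureDelete(). The mode itself is opt-in; a hedged sketch of the application side, using the documented `'secure-delete'` command and a made-up table name:

```c
#include <sqlite3.h>

/* Hedged sketch (not part of the patch): once 'secure-delete' is enabled,
** subsequent DELETEs flow through fts5FlushSecureDelete() and physically
** remove the deleted term/rowid entries from the index, rather than
** leaving them to be dropped by a later merge. */
static int enable_secure_delete(sqlite3 *db){
  return sqlite3_exec(db,
      "INSERT INTO ft(ft, rank) VALUES('secure-delete', 1);"
      "DELETE FROM ft WHERE rowid=42;",
      0, 0, 0);
}
```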
The following - ** loop iterates through the poslists that make up the current - ** doclist. */ - while( p->rc==SQLITE_OK && iOffp[0], (u16)pBuf->n); /* first rowid on page */ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); - writer.bFirstRowidInPage = 0; - fts5WriteDlidxAppend(p, &writer, iRowid); + if( sqlite3Fts5HashIsEmpty(pHash)==0 ){ + iSegid = fts5AllocateSegid(p, pStruct); + if( iSegid ){ + const int pgsz = p->pConfig->pgsz; + int eDetail = p->pConfig->eDetail; + int bSecureDelete = p->pConfig->bSecureDelete; + Fts5StructureSegment *pSeg; /* New segment within pStruct */ + Fts5Buffer *pBuf; /* Buffer in which to assemble leaf page */ + Fts5Buffer *pPgidx; /* Buffer in which to assemble pgidx */ + + Fts5SegWriter writer; + fts5WriteInit(p, &writer, iSegid); + + pBuf = &writer.writer.buf; + pPgidx = &writer.writer.pgidx; + + /* fts5WriteInit() should have initialized the buffers to (most likely) + ** the maximum space required. */ + assert( p->rc || pBuf->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + assert( p->rc || pPgidx->nSpace>=(pgsz + FTS5_DATA_PADDING) ); + + /* Begin scanning through hash table entries. This loop runs once for each + ** term/doclist currently stored within the hash table. */ + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3Fts5HashScanInit(pHash, 0, 0); + } + while( p->rc==SQLITE_OK && 0==sqlite3Fts5HashScanEof(pHash) ){ + const char *zTerm; /* Buffer containing term */ + int nTerm; /* Size of zTerm in bytes */ + const u8 *pDoclist; /* Pointer to doclist for this term */ + int nDoclist; /* Size of doclist in bytes */ + + /* Get the term and doclist for this entry. */ + sqlite3Fts5HashScanEntry(pHash, &zTerm, &nTerm, &pDoclist, &nDoclist); + if( bSecureDelete==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + if( p->rc!=SQLITE_OK ) break; + assert( writer.bFirstRowidInPage==0 ); + } + + if( !bSecureDelete && pgsz>=(pBuf->n + pPgidx->n + nDoclist + 1) ){ + /* The entire doclist will fit on the current leaf. */ + fts5BufferSafeAppendBlob(pBuf, pDoclist, nDoclist); + }else{ + int bTermWritten = !bSecureDelete; + i64 iRowid = 0; + i64 iPrev = 0; + int iOff = 0; + + /* The entire doclist will not fit on this leaf. The following + ** loop iterates through the poslists that make up the current + ** doclist. 
*/ + while( p->rc==SQLITE_OK && iOffrc!=SQLITE_OK || pDoclist[iOff]==0x01 ){ + iOff++; + continue; + } + } + } + + if( p->rc==SQLITE_OK && bTermWritten==0 ){ + fts5WriteAppendTerm(p, &writer, nTerm, (const u8*)zTerm); + bTermWritten = 1; + assert( p->rc!=SQLITE_OK || writer.bFirstRowidInPage==0 ); + } + + if( writer.bFirstRowidInPage ){ + fts5PutU16(&pBuf->p[0], (u16)pBuf->n); /* first rowid on page */ + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowid); + writer.bFirstRowidInPage = 0; + fts5WriteDlidxAppend(p, &writer, iRowid); + }else{ + u64 iRowidDelta = (u64)iRowid - (u64)iPrev; + pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iRowidDelta); + } if( p->rc!=SQLITE_OK ) break; - }else{ - pBuf->n += sqlite3Fts5PutVarint(&pBuf->p[pBuf->n], iDelta); - } - assert( pBuf->n<=pBuf->nSpace ); - - if( eDetail==FTS5_DETAIL_NONE ){ - if( iOffp[pBuf->n++] = 0; - iOff++; + assert( pBuf->n<=pBuf->nSpace ); + iPrev = iRowid; + + if( eDetail==FTS5_DETAIL_NONE ){ if( iOffp[pBuf->n++] = 0; iOff++; + if( iOffp[pBuf->n++] = 0; + iOff++; + } + } + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); } - } - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); - } - }else{ - int bDummy; - int nPos; - int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDummy); - nCopy += nPos; - if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ - /* The entire poslist will fit on the current leaf. So copy - ** it in one go. */ - fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); }else{ - /* The entire poslist will not fit on this leaf. So it needs - ** to be broken into sections. The only qualification being - ** that each varint must be stored contiguously. */ - const u8 *pPoslist = &pDoclist[iOff]; - int iPos = 0; - while( p->rc==SQLITE_OK ){ - int nSpace = pgsz - pBuf->n - pPgidx->n; - int n = 0; - if( (nCopy - iPos)<=nSpace ){ - n = nCopy - iPos; - }else{ - n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); - } - assert( n>0 ); - fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); - iPos += n; - if( (pBuf->n + pPgidx->n)>=pgsz ){ - fts5WriteFlushLeaf(p, &writer); + int bDel = 0; + int nPos = 0; + int nCopy = fts5GetPoslistSize(&pDoclist[iOff], &nPos, &bDel); + if( bDel && bSecureDelete ){ + fts5BufferAppendVarint(&p->rc, pBuf, nPos*2); + iOff += nCopy; + nCopy = nPos; + }else{ + nCopy += nPos; + } + if( (pBuf->n + pPgidx->n + nCopy) <= pgsz ){ + /* The entire poslist will fit on the current leaf. So copy + ** it in one go. */ + fts5BufferSafeAppendBlob(pBuf, &pDoclist[iOff], nCopy); + }else{ + /* The entire poslist will not fit on this leaf. So it needs + ** to be broken into sections. The only qualification being + ** that each varint must be stored contiguously. */ + const u8 *pPoslist = &pDoclist[iOff]; + int iPos = 0; + while( p->rc==SQLITE_OK ){ + int nSpace = pgsz - pBuf->n - pPgidx->n; + int n = 0; + if( (nCopy - iPos)<=nSpace ){ + n = nCopy - iPos; + }else{ + n = fts5PoslistPrefix(&pPoslist[iPos], nSpace); + } + assert( n>0 ); + fts5BufferSafeAppendBlob(pBuf, &pPoslist[iPos], n); + iPos += n; + if( (pBuf->n + pPgidx->n)>=pgsz ){ + fts5WriteFlushLeaf(p, &writer); + } + if( iPos>=nCopy ) break; } - if( iPos>=nCopy ) break; } + iOff += nCopy; } - iOff += nCopy; } } + + /* TODO2: Doclist terminator written here. 
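Several hunks above (fts5WriteDlidxAppend(), fts5WriteAppendRowid(), the fts5MergeAppendDocid macro, and the iRowidDelta computation in this flush loop) change rowid deltas to be computed on u64 operands. A minimal standalone sketch, not taken from the patch, of why that matters: the unsigned difference is well defined and still round-trips even when the signed subtraction would overflow.

```c
#include <assert.h>
#include <stdint.h>

typedef int64_t i64;
typedef uint64_t u64;

/* Encode the gap between two rowids as an unsigned value, as the patched
** code now does. Unsigned subtraction is defined modulo 2^64, so no
** undefined behaviour occurs even for extreme rowid pairs; decoding adds
** the same unsigned quantity back and recovers the original rowid. */
static u64 rowid_delta(i64 iPrev, i64 iRowid){
  return (u64)iRowid - (u64)iPrev;
}
static i64 rowid_undelta(i64 iPrev, u64 iDelta){
  return (i64)((u64)iPrev + iDelta);
}

int main(void){
  i64 iPrev  = INT64_MIN + 5;   /* a signed (iRowid - iPrev) here would */
  i64 iRowid = INT64_MAX - 5;   /* overflow and be undefined behaviour  */
  u64 d = rowid_delta(iPrev, iRowid);
  assert( rowid_undelta(iPrev, d)==iRowid );
  return 0;
}
```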
*/ + /* pBuf->p[pBuf->n++] = '\0'; */ + assert( pBuf->n<=pBuf->nSpace ); + if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); + } + fts5WriteFinish(p, &writer, &pgnoLast); + + assert( p->rc!=SQLITE_OK || bSecureDelete || pgnoLast>0 ); + if( pgnoLast>0 ){ + /* Update the Fts5Structure. It is written back to the database by the + ** fts5StructureRelease() call below. */ + if( pStruct->nLevel==0 ){ + fts5StructureAddLevel(&p->rc, &pStruct); + } + fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); + if( p->rc==SQLITE_OK ){ + pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; + pSeg->iSegid = iSegid; + pSeg->pgnoFirst = 1; + pSeg->pgnoLast = pgnoLast; + if( pStruct->nOriginCntr>0 ){ + pSeg->iOrigin1 = pStruct->nOriginCntr; + pSeg->iOrigin2 = pStruct->nOriginCntr; + pSeg->nEntry = p->nPendingRow; + pStruct->nOriginCntr++; + } + pStruct->nSegment++; + } + fts5StructurePromote(p, 0, pStruct); } - - /* TODO2: Doclist terminator written here. */ - /* pBuf->p[pBuf->n++] = '\0'; */ - assert( pBuf->n<=pBuf->nSpace ); - if( p->rc==SQLITE_OK ) sqlite3Fts5HashScanNext(pHash); - } - sqlite3Fts5HashClear(pHash); - fts5WriteFinish(p, &writer, &pgnoLast); - - /* Update the Fts5Structure. It is written back to the database by the - ** fts5StructureRelease() call below. */ - if( pStruct->nLevel==0 ){ - fts5StructureAddLevel(&p->rc, &pStruct); - } - fts5StructureExtendLevel(&p->rc, pStruct, 0, 1, 0); - if( p->rc==SQLITE_OK ){ - pSeg = &pStruct->aLevel[0].aSeg[ pStruct->aLevel[0].nSeg++ ]; - pSeg->iSegid = iSegid; - pSeg->pgnoFirst = 1; - pSeg->pgnoLast = pgnoLast; - pStruct->nSegment++; } - fts5StructurePromote(p, 0, pStruct); } - fts5IndexAutomerge(p, &pStruct, pgnoLast); + fts5IndexAutomerge(p, &pStruct, pgnoLast + p->nContentlessDelete); fts5IndexCrisismerge(p, &pStruct); fts5StructureWrite(p, pStruct); fts5StructureRelease(pStruct); @@ -4724,10 +5795,21 @@ static void fts5FlushOneHash(Fts5Index *p){ */ static void fts5IndexFlush(Fts5Index *p){ /* Unless it is empty, flush the hash table to disk */ - if( p->nPendingData ){ + if( p->flushRc ){ + p->rc = p->flushRc; + return; + } + if( p->nPendingData || p->nContentlessDelete ){ assert( p->pHash ); - p->nPendingData = 0; fts5FlushOneHash(p); + if( p->rc==SQLITE_OK ){ + sqlite3Fts5HashClear(p->pHash); + p->nPendingData = 0; + p->nPendingRow = 0; + p->nContentlessDelete = 0; + }else if( p->nPendingData || p->nContentlessDelete ){ + p->flushRc = p->rc; + } } } @@ -4736,40 +5818,47 @@ static Fts5Structure *fts5IndexOptimizeStruct( Fts5Structure *pStruct ){ Fts5Structure *pNew = 0; - sqlite3_int64 nByte = sizeof(Fts5Structure); + sqlite3_int64 nByte = SZ_FTS5STRUCTURE(1); int nSeg = pStruct->nSegment; int i; /* Figure out if this structure requires optimization. A structure does ** not require optimization if either: ** - ** + it consists of fewer than two segments, or - ** + all segments are on the same level, or - ** + all segments except one are currently inputs to a merge operation. + ** 1. it consists of fewer than two segments, or + ** 2. all segments are on the same level, or + ** 3. all segments except one are currently inputs to a merge operation. ** - ** In the first case, return NULL. In the second, increment the ref-count - ** on *pStruct and return a copy of the pointer to it. + ** In the first case, if there are no tombstone hash pages, return NULL. In + ** the second, increment the ref-count on *pStruct and return a copy of the + ** pointer to it. 
*/ - if( nSeg<2 ) return 0; + if( nSeg==0 ) return 0; for(i=0; inLevel; i++){ int nThis = pStruct->aLevel[i].nSeg; - if( nThis==nSeg || (nThis==nSeg-1 && pStruct->aLevel[i].nMerge==nThis) ){ + int nMerge = pStruct->aLevel[i].nMerge; + if( nThis>0 && (nThis==nSeg || (nThis==nSeg-1 && nMerge==nThis)) ){ + if( nSeg==1 && nThis==1 && pStruct->aLevel[i].aSeg[0].nPgTombstone==0 ){ + return 0; + } fts5StructureRef(pStruct); return pStruct; } assert( pStruct->aLevel[i].nMerge<=nThis ); } - nByte += (pStruct->nLevel+1) * sizeof(Fts5StructureLevel); + nByte += (((i64)pStruct->nLevel)+1) * sizeof(Fts5StructureLevel); + assert( nByte==(i64)SZ_FTS5STRUCTURE(pStruct->nLevel+2) ); pNew = (Fts5Structure*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pNew ){ Fts5StructureLevel *pLvl; nByte = nSeg * sizeof(Fts5StructureSegment); - pNew->nLevel = pStruct->nLevel+1; + pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL); pNew->nRef = 1; pNew->nWriteCounter = pStruct->nWriteCounter; - pLvl = &pNew->aLevel[pStruct->nLevel]; + pNew->nOriginCntr = pStruct->nOriginCntr; + pLvl = &pNew->aLevel[pNew->nLevel-1]; pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte); if( pLvl->aSeg ){ int iLvl, iSeg; @@ -4799,7 +5888,9 @@ int sqlite3Fts5IndexOptimize(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); + assert( p->rc!=SQLITE_OK || p->nContentlessDelete==0 ); pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || pStruct!=0 ); fts5StructureInvalidate(p); if( pStruct ){ @@ -4828,7 +5919,10 @@ int sqlite3Fts5IndexOptimize(Fts5Index *p){ ** INSERT command. */ int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ - Fts5Structure *pStruct = fts5StructureRead(p); + Fts5Structure *pStruct = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); if( pStruct ){ int nMin = p->pConfig->nUsermerge; fts5StructureInvalidate(p); @@ -4836,7 +5930,7 @@ int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ Fts5Structure *pNew = fts5IndexOptimizeStruct(p, pStruct); fts5StructureRelease(pStruct); pStruct = pNew; - nMin = 2; + nMin = 1; nMerge = nMerge*-1; } if( pStruct && pStruct->nLevel ){ @@ -4851,7 +5945,7 @@ int sqlite3Fts5IndexMerge(Fts5Index *p, int nMerge){ static void fts5AppendRowid( Fts5Index *p, - i64 iDelta, + u64 iDelta, Fts5Iter *pUnused, Fts5Buffer *pBuf ){ @@ -4861,7 +5955,7 @@ static void fts5AppendRowid( static void fts5AppendPoslist( Fts5Index *p, - i64 iDelta, + u64 iDelta, Fts5Iter *pMulti, Fts5Buffer *pBuf ){ @@ -4936,10 +6030,10 @@ static void fts5MergeAppendDocid( } #endif -#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \ - assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \ - fts5BufferSafeAppendVarint((pBuf), (iRowid) - (iLastRowid)); \ - (iLastRowid) = (iRowid); \ +#define fts5MergeAppendDocid(pBuf, iLastRowid, iRowid) { \ + assert( (pBuf)->n!=0 || (iLastRowid)==0 ); \ + fts5BufferSafeAppendVarint((pBuf), (u64)(iRowid) - (u64)(iLastRowid)); \ + (iLastRowid) = (iRowid); \ } /* @@ -5071,7 +6165,7 @@ static void fts5MergePrefixLists( /* Initialize a doclist-iterator for each input buffer. Arrange them in ** a linked-list starting at pHead in ascending order of rowid. Avoid ** linking any iterators already at EOF into the linked list at all. 
*/ - assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) ); + assert( nBuf+1<=(int)(sizeof(aMerger)/sizeof(aMerger[0])) ); memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1)); pHead = &aMerger[nBuf]; fts5DoclistIterInit(p1, &pHead->iter); @@ -5123,7 +6217,7 @@ static void fts5MergePrefixLists( } if( pHead==0 || pHead->pNext==0 ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_IDX(p); break; } @@ -5160,7 +6254,7 @@ static void fts5MergePrefixLists( assert_nc( tmp.n+nTail<=nTmp ); assert( tmp.n+nTail<=nTmp+nMerge*10 ); if( tmp.n+nTail>nTmp-FTS5_DATA_ZERO_PADDING ){ - if( p->rc==SQLITE_OK ) p->rc = FTS5_CORRUPT; + if( p->rc==SQLITE_OK ) FTS5_CORRUPT_IDX(p); break; } fts5BufferSafeAppendVarint(&out, (tmp.n+nTail) * 2); @@ -5195,142 +6289,485 @@ static void fts5MergePrefixLists( *p1 = out; } -static void fts5SetupPrefixIter( - Fts5Index *p, /* Index to read from */ - int bDesc, /* True for "ORDER BY rowid DESC" */ - int iIdx, /* Index to scan for data */ - u8 *pToken, /* Buffer containing prefix to match */ + +/* +** Iterate through a range of entries in the FTS index, invoking the xVisit +** callback for each of them. +** +** Parameter pToken points to an nToken buffer containing an FTS index term +** (i.e. a document term with the preceding 1 byte index identifier - +** FTS5_MAIN_PREFIX or similar). If bPrefix is true, then the call visits +** all entries for terms that have pToken/nToken as a prefix. If bPrefix +** is false, then only entries with pToken/nToken as the entire key are +** visited. +** +** If the current table is a tokendata=1 table, then if bPrefix is true then +** each index term is treated separately. However, if bPrefix is false, then +** all index terms corresponding to pToken/nToken are collapsed into a single +** term before the callback is invoked. +** +** The callback invoked for each entry visited is specified by paramter xVisit. +** Each time it is invoked, it is passed a pointer to the Fts5Index object, +** a copy of the 7th paramter to this function (pCtx) and a pointer to the +** iterator that indicates the current entry. If the current entry is the +** first with a new term (i.e. different from that of the previous entry, +** including the very first term), then the final two parameters are passed +** a pointer to the term and its size in bytes, respectively. If the current +** entry is not the first associated with its term, these two parameters +** are passed 0. +** +** If parameter pColset is not NULL, then it is used to filter entries before +** the callback is invoked. 
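The comment above spells out the fts5VisitEntries() callback contract: the final two arguments carry the term and its length only for the first entry of each new term, and NULL/0 otherwise. A hypothetical callback, purely illustrative and not part of the patch, that relies on that contract (it assumes it is compiled inside fts5_index.c, where Fts5Index, Fts5Iter and u8 are in scope):

```c
/* Hypothetical xVisit callback: count distinct terms and total entries
** visited under a token or prefix. Not part of the patch. */
typedef struct EntryCount EntryCount;
struct EntryCount { int nTerm; int nEntry; };

static void countEntriesCb(
  Fts5Index *p,                 /* Index being visited (unused here) */
  void *pCtx,                   /* Points to an EntryCount object */
  Fts5Iter *pIter,              /* Iterator positioned on current entry */
  const u8 *pNew,               /* Term, or NULL if same term as previous */
  int nNew                      /* Size of pNew in bytes, or 0 */
){
  EntryCount *pCnt = (EntryCount*)pCtx;
  if( pNew ) pCnt->nTerm++;     /* first entry with a new term */
  pCnt->nEntry++;               /* every entry visited */
  (void)p; (void)pIter; (void)nNew;
}
```

Such a callback would be passed as the last two arguments, for example fts5VisitEntries(p, 0, pToken, nToken, 1, countEntriesCb, (void*)&cnt) for a prefix scan.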
+*/ +static int fts5VisitEntries( + Fts5Index *p, /* Fts5 index object */ + Fts5Colset *pColset, /* Columns filter to apply, or NULL */ + u8 *pToken, /* Buffer containing token */ int nToken, /* Size of buffer pToken in bytes */ - Fts5Colset *pColset, /* Restrict matches to these columns */ - Fts5Iter **ppIter /* OUT: New iterator */ + int bPrefix, /* True for a prefix scan */ + void (*xVisit)(Fts5Index*, void *pCtx, Fts5Iter *pIter, const u8*, int), + void *pCtx /* Passed as second argument to xVisit() */ ){ - Fts5Structure *pStruct; - Fts5Buffer *aBuf; - int nBuf = 32; - int nMerge = 1; - - void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); - void (*xAppend)(Fts5Index*, i64, Fts5Iter*, Fts5Buffer*); - if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ - xMerge = fts5MergeRowidLists; - xAppend = fts5AppendRowid; - }else{ - nMerge = FTS5_MERGE_NLIST-1; - nBuf = nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ - xMerge = fts5MergePrefixLists; - xAppend = fts5AppendPoslist; - } - - aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*nBuf); - pStruct = fts5StructureRead(p); - - if( aBuf && pStruct ){ - const int flags = FTS5INDEX_QUERY_SCAN - | FTS5INDEX_QUERY_SKIPEMPTY - | FTS5INDEX_QUERY_NOOUTPUT; - int i; - i64 iLastRowid = 0; - Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ - Fts5Data *pData; - Fts5Buffer doclist; - int bNewTerm = 1; - - memset(&doclist, 0, sizeof(doclist)); - if( iIdx!=0 ){ - int dummy = 0; - const int f2 = FTS5INDEX_QUERY_SKIPEMPTY|FTS5INDEX_QUERY_NOOUTPUT; - pToken[0] = FTS5_MAIN_PREFIX; - fts5MultiIterNew(p, pStruct, f2, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); - for(; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &dummy) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - p1->xSetOutputs(p1, pSeg); - if( p1->base.nData ){ - xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } - } - fts5MultiIterFree(p1); - } - - pToken[0] = FTS5_MAIN_PREFIX + iIdx; - fts5MultiIterNew(p, pStruct, flags, pColset, pToken, nToken, -1, 0, &p1); - fts5IterSetOutputCb(&p->rc, p1); - for( /* no-op */ ; - fts5MultiIterEof(p, p1)==0; - fts5MultiIterNext2(p, p1, &bNewTerm) - ){ - Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; - int nTerm = pSeg->term.n; - const u8 *pTerm = pSeg->term.p; - p1->xSetOutputs(p1, pSeg); - - assert_nc( memcmp(pToken, pTerm, MIN(nToken, nTerm))<=0 ); - if( bNewTerm ){ - if( nTermbase.nData==0 ) continue; + const int flags = (bPrefix ? 
FTS5INDEX_QUERY_SCAN : 0) + | FTS5INDEX_QUERY_SKIPEMPTY + | FTS5INDEX_QUERY_NOOUTPUT; + Fts5Iter *p1 = 0; /* Iterator used to gather data from index */ + int bNewTerm = 1; + Fts5Structure *pStruct = fts5StructureRead(p); - if( p1->base.iRowid<=iLastRowid && doclist.n>0 ){ - for(i=0; p->rc==SQLITE_OK && doclist.n; i++){ - int i1 = i*nMerge; - int iStore; - assert( i1+nMerge<=nBuf ); - for(iStore=i1; iStorerc, p1); + for( /* no-op */ ; + fts5MultiIterEof(p, p1)==0; + fts5MultiIterNext2(p, p1, &bNewTerm) + ){ + Fts5SegIter *pSeg = &p1->aSeg[ p1->aFirst[1].iFirst ]; + int nNew = 0; + const u8 *pNew = 0; - xAppend(p, p1->base.iRowid-iLastRowid, p1, &doclist); - iLastRowid = p1->base.iRowid; - } + p1->xSetOutputs(p1, pSeg); + if( p->rc ) break; - assert( (nBuf%nMerge)==0 ); - for(i=0; irc==SQLITE_OK ){ - xMerge(p, &doclist, nMerge, &aBuf[i]); - } - for(iFree=i; iFreeterm.n; + pNew = pSeg->term.p; + if( nNewp = (u8*)&pData[1]; - pData->nn = pData->szLeaf = doclist.n; - if( doclist.n ) memcpy(pData->p, doclist.p, doclist.n); - fts5MultiIterNew2(p, pData, bDesc, ppIter); - } - fts5BufferFree(&doclist); + xVisit(p, pCtx, p1, pNew, nNew); } + fts5MultiIterFree(p1); fts5StructureRelease(pStruct); - sqlite3_free(aBuf); + return p->rc; } /* -** Indicate that all subsequent calls to sqlite3Fts5IndexWrite() pertain -** to the document with rowid iRowid. +** Usually, a tokendata=1 iterator (struct Fts5TokenDataIter) accumulates an +** array of these for each row it visits (so all iRowid fields are the same). +** Or, for an iterator used by an "ORDER BY rank" query, it accumulates an +** array of these for the entire query (in which case iRowid fields may take +** a variety of values). +** +** Each instance in the array indicates the iterator (and therefore term) +** associated with position iPos of rowid iRowid. This is used by the +** xInstToken() API. +** +** iRowid: +** Rowid for the current entry. +** +** iPos: +** Position of current entry within row. In the usual ((iCol<<32)+iOff) +** format (e.g. see macros FTS5_POS2COLUMN() and FTS5_POS2OFFSET()). +** +** iIter: +** If the Fts5TokenDataIter iterator that the entry is part of is +** actually an iterator (i.e. with nIter>0, not just a container for +** Fts5TokenDataMap structures), then this variable is an index into +** the apIter[] array. The corresponding term is that which the iterator +** at apIter[iIter] currently points to. +** +** Or, if the Fts5TokenDataIter iterator is just a container object +** (nIter==0), then iIter is an index into the term.p[] buffer where +** the term is stored. +** +** nByte: +** In the case where iIter is an index into term.p[], this variable +** is the size of the term in bytes. If iIter is an index into apIter[], +** this variable is unused. +*/ +struct Fts5TokenDataMap { + i64 iRowid; /* Row this token is located in */ + i64 iPos; /* Position of token */ + int iIter; /* Iterator token was read from */ + int nByte; /* Length of token in bytes (or 0) */ +}; + +/* +** An object used to supplement Fts5Iter for tokendata=1 iterators. +** +** This object serves two purposes. The first is as a container for an array +** of Fts5TokenDataMap structures, which are used to find the token required +** when the xInstToken() API is used. This is done by the nMapAlloc, nMap and +** aMap[] variables. 
+*/ +struct Fts5TokenDataIter { + int nMapAlloc; /* Allocated size of aMap[] in entries */ + int nMap; /* Number of valid entries in aMap[] */ + Fts5TokenDataMap *aMap; /* Array of (rowid+pos -> token) mappings */ + + /* The following are used for prefix-queries only. */ + Fts5Buffer terms; + + /* The following are used for other full-token tokendata queries only. */ + int nIter; + int nIterAlloc; + Fts5PoslistReader *aPoslistReader; + int *aPoslistToIter; + Fts5Iter *apIter[FLEXARRAY]; +}; + +/* Size in bytes of an Fts5TokenDataIter object holding up to N iterators */ +#define SZ_FTS5TOKENDATAITER(N) \ + (offsetof(Fts5TokenDataIter,apIter) + (N)*sizeof(Fts5Iter)) + +/* +** The two input arrays - a1[] and a2[] - are in sorted order. This function +** merges the two arrays together and writes the result to output array +** aOut[]. aOut[] is guaranteed to be large enough to hold the result. +** +** Duplicate entries are copied into the output. So the size of the output +** array is always (n1+n2) entries. +*/ +static void fts5TokendataMerge( + Fts5TokenDataMap *a1, int n1, /* Input array 1 */ + Fts5TokenDataMap *a2, int n2, /* Input array 2 */ + Fts5TokenDataMap *aOut /* Output array */ +){ + int i1 = 0; + int i2 = 0; + + assert( n1>=0 && n2>=0 ); + while( i1=n2 || (i1rc==SQLITE_OK ){ + if( pT->nMap==pT->nMapAlloc ){ + int nNew = pT->nMapAlloc ? pT->nMapAlloc*2 : 64; + int nAlloc = nNew * sizeof(Fts5TokenDataMap); + Fts5TokenDataMap *aNew; + + aNew = (Fts5TokenDataMap*)sqlite3_realloc(pT->aMap, nAlloc); + if( aNew==0 ){ + p->rc = SQLITE_NOMEM; + return; + } + + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pT->aMap[pT->nMap].iRowid = iRowid; + pT->aMap[pT->nMap].iPos = iPos; + pT->aMap[pT->nMap].iIter = iIter; + pT->aMap[pT->nMap].nByte = nByte; + pT->nMap++; + } +} + +/* +** Sort the contents of the pT->aMap[] array. +** +** The sorting algorithm requires a malloc(). If this fails, an error code +** is left in Fts5Index.rc before returning. +*/ +static void fts5TokendataIterSortMap(Fts5Index *p, Fts5TokenDataIter *pT){ + Fts5TokenDataMap *aTmp = 0; + int nByte = pT->nMap * sizeof(Fts5TokenDataMap); + + aTmp = (Fts5TokenDataMap*)sqlite3Fts5MallocZero(&p->rc, nByte); + if( aTmp ){ + Fts5TokenDataMap *a1 = pT->aMap; + Fts5TokenDataMap *a2 = aTmp; + i64 nHalf; + + for(nHalf=1; nHalfnMap; nHalf=nHalf*2){ + int i1; + for(i1=0; i1nMap; i1+=(nHalf*2)){ + int n1 = MIN(nHalf, pT->nMap-i1); + int n2 = MIN(nHalf, pT->nMap-i1-n1); + fts5TokendataMerge(&a1[i1], n1, &a1[i1+n1], n2, &a2[i1]); + } + SWAPVAL(Fts5TokenDataMap*, a1, a2); + } + + if( a1!=pT->aMap ){ + memcpy(pT->aMap, a1, pT->nMap*sizeof(Fts5TokenDataMap)); + } + sqlite3_free(aTmp); + +#ifdef SQLITE_DEBUG + { + int ii; + for(ii=1; iinMap; ii++){ + Fts5TokenDataMap *p1 = &pT->aMap[ii-1]; + Fts5TokenDataMap *p2 = &pT->aMap[ii]; + assert( p1->iRowidiRowid + || (p1->iRowid==p2->iRowid && p1->iPos<=p2->iPos) + ); + } + } +#endif + } +} + +/* +** Delete an Fts5TokenDataIter structure and its contents. +*/ +static void fts5TokendataIterDelete(Fts5TokenDataIter *pSet){ + if( pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + fts5MultiIterFree(pSet->apIter[ii]); + } + fts5BufferFree(&pSet->terms); + sqlite3_free(pSet->aPoslistReader); + sqlite3_free(pSet->aMap); + sqlite3_free(pSet); + } +} + + +/* +** fts5VisitEntries() context object used by fts5SetupPrefixIterTokendata() +** to pass data to prefixIterSetupTokendataCb(). 
+*/ +typedef struct TokendataSetupCtx TokendataSetupCtx; +struct TokendataSetupCtx { + Fts5TokenDataIter *pT; /* Object being populated with mappings */ + int iTermOff; /* Offset of current term in terms.p[] */ + int nTermByte; /* Size of current term in bytes */ +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIterTokendata(). This +** callback adds an entry to the Fts5TokenDataIter.aMap[] array for each +** position in the current position-list. It doesn't matter that some of +** these may be out of order - they will be sorted later. +*/ +static void prefixIterSetupTokendataCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + TokendataSetupCtx *pSetup = (TokendataSetupCtx*)pCtx; + int iPosOff = 0; + i64 iPos = 0; + + if( pNew ){ + pSetup->nTermByte = nNew-1; + pSetup->iTermOff = pSetup->pT->terms.n; + fts5BufferAppendBlob(&p->rc, &pSetup->pT->terms, nNew-1, pNew+1); + } + + while( 0==sqlite3Fts5PoslistNext64( + p1->base.pData, p1->base.nData, &iPosOff, &iPos + ) ){ + fts5TokendataIterAppendMap(p, + pSetup->pT, pSetup->iTermOff, pSetup->nTermByte, p1->base.iRowid, iPos + ); + } +} + + +/* +** Context object passed by fts5SetupPrefixIter() to fts5VisitEntries(). +*/ +typedef struct PrefixSetupCtx PrefixSetupCtx; +struct PrefixSetupCtx { + void (*xMerge)(Fts5Index*, Fts5Buffer*, int, Fts5Buffer*); + void (*xAppend)(Fts5Index*, u64, Fts5Iter*, Fts5Buffer*); + i64 iLastRowid; + int nMerge; + Fts5Buffer *aBuf; + int nBuf; + Fts5Buffer doclist; + TokendataSetupCtx *pTokendata; +}; + +/* +** fts5VisitEntries() callback used by fts5SetupPrefixIter() +*/ +static void prefixIterSetupCb( + Fts5Index *p, + void *pCtx, + Fts5Iter *p1, + const u8 *pNew, + int nNew +){ + PrefixSetupCtx *pSetup = (PrefixSetupCtx*)pCtx; + const int nMerge = pSetup->nMerge; + + if( p1->base.nData>0 ){ + if( p1->base.iRowid<=pSetup->iLastRowid && pSetup->doclist.n>0 ){ + int i; + for(i=0; p->rc==SQLITE_OK && pSetup->doclist.n; i++){ + int i1 = i*nMerge; + int iStore; + assert( i1+nMerge<=pSetup->nBuf ); + for(iStore=i1; iStoreaBuf[iStore].n==0 ){ + fts5BufferSwap(&pSetup->doclist, &pSetup->aBuf[iStore]); + fts5BufferZero(&pSetup->doclist); + break; + } + } + if( iStore==i1+nMerge ){ + pSetup->xMerge(p, &pSetup->doclist, nMerge, &pSetup->aBuf[i1]); + for(iStore=i1; iStoreaBuf[iStore]); + } + } + } + pSetup->iLastRowid = 0; + } + + pSetup->xAppend( + p, (u64)p1->base.iRowid-(u64)pSetup->iLastRowid, p1, &pSetup->doclist + ); + pSetup->iLastRowid = p1->base.iRowid; + } + + if( pSetup->pTokendata ){ + prefixIterSetupTokendataCb(p, (void*)pSetup->pTokendata, p1, pNew, nNew); + } +} + +static void fts5SetupPrefixIter( + Fts5Index *p, /* Index to read from */ + int bDesc, /* True for "ORDER BY rowid DESC" */ + int iIdx, /* Index to scan for data */ + u8 *pToken, /* Buffer containing prefix to match */ + int nToken, /* Size of buffer pToken in bytes */ + Fts5Colset *pColset, /* Restrict matches to these columns */ + Fts5Iter **ppIter /* OUT: New iterator */ +){ + Fts5Structure *pStruct; + PrefixSetupCtx s; + TokendataSetupCtx s2; + + memset(&s, 0, sizeof(s)); + memset(&s2, 0, sizeof(s2)); + + s.nMerge = 1; + s.iLastRowid = 0; + s.nBuf = 32; + if( iIdx==0 + && p->pConfig->eDetail==FTS5_DETAIL_FULL + && p->pConfig->bPrefixInsttoken + ){ + s.pTokendata = &s2; + s2.pT = (Fts5TokenDataIter*)fts5IdxMalloc(p, SZ_FTS5TOKENDATAITER(1)); + } + + if( p->pConfig->eDetail==FTS5_DETAIL_NONE ){ + s.xMerge = fts5MergeRowidLists; + s.xAppend = fts5AppendRowid; + }else{ + s.nMerge = FTS5_MERGE_NLIST-1; 
+ s.nBuf = s.nMerge*8; /* Sufficient to merge (16^8)==(2^32) lists */ + s.xMerge = fts5MergePrefixLists; + s.xAppend = fts5AppendPoslist; + } + + s.aBuf = (Fts5Buffer*)fts5IdxMalloc(p, sizeof(Fts5Buffer)*s.nBuf); + pStruct = fts5StructureRead(p); + assert( p->rc!=SQLITE_OK || (s.aBuf && pStruct) ); + + if( p->rc==SQLITE_OK ){ + void *pCtx = (void*)&s; + int i; + Fts5Data *pData; + + /* If iIdx is non-zero, then it is the number of a prefix-index for + ** prefixes 1 character longer than the prefix being queried for. That + ** index contains all the doclists required, except for the one + ** corresponding to the prefix itself. That one is extracted from the + ** main term index here. */ + if( iIdx!=0 ){ + pToken[0] = FTS5_MAIN_PREFIX; + fts5VisitEntries(p, pColset, pToken, nToken, 0, prefixIterSetupCb, pCtx); + } + + pToken[0] = FTS5_MAIN_PREFIX + iIdx; + fts5VisitEntries(p, pColset, pToken, nToken, 1, prefixIterSetupCb, pCtx); + + assert( (s.nBuf%s.nMerge)==0 ); + for(i=0; irc==SQLITE_OK ){ + s.xMerge(p, &s.doclist, s.nMerge, &s.aBuf[i]); + } + for(iFree=i; iFreerc!=SQLITE_OK ); + if( pData ){ + pData->p = (u8*)&pData[1]; + pData->nn = pData->szLeaf = s.doclist.n; + if( s.doclist.n ) memcpy(pData->p, s.doclist.p, s.doclist.n); + fts5MultiIterNew2(p, pData, bDesc, ppIter); + } + + assert( (*ppIter)!=0 || p->rc!=SQLITE_OK ); + if( p->rc==SQLITE_OK && s.pTokendata ){ + fts5TokendataIterSortMap(p, s2.pT); + (*ppIter)->pTokenDataIter = s2.pT; + s2.pT = 0; + } + } + + fts5TokendataIterDelete(s2.pT); + fts5BufferFree(&s.doclist); + fts5StructureRelease(pStruct); + sqlite3_free(s.aBuf); +} + + +/* +** Indicate that all subsequent calls to sqlite3Fts5IndexWrite() pertain +** to the document with rowid iRowid. */ int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ assert( p->rc==SQLITE_OK ); @@ -5343,13 +6780,16 @@ int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ /* Flush the hash table to disk if required */ if( iRowidiWriteRowid || (iRowid==p->iWriteRowid && p->bDelete==0) - || (p->nPendingData > p->pConfig->nHashSize) + || (p->nPendingData > p->pConfig->nHashSize) ){ fts5IndexFlush(p); } p->iWriteRowid = iRowid; p->bDelete = bDelete; + if( bDelete==0 ){ + p->nPendingRow++; + } return fts5IndexReturn(p); } @@ -5359,7 +6799,7 @@ int sqlite3Fts5IndexBeginWrite(Fts5Index *p, int bDelete, i64 iRowid){ int sqlite3Fts5IndexSync(Fts5Index *p){ assert( p->rc==SQLITE_OK ); fts5IndexFlush(p); - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); return fts5IndexReturn(p); } @@ -5370,11 +6810,10 @@ int sqlite3Fts5IndexSync(Fts5Index *p){ ** records must be invalidated. */ int sqlite3Fts5IndexRollback(Fts5Index *p){ - sqlite3Fts5IndexCloseReader(p); + fts5IndexCloseReader(p); fts5IndexDiscardData(p); fts5StructureInvalidate(p); - /* assert( p->rc==SQLITE_OK ); */ - return SQLITE_OK; + return fts5IndexReturn(p); } /* @@ -5383,12 +6822,20 @@ int sqlite3Fts5IndexRollback(Fts5Index *p){ ** and the initial version of the "averages" record (a zero-byte blob). 
*/ int sqlite3Fts5IndexReinit(Fts5Index *p){ - Fts5Structure s; + Fts5Structure *pTmp; + union { + Fts5Structure sFts; + u8 tmpSpace[SZ_FTS5STRUCTURE(1)]; + } uFts; fts5StructureInvalidate(p); fts5IndexDiscardData(p); - memset(&s, 0, sizeof(Fts5Structure)); + pTmp = &uFts.sFts; + memset(uFts.tmpSpace, 0, sizeof(uFts.tmpSpace)); + if( p->pConfig->bContentlessDelete ){ + pTmp->nOriginCntr = 1; + } fts5DataWrite(p, FTS5_AVERAGES_ROWID, (const u8*)"", 0); - fts5StructureWrite(p, &s); + fts5StructureWrite(p, pTmp); return fts5IndexReturn(p); } @@ -5450,7 +6897,9 @@ int sqlite3Fts5IndexClose(Fts5Index *p){ sqlite3_finalize(p->pIdxWriter); sqlite3_finalize(p->pIdxDeleter); sqlite3_finalize(p->pIdxSelect); + sqlite3_finalize(p->pIdxNextSelect); sqlite3_finalize(p->pDataVersion); + sqlite3_finalize(p->pDeleteFromIdx); sqlite3Fts5HashFree(p->pHash); sqlite3_free(p->zDataTbl); sqlite3_free(p); @@ -5545,84 +6994,479 @@ int sqlite3Fts5IndexWrite( } /* -** Open a new iterator to iterate though all rowid that match the -** specified token or token prefix. +** pToken points to a buffer of size nToken bytes containing a search +** term, including the index number at the start, used on a tokendata=1 +** table. This function returns true if the term in buffer pBuf matches +** token pToken/nToken. */ -int sqlite3Fts5IndexQuery( - Fts5Index *p, /* FTS index to query */ - const char *pToken, int nToken, /* Token (or prefix) to query for */ - int flags, /* Mask of FTS5INDEX_QUERY_X flags */ - Fts5Colset *pColset, /* Match these columns only */ - Fts5IndexIter **ppIter /* OUT: New iterator object */ +static int fts5IsTokendataPrefix( + Fts5Buffer *pBuf, + const u8 *pToken, + int nToken ){ - Fts5Config *pConfig = p->pConfig; - Fts5Iter *pRet = 0; - Fts5Buffer buf = {0, 0, 0}; + return ( + pBuf->n>=nToken + && 0==memcmp(pBuf->p, pToken, nToken) + && (pBuf->n==nToken || pBuf->p[nToken]==0x00) + ); +} - /* If the QUERY_SCAN flag is set, all other flags must be clear. */ - assert( (flags & FTS5INDEX_QUERY_SCAN)==0 || flags==FTS5INDEX_QUERY_SCAN ); +/* +** Ensure the segment-iterator passed as the only argument points to EOF. +*/ +static void fts5SegIterSetEOF(Fts5SegIter *pSeg){ + fts5DataRelease(pSeg->pLeaf); + pSeg->pLeaf = 0; +} - if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ - int iIdx = 0; /* Index to search */ - int iPrefixIdx = 0; /* +1 prefix index */ - if( nToken ) memcpy(&buf.p[1], pToken, nToken); +static void fts5IterClose(Fts5IndexIter *pIndexIter){ + if( pIndexIter ){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5Index *pIndex = pIter->pIndex; + fts5TokendataIterDelete(pIter->pTokenDataIter); + fts5MultiIterFree(pIter); + fts5IndexCloseReader(pIndex); + } +} - /* Figure out which index to search and set iIdx accordingly. If this - ** is a prefix query for which there is no prefix index, set iIdx to - ** greater than pConfig->nPrefix to indicate that the query will be - ** satisfied by scanning multiple terms in the main index. - ** - ** If the QUERY_TEST_NOIDX flag was specified, then this must be a - ** prefix-query. Instead of using a prefix-index (if one exists), - ** evaluate the prefix query using the main FTS index. This is used - ** for internal sanity checking by the integrity-check in debug - ** mode only. 
*/ -#ifdef SQLITE_DEBUG - if( pConfig->bPrefixIndex==0 || (flags & FTS5INDEX_QUERY_TEST_NOIDX) ){ - assert( flags & FTS5INDEX_QUERY_PREFIX ); - iIdx = 1+pConfig->nPrefix; - }else -#endif - if( flags & FTS5INDEX_QUERY_PREFIX ){ - int nChar = fts5IndexCharlen(pToken, nToken); - for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){ - int nIdxChar = pConfig->aPrefix[iIdx-1]; - if( nIdxChar==nChar ) break; - if( nIdxChar==nChar+1 ) iPrefixIdx = iIdx; - } - } +/* +** This function appends iterator pAppend to Fts5TokenDataIter pIn and +** returns the result. +*/ +static Fts5TokenDataIter *fts5AppendTokendataIter( + Fts5Index *p, /* Index object (for error code) */ + Fts5TokenDataIter *pIn, /* Current Fts5TokenDataIter struct */ + Fts5Iter *pAppend /* Append this iterator */ +){ + Fts5TokenDataIter *pRet = pIn; - if( iIdx<=pConfig->nPrefix ){ - /* Straight index lookup */ - Fts5Structure *pStruct = fts5StructureRead(p); - buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx); - if( pStruct ){ - fts5MultiIterNew(p, pStruct, flags | FTS5INDEX_QUERY_SKIPEMPTY, - pColset, buf.p, nToken+1, -1, 0, &pRet - ); - fts5StructureRelease(pStruct); - } - }else{ - /* Scan multiple terms in the main index */ - int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; - fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); - if( pRet==0 ){ - assert( p->rc!=SQLITE_OK ); + if( p->rc==SQLITE_OK ){ + if( pIn==0 || pIn->nIter==pIn->nIterAlloc ){ + int nAlloc = pIn ? pIn->nIterAlloc*2 : 16; + int nByte = SZ_FTS5TOKENDATAITER(nAlloc+1); + Fts5TokenDataIter *pNew = (Fts5TokenDataIter*)sqlite3_realloc(pIn, nByte); + + if( pNew==0 ){ + p->rc = SQLITE_NOMEM; }else{ - assert( pRet->pColset==0 ); - fts5IterSetOutputCb(&p->rc, pRet); - if( p->rc==SQLITE_OK ){ - Fts5SegIter *pSeg = &pRet->aSeg[pRet->aFirst[1].iFirst]; - if( pSeg->pLeaf ) pRet->xSetOutputs(pRet, pSeg); - } + if( pIn==0 ) memset(pNew, 0, nByte); + pRet = pNew; + pNew->nIterAlloc = nAlloc; } } + } + if( p->rc ){ + fts5IterClose((Fts5IndexIter*)pAppend); + }else{ + pRet->apIter[pRet->nIter++] = pAppend; + } + assert( pRet==0 || pRet->nIter<=pRet->nIterAlloc ); - if( p->rc ){ - sqlite3Fts5IterClose((Fts5IndexIter*)pRet); - pRet = 0; - sqlite3Fts5IndexCloseReader(p); - } + return pRet; +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function sets the iterator output +** variables (pIter->base.*) according to the contents of the current +** row. +*/ +static void fts5IterSetOutputsTokendata(Fts5Iter *pIter){ + int ii; + int nHit = 0; + i64 iRowid = SMALLEST_INT64; + int iMin = 0; + + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + + pIter->base.nData = 0; + pIter->base.pData = 0; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 ){ + if( nHit==0 || p->base.iRowidbase.iRowid; + nHit = 1; + pIter->base.pData = p->base.pData; + pIter->base.nData = p->base.nData; + iMin = ii; + }else if( p->base.iRowid==iRowid ){ + nHit++; + } + } + } + + if( nHit==0 ){ + pIter->base.bEof = 1; + }else{ + int eDetail = pIter->pIndex->pConfig->eDetail; + pIter->base.bEof = 0; + pIter->base.iRowid = iRowid; + + if( nHit==1 && eDetail==FTS5_DETAIL_FULL ){ + fts5TokendataIterAppendMap(pIter->pIndex, pT, iMin, 0, iRowid, -1); + }else + if( nHit>1 && eDetail!=FTS5_DETAIL_NONE ){ + int nReader = 0; + int nByte = 0; + i64 iPrev = 0; + + /* Allocate array of iterators if they are not already allocated. 
*/ + if( pT->aPoslistReader==0 ){ + pT->aPoslistReader = (Fts5PoslistReader*)sqlite3Fts5MallocZero( + &pIter->pIndex->rc, + pT->nIter * (sizeof(Fts5PoslistReader) + sizeof(int)) + ); + if( pT->aPoslistReader==0 ) return; + pT->aPoslistToIter = (int*)&pT->aPoslistReader[pT->nIter]; + } + + /* Populate an iterator for each poslist that will be merged */ + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( iRowid==p->base.iRowid ){ + pT->aPoslistToIter[nReader] = ii; + sqlite3Fts5PoslistReaderInit( + p->base.pData, p->base.nData, &pT->aPoslistReader[nReader++] + ); + nByte += p->base.nData; + } + } + + /* Ensure the output buffer is large enough */ + if( fts5BufferGrow(&pIter->pIndex->rc, &pIter->poslist, nByte+nHit*10) ){ + return; + } + + /* Ensure the token-mapping is large enough */ + if( eDetail==FTS5_DETAIL_FULL && pT->nMapAlloc<(pT->nMap + nByte) ){ + int nNew = (pT->nMapAlloc + nByte) * 2; + Fts5TokenDataMap *aNew = (Fts5TokenDataMap*)sqlite3_realloc( + pT->aMap, nNew*sizeof(Fts5TokenDataMap) + ); + if( aNew==0 ){ + pIter->pIndex->rc = SQLITE_NOMEM; + return; + } + pT->aMap = aNew; + pT->nMapAlloc = nNew; + } + + pIter->poslist.n = 0; + + while( 1 ){ + i64 iMinPos = LARGEST_INT64; + + /* Find smallest position */ + iMin = 0; + for(ii=0; iiaPoslistReader[ii]; + if( pReader->bEof==0 ){ + if( pReader->iPosiPos; + iMin = ii; + } + } + } + + /* If all readers were at EOF, break out of the loop. */ + if( iMinPos==LARGEST_INT64 ) break; + + sqlite3Fts5PoslistSafeAppend(&pIter->poslist, &iPrev, iMinPos); + sqlite3Fts5PoslistReaderNext(&pT->aPoslistReader[iMin]); + + if( eDetail==FTS5_DETAIL_FULL ){ + pT->aMap[pT->nMap].iPos = iMinPos; + pT->aMap[pT->nMap].iIter = pT->aPoslistToIter[iMin]; + pT->aMap[pT->nMap].iRowid = iRowid; + pT->nMap++; + } + } + + pIter->base.pData = pIter->poslist.p; + pIter->base.nData = pIter->poslist.n; + } + } +} + +/* +** The iterator passed as the only argument must be a tokendata=1 iterator +** (pIter->pTokenDataIter!=0). This function advances the iterator. If +** argument bFrom is false, then the iterator is advanced to the next +** entry. Or, if bFrom is true, it is advanced to the first entry with +** a rowid of iFrom or greater. +*/ +static void fts5TokendataIterNext(Fts5Iter *pIter, int bFrom, i64 iFrom){ + int ii; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *pIndex = pIter->pIndex; + + for(ii=0; iinIter; ii++){ + Fts5Iter *p = pT->apIter[ii]; + if( p->base.bEof==0 + && (p->base.iRowid==pIter->base.iRowid || (bFrom && p->base.iRowidbase.bEof==0 + && p->base.iRowidrc==SQLITE_OK + ){ + fts5MultiIterNext(pIndex, p, 0, 0); + } + } + } + + if( pIndex->rc==SQLITE_OK ){ + fts5IterSetOutputsTokendata(pIter); + } +} + +/* +** If the segment-iterator passed as the first argument is at EOF, then +** set pIter->term to a copy of buffer pTerm. +*/ +static void fts5TokendataSetTermIfEof(Fts5Iter *pIter, Fts5Buffer *pTerm){ + if( pIter && pIter->aSeg[0].pLeaf==0 ){ + fts5BufferSet(&pIter->pIndex->rc, &pIter->aSeg[0].term, pTerm->n, pTerm->p); + } +} + +/* +** This function sets up an iterator to use for a non-prefix query on a +** tokendata=1 table. 
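The iterator construction described above is only reached for tables created with the tokendata=1 option, where the tokenizer may append extra data to each token after a 0x00 byte and a query must match every stored token that begins with the query term followed by that separator (see fts5IsTokendataPrefix() earlier in this hunk). A hedged application-side sketch; the table and column names are invented and the option spelling is taken from the FTS5 documentation:

```c
#include <sqlite3.h>

/* Hedged sketch, not part of the patch: declare an FTS5 table with
** tokendata=1. In practice the option is paired with a tokenizer that
** appends per-token data after a nul byte; the default tokenizer used
** here is only a placeholder to keep the example self-contained. */
static int create_tokendata_table(sqlite3 *db){
  return sqlite3_exec(db,
      "CREATE VIRTUAL TABLE ft USING fts5(body, tokendata=1);"
      "INSERT INTO ft(rowid, body) VALUES(1, 'hello tokendata world');",
      0, 0, 0);
}
```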
+*/ +static Fts5Iter *fts5SetupTokendataIter( + Fts5Index *p, /* FTS index to query */ + const u8 *pToken, /* Buffer containing query term */ + int nToken, /* Size of buffer pToken in bytes */ + Fts5Colset *pColset /* Colset to filter on */ +){ + Fts5Iter *pRet = 0; + Fts5TokenDataIter *pSet = 0; + Fts5Structure *pStruct = 0; + const int flags = FTS5INDEX_QUERY_SCANONETERM | FTS5INDEX_QUERY_SCAN; + + Fts5Buffer bSeek = {0, 0, 0}; + Fts5Buffer *pSmall = 0; + + fts5IndexFlush(p); + pStruct = fts5StructureRead(p); + + while( p->rc==SQLITE_OK ){ + Fts5Iter *pPrev = pSet ? pSet->apIter[pSet->nIter-1] : 0; + Fts5Iter *pNew = 0; + Fts5SegIter *pNewIter = 0; + Fts5SegIter *pPrevIter = 0; + + int iLvl, iSeg, ii; + + pNew = fts5MultiIterAlloc(p, pStruct->nSegment); + if( pSmall ){ + fts5BufferSet(&p->rc, &bSeek, pSmall->n, pSmall->p); + fts5BufferAppendBlob(&p->rc, &bSeek, 1, (const u8*)"\0"); + }else{ + fts5BufferSet(&p->rc, &bSeek, nToken, pToken); + } + if( p->rc ){ + fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + pNewIter = &pNew->aSeg[0]; + pPrevIter = (pPrev ? &pPrev->aSeg[0] : 0); + for(iLvl=0; iLvlnLevel; iLvl++){ + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + int bDone = 0; + + if( pPrevIter ){ + if( fts5BufferCompare(pSmall, &pPrevIter->term) ){ + memcpy(pNewIter, pPrevIter, sizeof(Fts5SegIter)); + memset(pPrevIter, 0, sizeof(Fts5SegIter)); + bDone = 1; + }else if( pPrevIter->iEndofDoclist>pPrevIter->pLeaf->szLeaf ){ + fts5SegIterNextInit(p,(const char*)bSeek.p,bSeek.n-1,pSeg,pNewIter); + bDone = 1; + } + } + + if( bDone==0 ){ + fts5SegIterSeekInit(p, bSeek.p, bSeek.n, flags, pSeg, pNewIter); + } + + if( pPrevIter ){ + if( pPrevIter->pTombArray ){ + pNewIter->pTombArray = pPrevIter->pTombArray; + pNewIter->pTombArray->nRef++; + } + }else{ + fts5SegIterAllocTombstone(p, pNewIter); + } + + pNewIter++; + if( pPrevIter ) pPrevIter++; + if( p->rc ) break; + } + } + fts5TokendataSetTermIfEof(pPrev, pSmall); + + pNew->bSkipEmpty = 1; + pNew->pColset = pColset; + fts5IterSetOutputCb(&p->rc, pNew); + + /* Loop through all segments in the new iterator. Find the smallest + ** term that any segment-iterator points to. Iterator pNew will be + ** used for this term. Also, set any iterator that points to a term that + ** does not match pToken/nToken to point to EOF */ + pSmall = 0; + for(ii=0; iinSeg; ii++){ + Fts5SegIter *pII = &pNew->aSeg[ii]; + if( 0==fts5IsTokendataPrefix(&pII->term, pToken, nToken) ){ + fts5SegIterSetEOF(pII); + } + if( pII->pLeaf && (!pSmall || fts5BufferCompare(pSmall, &pII->term)>0) ){ + pSmall = &pII->term; + } + } + + /* If pSmall is still NULL at this point, then the new iterator does + ** not point to any terms that match the query. So delete it and break + ** out of the loop - all required iterators have been collected. */ + if( pSmall==0 ){ + fts5IterClose((Fts5IndexIter*)pNew); + break; + } + + /* Append this iterator to the set and continue. 
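In a tokendata=1 index each stored term is the user-visible token followed by a 0x00 byte and per-occurrence data, which is why the loop above appends a single 0x00 byte to the seek key. A hypothetical helper showing the prefix test this layout implies (the real check is fts5IsTokendataPrefix(), which operates on Fts5Buffer objects and may differ in detail):

```c
#include <string.h>

/* Return true if stored term pTerm/nTerm belongs to query token
** pToken/nToken in a tokendata=1 index, i.e. it is either the bare token
** or the token followed by a 0x00 separator and extra payload. */
static int isTokendataPrefix(
  const unsigned char *pTerm, int nTerm,    /* Stored term */
  const unsigned char *pToken, int nToken   /* Query token (no 0x00) */
){
  return nTerm>=nToken
      && memcmp(pTerm, pToken, nToken)==0
      && (nTerm==nToken || pTerm[nToken]==0x00);
}
```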
*/ + pSet = fts5AppendTokendataIter(p, pSet, pNew); + } + + if( p->rc==SQLITE_OK && pSet ){ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Iter *pIter = pSet->apIter[ii]; + int iSeg; + for(iSeg=0; iSegnSeg; iSeg++){ + pIter->aSeg[iSeg].flags |= FTS5_SEGITER_ONETERM; + } + fts5MultiIterFinishSetup(p, pIter); + } + } + + if( p->rc==SQLITE_OK ){ + pRet = fts5MultiIterAlloc(p, 0); + } + if( pRet ){ + pRet->nSeg = 0; + pRet->pTokenDataIter = pSet; + if( pSet ){ + fts5IterSetOutputsTokendata(pRet); + }else{ + pRet->base.bEof = 1; + } + }else{ + fts5TokendataIterDelete(pSet); + } + + fts5StructureRelease(pStruct); + fts5BufferFree(&bSeek); + return pRet; +} + +/* +** Open a new iterator to iterate though all rowid that match the +** specified token or token prefix. +*/ +int sqlite3Fts5IndexQuery( + Fts5Index *p, /* FTS index to query */ + const char *pToken, int nToken, /* Token (or prefix) to query for */ + int flags, /* Mask of FTS5INDEX_QUERY_X flags */ + Fts5Colset *pColset, /* Match these columns only */ + Fts5IndexIter **ppIter /* OUT: New iterator object */ +){ + Fts5Config *pConfig = p->pConfig; + Fts5Iter *pRet = 0; + Fts5Buffer buf = {0, 0, 0}; + + /* If the QUERY_SCAN flag is set, all other flags must be clear. */ + assert( (flags & FTS5INDEX_QUERY_SCAN)==0 || flags==FTS5INDEX_QUERY_SCAN ); + + if( sqlite3Fts5BufferSize(&p->rc, &buf, nToken+1)==0 ){ + int iIdx = 0; /* Index to search */ + int iPrefixIdx = 0; /* +1 prefix index */ + int bTokendata = pConfig->bTokendata; + assert( buf.p!=0 ); + if( nToken>0 ) memcpy(&buf.p[1], pToken, nToken); + + /* The NOTOKENDATA flag is set when each token in a tokendata=1 table + ** should be treated individually, instead of merging all those with + ** a common prefix into a single entry. This is used, for example, by + ** queries performed as part of an integrity-check, or by the fts5vocab + ** module. */ + if( flags & (FTS5INDEX_QUERY_NOTOKENDATA|FTS5INDEX_QUERY_SCAN) ){ + bTokendata = 0; + } + + /* Figure out which index to search and set iIdx accordingly. If this + ** is a prefix query for which there is no prefix index, set iIdx to + ** greater than pConfig->nPrefix to indicate that the query will be + ** satisfied by scanning multiple terms in the main index. + ** + ** If the QUERY_TEST_NOIDX flag was specified, then this must be a + ** prefix-query. Instead of using a prefix-index (if one exists), + ** evaluate the prefix query using the main FTS index. This is used + ** for internal sanity checking by the integrity-check in debug + ** mode only. */ +#ifdef SQLITE_DEBUG + if( pConfig->bPrefixIndex==0 || (flags & FTS5INDEX_QUERY_TEST_NOIDX) ){ + assert( flags & FTS5INDEX_QUERY_PREFIX ); + iIdx = 1+pConfig->nPrefix; + }else +#endif + if( flags & FTS5INDEX_QUERY_PREFIX ){ + int nChar = fts5IndexCharlen(pToken, nToken); + for(iIdx=1; iIdx<=pConfig->nPrefix; iIdx++){ + int nIdxChar = pConfig->aPrefix[iIdx-1]; + if( nIdxChar==nChar ) break; + if( nIdxChar==nChar+1 ) iPrefixIdx = iIdx; + } + } + + if( bTokendata && iIdx==0 ){ + buf.p[0] = FTS5_MAIN_PREFIX; + pRet = fts5SetupTokendataIter(p, buf.p, nToken+1, pColset); + }else if( iIdx<=pConfig->nPrefix ){ + /* Straight index lookup */ + Fts5Structure *pStruct = fts5StructureRead(p); + buf.p[0] = (u8)(FTS5_MAIN_PREFIX + iIdx); + if( pStruct ){ + fts5MultiIterNew(p, pStruct, flags | FTS5INDEX_QUERY_SKIPEMPTY, + pColset, buf.p, nToken+1, -1, 0, &pRet + ); + fts5StructureRelease(pStruct); + } + }else{ + /* Scan multiple terms in the main index for a prefix query. 
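The prefix-index selection above compares the query's character length against each configured prefix-index size, and remembers any index exactly one character longer as a fallback hint (iPrefixIdx). A standalone sketch of that selection with hypothetical parameter names; returning 0 here stands in for "scan terms in the main index":

```c
/* Given the prefix-index sizes configured for a table (e.g. prefix='2 3'
** gives aPrefix[] = {2,3}), decide which index can satisfy a prefix query
** of nChar characters. Returns the 1-based index of an exact match, or 0
** if the main index must be scanned. (*piPlusOne) is set to the 1-based
** index of any (nChar+1) prefix index seen before an exact match. */
static int choosePrefixIndex(
  const int *aPrefix, int nPrefix,   /* Configured prefix-index sizes */
  int nChar,                         /* Characters in the query prefix */
  int *piPlusOne                     /* OUT: near-miss index, if any */
){
  int i;
  *piPlusOne = 0;
  for(i=1; i<=nPrefix; i++){
    if( aPrefix[i-1]==nChar ) return i;          /* Exact match - use it */
    if( aPrefix[i-1]==nChar+1 ) *piPlusOne = i;  /* Remember a near miss */
  }
  return 0;
}
```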
*/ + int bDesc = (flags & FTS5INDEX_QUERY_DESC)!=0; + fts5SetupPrefixIter(p, bDesc, iPrefixIdx, buf.p, nToken+1, pColset,&pRet); + if( pRet==0 ){ + assert( p->rc!=SQLITE_OK ); + }else{ + assert( pRet->pColset==0 ); + fts5IterSetOutputCb(&p->rc, pRet); + if( p->rc==SQLITE_OK ){ + Fts5SegIter *pSeg = &pRet->aSeg[pRet->aFirst[1].iFirst]; + if( pSeg->pLeaf ) pRet->xSetOutputs(pRet, pSeg); + } + } + } + + if( p->rc ){ + fts5IterClose((Fts5IndexIter*)pRet); + pRet = 0; + fts5IndexCloseReader(p); + } *ppIter = (Fts5IndexIter*)pRet; sqlite3Fts5BufferFree(&buf); @@ -5639,7 +7483,12 @@ int sqlite3Fts5IndexQuery( int sqlite3Fts5IterNext(Fts5IndexIter *pIndexIter){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; assert( pIter->pIndex->rc==SQLITE_OK ); - fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); + fts5TokendataIterNext(pIter, 0, 0); + }else{ + fts5MultiIterNext(pIter->pIndex, pIter, 0, 0); + } return fts5IndexReturn(pIter->pIndex); } @@ -5672,7 +7521,12 @@ int sqlite3Fts5IterNextScan(Fts5IndexIter *pIndexIter){ */ int sqlite3Fts5IterNextFrom(Fts5IndexIter *pIndexIter, i64 iMatch){ Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + if( pIter->nSeg==0 ){ + assert( pIter->pTokenDataIter ); + fts5TokendataIterNext(pIter, 1, iMatch); + }else{ + fts5MultiIterNextFrom(pIter->pIndex, pIter, iMatch); + } return fts5IndexReturn(pIter->pIndex); } @@ -5687,97 +7541,608 @@ const char *sqlite3Fts5IterTerm(Fts5IndexIter *pIndexIter, int *pn){ return (z ? &z[1] : 0); } -/* -** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery(). -*/ -void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ - if( pIndexIter ){ - Fts5Iter *pIter = (Fts5Iter*)pIndexIter; - Fts5Index *pIndex = pIter->pIndex; - fts5MultiIterFree(pIter); - sqlite3Fts5IndexCloseReader(pIndex); +/* +** pIter is a prefix query. This function populates pIter->pTokenDataIter +** with an Fts5TokenDataIter object containing mappings for all rows +** matched by the query. +*/ +static int fts5SetupPrefixIterTokendata( + Fts5Iter *pIter, + const char *pToken, /* Token prefix to search for */ + int nToken /* Size of pToken in bytes */ +){ + Fts5Index *p = pIter->pIndex; + Fts5Buffer token = {0, 0, 0}; + TokendataSetupCtx ctx; + + memset(&ctx, 0, sizeof(ctx)); + + fts5BufferGrow(&p->rc, &token, nToken+1); + assert( token.p!=0 || p->rc!=SQLITE_OK ); + ctx.pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); + + if( p->rc==SQLITE_OK ){ + + /* Fill in the token prefix to search for */ + token.p[0] = FTS5_MAIN_PREFIX; + memcpy(&token.p[1], pToken, nToken); + token.n = nToken+1; + + fts5VisitEntries( + p, 0, token.p, token.n, 1, prefixIterSetupTokendataCb, (void*)&ctx + ); + + fts5TokendataIterSortMap(p, ctx.pT); + } + + if( p->rc==SQLITE_OK ){ + pIter->pTokenDataIter = ctx.pT; + }else{ + fts5TokendataIterDelete(ctx.pT); + } + fts5BufferFree(&token); + + return fts5IndexReturn(p); +} + +/* +** This is used by xInstToken() to access the token at offset iOff, column +** iCol of row iRowid. The token is returned via output variables *ppOut +** and *pnOut. The iterator passed as the first argument must be a tokendata=1 +** iterator (pIter->pTokenDataIter!=0). 
+** +** pToken/nToken: +*/ +int sqlite3Fts5IterToken( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, + int iCol, + int iOff, + const char **ppOut, int *pnOut +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + i64 iPos = (((i64)iCol)<<32) + iOff; + Fts5TokenDataMap *aMap = 0; + int i1 = 0; + int i2 = 0; + int iTest = 0; + + assert( pT || (pToken && pIter->nSeg>0) ); + if( pT==0 ){ + int rc = fts5SetupPrefixIterTokendata(pIter, pToken, nToken); + if( rc!=SQLITE_OK ) return rc; + pT = pIter->pTokenDataIter; + } + + i2 = pT->nMap; + aMap = pT->aMap; + + while( i2>i1 ){ + iTest = (i1 + i2) / 2; + + if( aMap[iTest].iRowidiRowid ){ + i2 = iTest; + }else{ + if( aMap[iTest].iPosiPos ){ + i2 = iTest; + }else{ + break; + } + } + } + + if( i2>i1 ){ + if( pIter->nSeg==0 ){ + Fts5Iter *pMap = pT->apIter[aMap[iTest].iIter]; + *ppOut = (const char*)pMap->aSeg[0].term.p+1; + *pnOut = pMap->aSeg[0].term.n-1; + }else{ + Fts5TokenDataMap *p = &aMap[iTest]; + *ppOut = (const char*)&pT->terms.p[p->iIter]; + *pnOut = aMap[iTest].nByte; + } + } + + return SQLITE_OK; +} + +/* +** Clear any existing entries from the token-map associated with the +** iterator passed as the only argument. +*/ +void sqlite3Fts5IndexIterClearTokendata(Fts5IndexIter *pIndexIter){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + if( pIter && pIter->pTokenDataIter + && (pIter->nSeg==0 || pIter->pIndex->pConfig->eDetail!=FTS5_DETAIL_FULL) + ){ + pIter->pTokenDataIter->nMap = 0; + } +} + +/* +** Set a token-mapping for the iterator passed as the first argument. This +** is used in detail=column or detail=none mode when a token is requested +** using the xInstToken() API. In this case the caller tokenizers the +** current row and configures the token-mapping via multiple calls to this +** function. +*/ +int sqlite3Fts5IndexIterWriteTokendata( + Fts5IndexIter *pIndexIter, + const char *pToken, int nToken, + i64 iRowid, int iCol, int iOff +){ + Fts5Iter *pIter = (Fts5Iter*)pIndexIter; + Fts5TokenDataIter *pT = pIter->pTokenDataIter; + Fts5Index *p = pIter->pIndex; + i64 iPos = (((i64)iCol)<<32) + iOff; + + assert( p->pConfig->eDetail!=FTS5_DETAIL_FULL ); + assert( pIter->pTokenDataIter || pIter->nSeg>0 ); + if( pIter->nSeg>0 ){ + /* This is a prefix term iterator. */ + if( pT==0 ){ + pT = (Fts5TokenDataIter*)sqlite3Fts5MallocZero(&p->rc, + SZ_FTS5TOKENDATAITER(1)); + pIter->pTokenDataIter = pT; + } + if( pT ){ + fts5TokendataIterAppendMap(p, pT, pT->terms.n, nToken, iRowid, iPos); + fts5BufferAppendBlob(&p->rc, &pT->terms, nToken, (const u8*)pToken); + } + }else{ + int ii; + for(ii=0; iinIter; ii++){ + Fts5Buffer *pTerm = &pT->apIter[ii]->aSeg[0].term; + if( nToken==pTerm->n-1 && memcmp(pToken, pTerm->p+1, nToken)==0 ) break; + } + if( iinIter ){ + fts5TokendataIterAppendMap(p, pT, ii, 0, iRowid, iPos); + } + } + return fts5IndexReturn(p); +} + +/* +** Close an iterator opened by an earlier call to sqlite3Fts5IndexQuery(). +*/ +void sqlite3Fts5IterClose(Fts5IndexIter *pIndexIter){ + if( pIndexIter ){ + Fts5Index *pIndex = ((Fts5Iter*)pIndexIter)->pIndex; + fts5IterClose(pIndexIter); + fts5IndexReturn(pIndex); + } +} + +/* +** Read and decode the "averages" record from the database. +** +** Parameter anSize must point to an array of size nCol, where nCol is +** the number of user defined columns in the FTS table. 
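sqlite3Fts5IterToken() above locates a token mapping with a binary search keyed first on rowid and then on the packed position value (iCol<<32)+iOff. A hypothetical, self-contained version of that lookup over a sorted array of map entries; the entry layout and return value are illustrative, not the real Fts5TokenDataMap:

```c
#include <stdint.h>

/* One mapping entry, sorted by (iRowid, iPos). iPos packs a column and
** token offset as (iCol<<32)+iOff, exactly as in the code above. */
typedef struct TokenMapEntry {
  int64_t iRowid;
  int64_t iPos;
  int iToken;        /* Payload: which token this entry refers to */
} TokenMapEntry;

static int tokenMapLookup(
  const TokenMapEntry *aMap, int nMap,
  int64_t iRowid, int iCol, int iOff
){
  int64_t iPos = (((int64_t)iCol)<<32) + iOff;
  int i1 = 0, i2 = nMap;
  while( i2>i1 ){
    int iTest = (i1+i2)/2;
    if( aMap[iTest].iRowid<iRowid
     || (aMap[iTest].iRowid==iRowid && aMap[iTest].iPos<iPos) ){
      i1 = iTest+1;
    }else if( aMap[iTest].iRowid>iRowid
     || (aMap[iTest].iRowid==iRowid && aMap[iTest].iPos>iPos) ){
      i2 = iTest;
    }else{
      return aMap[iTest].iToken;   /* Exact hit */
    }
  }
  return -1;                       /* Not found */
}
```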
+*/ +int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize){ + int nCol = p->pConfig->nCol; + Fts5Data *pData; + + *pnRow = 0; + memset(anSize, 0, sizeof(i64) * nCol); + pData = fts5DataRead(p, FTS5_AVERAGES_ROWID); + if( p->rc==SQLITE_OK && pData->nn ){ + int i = 0; + int iCol; + i += fts5GetVarint(&pData->p[i], (u64*)pnRow); + for(iCol=0; inn && iColp[i], (u64*)&anSize[iCol]); + } + } + + fts5DataRelease(pData); + return fts5IndexReturn(p); +} + +/* +** Replace the current "averages" record with the contents of the buffer +** supplied as the second argument. +*/ +int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8 *pData, int nData){ + assert( p->rc==SQLITE_OK ); + fts5DataWrite(p, FTS5_AVERAGES_ROWID, pData, nData); + return fts5IndexReturn(p); +} + +/* +** Return the total number of blocks this module has read from the %_data +** table since it was created. +*/ +int sqlite3Fts5IndexReads(Fts5Index *p){ + return p->nRead; +} + +/* +** Set the 32-bit cookie value stored at the start of all structure +** records to the value passed as the second argument. +** +** Return SQLITE_OK if successful, or an SQLite error code if an error +** occurs. +*/ +int sqlite3Fts5IndexSetCookie(Fts5Index *p, int iNew){ + int rc; /* Return code */ + Fts5Config *pConfig = p->pConfig; /* Configuration object */ + u8 aCookie[4]; /* Binary representation of iNew */ + sqlite3_blob *pBlob = 0; + + assert( p->rc==SQLITE_OK ); + sqlite3Fts5Put32(aCookie, iNew); + + rc = sqlite3_blob_open(pConfig->db, pConfig->zDb, p->zDataTbl, + "block", FTS5_STRUCTURE_ROWID, 1, &pBlob + ); + if( rc==SQLITE_OK ){ + sqlite3_blob_write(pBlob, aCookie, 4, 0); + rc = sqlite3_blob_close(pBlob); + } + + return rc; +} + +int sqlite3Fts5IndexLoadConfig(Fts5Index *p){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + fts5StructureRelease(pStruct); + return fts5IndexReturn(p); +} + +/* +** Retrieve the origin value that will be used for the segment currently +** being accumulated in the in-memory hash table when it is flushed to +** disk. If successful, SQLITE_OK is returned and (*piOrigin) set to +** the queried value. Or, if an error occurs, an error code is returned +** and the final value of (*piOrigin) is undefined. +*/ +int sqlite3Fts5IndexGetOrigin(Fts5Index *p, i64 *piOrigin){ + Fts5Structure *pStruct; + pStruct = fts5StructureRead(p); + if( pStruct ){ + *piOrigin = pStruct->nOriginCntr; + fts5StructureRelease(pStruct); + } + return fts5IndexReturn(p); +} + +/* +** Buffer pPg contains a page of a tombstone hash table - one of nPg pages +** associated with the same segment. This function adds rowid iRowid to +** the hash table. The caller is required to guarantee that there is at +** least one free slot on the page. +** +** If parameter bForce is false and the hash table is deemed to be full +** (more than half of the slots are occupied), then non-zero is returned +** and iRowid not inserted. Or, if bForce is true or if the hash table page +** is not full, iRowid is inserted and zero returned. 
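fts5IndexTombstoneAddToPage(), whose body follows, treats each tombstone page as an open-addressed hash: a rowid lands on page (iRowid % nPg), in slot ((iRowid / nPg) % nSlot) within that page, and collisions probe linearly to the next slot. A minimal in-memory sketch of that insert, using 8-byte slots only and ignoring the page-header bookkeeping and the rowid-0 special case:

```c
#include <stdint.h>

/* Insert iRowid (assumed non-zero) into one page of an open-addressed
** tombstone hash. aSlot[] holds nSlot slots, zero meaning empty; nPg is
** the total number of pages in the hash. Returns 0 on success, or 1 if
** the page is full and the caller must rebuild a larger hash. */
static int tombstoneInsert(
  uint64_t *aSlot, int nSlot,
  int nPg,
  uint64_t iRowid
){
  int iSlot = (int)((iRowid / nPg) % nSlot);
  int nProbe;
  for(nProbe=0; nProbe<nSlot; nProbe++){
    if( aSlot[iSlot]==0 ){
      aSlot[iSlot] = iRowid;           /* Empty slot found - insert */
      return 0;
    }
    if( aSlot[iSlot]==iRowid ) return 0;   /* Already present */
    iSlot = (iSlot+1) % nSlot;             /* Linear probe */
  }
  return 1;
}
```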
+*/ +static int fts5IndexTombstoneAddToPage( + Fts5Data *pPg, + int bForce, + int nPg, + u64 iRowid +){ + const int szKey = TOMBSTONE_KEYSIZE(pPg); + const int nSlot = TOMBSTONE_NSLOT(pPg); + const int nElem = fts5GetU32(&pPg->p[4]); + int iSlot = (iRowid / nPg) % nSlot; + int nCollide = nSlot; + + if( szKey==4 && iRowid>0xFFFFFFFF ) return 2; + if( iRowid==0 ){ + pPg->p[1] = 0x01; + return 0; + } + + if( bForce==0 && nElem>=(nSlot/2) ){ + return 1; + } + + fts5PutU32(&pPg->p[4], nElem+1); + if( szKey==4 ){ + u32 *aSlot = (u32*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU32((u8*)&aSlot[iSlot], (u32)iRowid); + }else{ + u64 *aSlot = (u64*)&pPg->p[8]; + while( aSlot[iSlot] ){ + iSlot = (iSlot + 1) % nSlot; + if( nCollide--==0 ) return 0; + } + fts5PutU64((u8*)&aSlot[iSlot], iRowid); + } + + return 0; +} + +/* +** This function attempts to build a new hash containing all the keys +** currently in the tombstone hash table for segment pSeg. The new +** hash will be stored in the nOut buffers passed in array apOut[]. +** All pages of the new hash use key-size szKey (4 or 8). +** +** Return 0 if the hash is successfully rebuilt into the nOut pages. +** Or non-zero if it is not (because one page became overfull). In this +** case the caller should retry with a larger nOut parameter. +** +** Parameter pData1 is page iPg1 of the hash table being rebuilt. +*/ +static int fts5IndexTombstoneRehash( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int nOut, /* Number of output pages */ + Fts5Data **apOut /* Array of output hash pages */ +){ + int ii; + int res = 0; + + /* Initialize the headers of all the output pages */ + for(ii=0; iip[0] = szKey; + fts5PutU32(&apOut[ii]->p[4], 0); + } + + /* Loop through the current pages of the hash table. */ + for(ii=0; res==0 && iinPgTombstone; ii++){ + Fts5Data *pData = 0; /* Page ii of the current hash table */ + Fts5Data *pFree = 0; /* Free this at the end of the loop */ + + if( iPg1==ii ){ + pData = pData1; + }else{ + pFree = pData = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid, ii)); + } + + if( pData ){ + int szKeyIn = TOMBSTONE_KEYSIZE(pData); + int nSlotIn = (pData->nn - 8) / szKeyIn; + int iIn; + for(iIn=0; iInp[8]; + if( aSlot[iIn] ) iVal = fts5GetU32((u8*)&aSlot[iIn]); + }else{ + u64 *aSlot = (u64*)&pData->p[8]; + if( aSlot[iIn] ) iVal = fts5GetU64((u8*)&aSlot[iIn]); + } + + /* If iVal is not 0 at this point, insert it into the new hash table */ + if( iVal ){ + Fts5Data *pPg = apOut[(iVal % nOut)]; + res = fts5IndexTombstoneAddToPage(pPg, 0, nOut, iVal); + if( res ) break; + } + } + + /* If this is page 0 of the old hash, copy the rowid-0-flag from the + ** old hash to the new. */ + if( ii==0 ){ + apOut[0]->p[1] = pData->p[1]; + } + } + fts5DataRelease(pFree); } + + return res; } /* -** Read and decode the "averages" record from the database. +** This is called to rebuild the hash table belonging to segment pSeg. +** If parameter pData1 is not NULL, then one page of the existing hash table +** has already been loaded - pData1, which is page iPg1. The key-size for +** the new hash table is szKey (4 or 8). ** -** Parameter anSize must point to an array of size nCol, where nCol is -** the number of user defined columns in the FTS table. +** If successful, the new hash table is not written to disk. 
Instead, +** output parameter (*pnOut) is set to the number of pages in the new +** hash table, and (*papOut) to point to an array of buffers containing +** the new page data. +** +** If an error occurs, an error code is left in the Fts5Index object and +** both output parameters set to 0 before returning. */ -int sqlite3Fts5IndexGetAverages(Fts5Index *p, i64 *pnRow, i64 *anSize){ - int nCol = p->pConfig->nCol; - Fts5Data *pData; +static void fts5IndexTombstoneRebuild( + Fts5Index *p, + Fts5StructureSegment *pSeg, /* Segment to rebuild hash of */ + Fts5Data *pData1, /* One page of current hash - or NULL */ + int iPg1, /* Which page of the current hash is pData1 */ + int szKey, /* 4 or 8, the keysize */ + int *pnOut, /* OUT: Number of output pages */ + Fts5Data ***papOut /* OUT: Output hash pages */ +){ + const int MINSLOT = 32; + int nSlotPerPage = MAX(MINSLOT, (p->pConfig->pgsz - 8) / szKey); + int nSlot = 0; /* Number of slots in each output page */ + int nOut = 0; - *pnRow = 0; - memset(anSize, 0, sizeof(i64) * nCol); - pData = fts5DataRead(p, FTS5_AVERAGES_ROWID); - if( p->rc==SQLITE_OK && pData->nn ){ - int i = 0; - int iCol; - i += fts5GetVarint(&pData->p[i], (u64*)pnRow); - for(iCol=0; inn && iColp[i], (u64*)&anSize[iCol]); + /* Figure out how many output pages (nOut) and how many slots per + ** page (nSlot). There are three possibilities: + ** + ** 1. The hash table does not yet exist. In this case the new hash + ** table will consist of a single page with MINSLOT slots. + ** + ** 2. The hash table exists but is currently a single page. In this + ** case an attempt is made to grow the page to accommodate the new + ** entry. The page is allowed to grow up to nSlotPerPage (see above) + ** slots. + ** + ** 3. The hash table already consists of more than one page, or of + ** a single page already so large that it cannot be grown. In this + ** case the new hash consists of (nPg*2+1) pages of nSlotPerPage + ** slots each, where nPg is the current number of pages in the + ** hash table. + */ + if( pSeg->nPgTombstone==0 ){ + /* Case 1. */ + nOut = 1; + nSlot = MINSLOT; + }else if( pSeg->nPgTombstone==1 ){ + /* Case 2. */ + int nElem = (int)fts5GetU32(&pData1->p[4]); + assert( pData1 && iPg1==0 ); + nOut = 1; + nSlot = MAX(nElem*4, MINSLOT); + if( nSlot>nSlotPerPage ) nOut = 0; + } + if( nOut==0 ){ + /* Case 3. */ + nOut = (pSeg->nPgTombstone * 2 + 1); + nSlot = nSlotPerPage; + } + + /* Allocate the required array and output pages */ + while( 1 ){ + int res = 0; + int ii = 0; + int szPage = 0; + Fts5Data **apOut = 0; + + /* Allocate space for the new hash table */ + assert( nSlot>=MINSLOT ); + apOut = (Fts5Data**)sqlite3Fts5MallocZero(&p->rc, sizeof(Fts5Data*) * nOut); + szPage = 8 + nSlot*szKey; + for(ii=0; iirc, + sizeof(Fts5Data)+szPage + ); + if( pNew ){ + pNew->nn = szPage; + pNew->p = (u8*)&pNew[1]; + apOut[ii] = pNew; + } } - } - fts5DataRelease(pData); - return fts5IndexReturn(p); + /* Rebuild the hash table. */ + if( p->rc==SQLITE_OK ){ + res = fts5IndexTombstoneRehash(p, pSeg, pData1, iPg1, szKey, nOut, apOut); + } + if( res==0 ){ + if( p->rc ){ + fts5IndexFreeArray(apOut, nOut); + apOut = 0; + nOut = 0; + } + *pnOut = nOut; + *papOut = apOut; + break; + } + + /* If control flows to here, it was not possible to rebuild the hash + ** table. Free all buffers and then try again with more pages. 
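The three sizing cases described above can be expressed as a small standalone function. MINSLOT and the slots-per-page limit follow fts5IndexTombstoneRebuild(); the names and exact clamping here are illustrative only:

```c
#define MINSLOT 32

/* Decide how many pages and slots-per-page a rebuilt tombstone hash
** should use, per the three cases described in the comment above. */
static void tombstoneHashSize(
  int nPgOld,       /* Current number of hash pages (0 if none yet) */
  int nElemPg0,     /* Entries on page 0, meaningful only if nPgOld==1 */
  int pgsz,         /* Database page size */
  int szKey,        /* Key size in bytes: 4 or 8 */
  int *pnOut,       /* OUT: pages in the new hash */
  int *pnSlot       /* OUT: slots per page */
){
  int nSlotPerPage = (pgsz-8)/szKey;
  if( nSlotPerPage<MINSLOT ) nSlotPerPage = MINSLOT;

  if( nPgOld==0 ){                            /* Case 1: no hash yet */
    *pnOut = 1;
    *pnSlot = MINSLOT;
  }else if( nPgOld==1 && nElemPg0*4<=nSlotPerPage ){
    *pnOut = 1;                               /* Case 2: grow the single page */
    *pnSlot = (nElemPg0*4>MINSLOT) ? nElemPg0*4 : MINSLOT;
  }else{                                      /* Case 3: spread over more pages */
    *pnOut = nPgOld*2 + 1;
    *pnSlot = nSlotPerPage;
  }
}
```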
*/ + assert( p->rc==SQLITE_OK ); + fts5IndexFreeArray(apOut, nOut); + nSlot = nSlotPerPage; + nOut = nOut*2 + 1; + } } -/* -** Replace the current "averages" record with the contents of the buffer -** supplied as the second argument. -*/ -int sqlite3Fts5IndexSetAverages(Fts5Index *p, const u8 *pData, int nData){ - assert( p->rc==SQLITE_OK ); - fts5DataWrite(p, FTS5_AVERAGES_ROWID, pData, nData); - return fts5IndexReturn(p); -} /* -** Return the total number of blocks this module has read from the %_data -** table since it was created. +** Add a tombstone for rowid iRowid to segment pSeg. */ -int sqlite3Fts5IndexReads(Fts5Index *p){ - return p->nRead; -} +static void fts5IndexTombstoneAdd( + Fts5Index *p, + Fts5StructureSegment *pSeg, + u64 iRowid +){ + Fts5Data *pPg = 0; + int iPg = -1; + int szKey = 0; + int nHash = 0; + Fts5Data **apHash = 0; + + p->nContentlessDelete++; + + if( pSeg->nPgTombstone>0 ){ + iPg = iRowid % pSeg->nPgTombstone; + pPg = fts5DataRead(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg)); + if( pPg==0 ){ + assert( p->rc!=SQLITE_OK ); + return; + } -/* -** Set the 32-bit cookie value stored at the start of all structure -** records to the value passed as the second argument. -** -** Return SQLITE_OK if successful, or an SQLite error code if an error -** occurs. -*/ -int sqlite3Fts5IndexSetCookie(Fts5Index *p, int iNew){ - int rc; /* Return code */ - Fts5Config *pConfig = p->pConfig; /* Configuration object */ - u8 aCookie[4]; /* Binary representation of iNew */ - sqlite3_blob *pBlob = 0; + if( 0==fts5IndexTombstoneAddToPage(pPg, 0, pSeg->nPgTombstone, iRowid) ){ + fts5DataWrite(p, FTS5_TOMBSTONE_ROWID(pSeg->iSegid,iPg), pPg->p, pPg->nn); + fts5DataRelease(pPg); + return; + } + } - assert( p->rc==SQLITE_OK ); - sqlite3Fts5Put32(aCookie, iNew); + /* Have to rebuild the hash table. First figure out the key-size (4 or 8). */ + szKey = pPg ? TOMBSTONE_KEYSIZE(pPg) : 4; + if( iRowid>0xFFFFFFFF ) szKey = 8; - rc = sqlite3_blob_open(pConfig->db, pConfig->zDb, p->zDataTbl, - "block", FTS5_STRUCTURE_ROWID, 1, &pBlob - ); - if( rc==SQLITE_OK ){ - sqlite3_blob_write(pBlob, aCookie, 4, 0); - rc = sqlite3_blob_close(pBlob); + /* Rebuild the hash table */ + fts5IndexTombstoneRebuild(p, pSeg, pPg, iPg, szKey, &nHash, &apHash); + assert( p->rc==SQLITE_OK || (nHash==0 && apHash==0) ); + + /* If all has succeeded, write the new rowid into one of the new hash + ** table pages, then write them all out to disk. */ + if( nHash ){ + int ii = 0; + fts5IndexTombstoneAddToPage(apHash[iRowid % nHash], 1, nHash, iRowid); + for(ii=0; iiiSegid, ii); + fts5DataWrite(p, iTombstoneRowid, apHash[ii]->p, apHash[ii]->nn); + } + pSeg->nPgTombstone = nHash; + fts5StructureWrite(p, p->pStruct); } - return rc; + fts5DataRelease(pPg); + fts5IndexFreeArray(apHash, nHash); } -int sqlite3Fts5IndexLoadConfig(Fts5Index *p){ +/* +** Add iRowid to the tombstone list of the segment or segments that contain +** rows from origin iOrigin. Return SQLITE_OK if successful, or an SQLite +** error code otherwise. +*/ +int sqlite3Fts5IndexContentlessDelete(Fts5Index *p, i64 iOrigin, i64 iRowid){ Fts5Structure *pStruct; pStruct = fts5StructureRead(p); - fts5StructureRelease(pStruct); + if( pStruct ){ + int bFound = 0; /* True after pSeg->nEntryTombstone incr. 
*/ + int iLvl; + for(iLvl=pStruct->nLevel-1; iLvl>=0; iLvl--){ + int iSeg; + for(iSeg=pStruct->aLevel[iLvl].nSeg-1; iSeg>=0; iSeg--){ + Fts5StructureSegment *pSeg = &pStruct->aLevel[iLvl].aSeg[iSeg]; + if( pSeg->iOrigin1<=(u64)iOrigin && pSeg->iOrigin2>=(u64)iOrigin ){ + if( bFound==0 ){ + pSeg->nEntryTombstone++; + bFound = 1; + } + fts5IndexTombstoneAdd(p, pSeg, iRowid); + } + } + } + fts5StructureRelease(pStruct); + } return fts5IndexReturn(p); } - /************************************************************************* ************************************************************************** ** Below this point is the implementation of the integrity-check @@ -5860,7 +8225,9 @@ static int fts5QueryCksum( int eDetail = p->pConfig->eDetail; u64 cksum = *pCksum; Fts5IndexIter *pIter = 0; - int rc = sqlite3Fts5IndexQuery(p, z, n, flags, 0, &pIter); + int rc = sqlite3Fts5IndexQuery( + p, z, n, (flags | FTS5INDEX_QUERY_NOTOKENDATA), 0, &pIter + ); while( rc==SQLITE_OK && ALWAYS(pIter!=0) && 0==sqlite3Fts5IterEof(pIter) ){ i64 rowid = pIter->iRowid; @@ -5882,7 +8249,7 @@ static int fts5QueryCksum( rc = sqlite3Fts5IterNext(pIter); } } - sqlite3Fts5IterClose(pIter); + fts5IterClose(pIter); *pCksum = cksum; return rc; @@ -5923,19 +8290,27 @@ static int fts5TestUtf8(const char *z, int n){ /* ** This function is also purely an internal test. It does not contribute to ** FTS functionality, or even the integrity-check, in any way. +** +** This function sets output variable (*pbFail) to true if the test fails. Or +** leaves it unchanged if the test succeeds. */ static void fts5TestTerm( Fts5Index *p, Fts5Buffer *pPrev, /* Previous term */ const char *z, int n, /* Possibly new term to test */ u64 expected, - u64 *pCksum + u64 *pCksum, + int *pbFail ){ int rc = p->rc; if( pPrev->n==0 ){ fts5BufferSet(&rc, pPrev, n, (const u8*)z); }else - if( rc==SQLITE_OK && (pPrev->n!=n || memcmp(pPrev->p, z, n)) ){ + if( *pbFail==0 + && rc==SQLITE_OK + && (pPrev->n!=n || memcmp(pPrev->p, z, n)) + && (p->pHash==0 || p->pHash->nEntry==0) + ){ u64 cksum3 = *pCksum; const char *zTerm = (const char*)&pPrev->p[1]; /* term sans prefix-byte */ int nTerm = pPrev->n-1; /* Size of zTerm in bytes */ @@ -5985,7 +8360,7 @@ static void fts5TestTerm( fts5BufferSet(&rc, pPrev, n, (const u8*)z); if( rc==SQLITE_OK && cksum3!=expected ){ - rc = FTS5_CORRUPT; + *pbFail = 1; } *pCksum = cksum3; } @@ -5994,7 +8369,7 @@ static void fts5TestTerm( #else # define fts5TestDlidxReverse(x,y,z) -# define fts5TestTerm(u,v,w,x,y,z) +# define fts5TestTerm(t,u,v,w,x,y,z) #endif /* @@ -6019,15 +8394,18 @@ static void fts5IndexIntegrityCheckEmpty( for(i=iFirst; p->rc==SQLITE_OK && i<=iLast; i++){ Fts5Data *pLeaf = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i)); if( pLeaf ){ - if( !fts5LeafIsTermless(pLeaf) ) p->rc = FTS5_CORRUPT; - if( i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf) ) p->rc = FTS5_CORRUPT; + if( !fts5LeafIsTermless(pLeaf) + || (i>=iNoRowid && 0!=fts5LeafFirstRowidOff(pLeaf)) + ){ + FTS5_CORRUPT_ROWID(p, FTS5_SEGMENT_ROWID(pSeg->iSegid, i)); + } } fts5DataRelease(pLeaf); } } -static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ - int iTermOff = 0; +static void fts5IntegrityCheckPgidx(Fts5Index *p, i64 iRowid, Fts5Data *pLeaf){ + i64 iTermOff = 0; int ii; Fts5Buffer buf1 = {0,0,0}; @@ -6036,7 +8414,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ ii = pLeaf->szLeaf; while( iinn && p->rc==SQLITE_OK ){ int res; - int iOff; + i64 iOff; int nIncr; ii += fts5GetVarint32(&pLeaf->p[ii], nIncr); @@ 
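sqlite3Fts5IndexContentlessDelete() above walks every level and segment, tombstoning the rowid in each segment whose origin range covers the row being deleted. A sketch of that containment test over a hypothetical flattened segment list:

```c
#include <stdint.h>

/* Hypothetical flattened view of the structure record: one entry per
** segment, recording the range of origin values it contains. */
typedef struct Seg {
  uint64_t iOrigin1, iOrigin2;
} Seg;

/* Collect the indexes of all segments whose origin range covers iOrigin,
** the same test applied per segment by the loop above. aOut[] must have
** room for nSeg entries. Returns the number of matches. */
static int segmentsCoveringOrigin(
  const Seg *aSeg, int nSeg, uint64_t iOrigin, int *aOut
){
  int i, nOut = 0;
  for(i=0; i<nSeg; i++){
    if( aSeg[i].iOrigin1<=iOrigin && aSeg[i].iOrigin2>=iOrigin ){
      aOut[nOut++] = i;
    }
  }
  return nOut;
}
```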
-6044,12 +8422,12 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ iOff = iTermOff; if( iOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else if( iTermOff==nIncr ){ int nByte; iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte); if( (iOff+nByte)>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else{ fts5BufferSet(&p->rc, &buf1, nByte, &pLeaf->p[iOff]); } @@ -6058,7 +8436,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ iOff += fts5GetVarint32(&pLeaf->p[iOff], nKeep); iOff += fts5GetVarint32(&pLeaf->p[iOff], nByte); if( nKeep>buf1.n || (iOff+nByte)>pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRowid); }else{ buf1.n = nKeep; fts5BufferAppendBlob(&p->rc, &buf1, nByte, &pLeaf->p[iOff]); @@ -6066,7 +8444,7 @@ static void fts5IntegrityCheckPgidx(Fts5Index *p, Fts5Data *pLeaf){ if( p->rc==SQLITE_OK ){ res = fts5BufferCompare(&buf1, &buf2); - if( res<=0 ) p->rc = FTS5_CORRUPT; + if( res<=0 ) FTS5_CORRUPT_ROWID(p, iRowid); } } fts5BufferSet(&p->rc, &buf2, buf1.n, buf1.p); @@ -6081,6 +8459,7 @@ static void fts5IndexIntegrityCheckSegment( Fts5StructureSegment *pSeg /* Segment to check internal consistency */ ){ Fts5Config *pConfig = p->pConfig; + int bSecureDelete = (pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE); sqlite3_stmt *pStmt = 0; int rc2; int iIdxPrevLeaf = pSeg->pgnoFirst-1; @@ -6116,7 +8495,19 @@ static void fts5IndexIntegrityCheckSegment( ** is also a rowid pointer within the leaf page header, it points to a ** location before the term. */ if( pLeaf->nn<=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + + if( nIdxTerm==0 + && pConfig->iVersion==FTS5_CURRENT_VERSION_SECUREDELETE + && pLeaf->nn==pLeaf->szLeaf + && pLeaf->nn==4 + ){ + /* special case - the very first page in a segment keeps its %_idx + ** entry even if all the terms are removed from it by secure-delete + ** operations. 
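The page-index check above rebuilds each term from its prefix-compressed form (nKeep bytes reused from the previous term plus a new suffix) and then requires strictly ascending term order. A sketch of just the reconstruction step, with the leaf-boundary checks and the ordering comparison omitted:

```c
#include <string.h>

/* Rebuild the current term in place over the previous one. aTerm holds
** the previous term on entry and the new term on exit; the caller must
** guarantee the buffer is large enough for nKeep+nNew bytes. Returns -1
** if nKeep exceeds the previous term length, which the integrity check
** above treats as corruption. */
static int termAppend(
  unsigned char *aTerm, int *pnTerm,
  int nKeep,
  const unsigned char *aNew, int nNew
){
  if( nKeep>*pnTerm ) return -1;
  memcpy(&aTerm[nKeep], aNew, nNew);
  *pnTerm = nKeep + nNew;
  return 0;
}
```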
*/ + }else{ + FTS5_CORRUPT_ROWID(p, iRow); + } + }else{ int iOff; /* Offset of first term on leaf */ int iRowidOff; /* Offset of first rowid on leaf */ @@ -6126,15 +8517,15 @@ static void fts5IndexIntegrityCheckSegment( iOff = fts5LeafFirstTermOff(pLeaf); iRowidOff = fts5LeafFirstRowidOff(pLeaf); if( iRowidOff>=iOff || iOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; + FTS5_CORRUPT_ROWID(p, iRow); }else{ iOff += fts5GetVarint32(&pLeaf->p[iOff], nTerm); res = fts5Memcmp(&pLeaf->p[iOff], zIdxTerm, MIN(nTerm, nIdxTerm)); if( res==0 ) res = nTerm - nIdxTerm; - if( res<0 ) p->rc = FTS5_CORRUPT; + if( res<0 ) FTS5_CORRUPT_ROWID(p, iRow); } - fts5IntegrityCheckPgidx(p, pLeaf); + fts5IntegrityCheckPgidx(p, iRow, pLeaf); } fts5DataRelease(pLeaf); if( p->rc ) break; @@ -6164,7 +8555,7 @@ static void fts5IndexIntegrityCheckSegment( iKey = FTS5_SEGMENT_ROWID(iSegid, iPg); pLeaf = fts5DataRead(p, iKey); if( pLeaf ){ - if( fts5LeafFirstRowidOff(pLeaf)!=0 ) p->rc = FTS5_CORRUPT; + if( fts5LeafFirstRowidOff(pLeaf)!=0 ) FTS5_CORRUPT_ROWID(p, iKey); fts5DataRelease(pLeaf); } } @@ -6179,10 +8570,13 @@ static void fts5IndexIntegrityCheckSegment( int iRowidOff = fts5LeafFirstRowidOff(pLeaf); ASSERT_SZLEAF_OK(pLeaf); if( iRowidOff>=pLeaf->szLeaf ){ - p->rc = FTS5_CORRUPT; - }else{ + FTS5_CORRUPT_ROWID(p, iKey); + }else if( bSecureDelete==0 || iRowidOff>0 ){ + i64 iDlRowid = fts5DlidxIterRowid(pDlidx); fts5GetVarint(&pLeaf->p[iRowidOff], (u64*)&iRowid); - if( iRowid!=fts5DlidxIterRowid(pDlidx) ) p->rc = FTS5_CORRUPT; + if( iRowidrc ) break; if( eDetail==FTS5_DETAIL_NONE ){ if( 0==fts5MultiIterIsEmpty(p, pIter) ){ @@ -6292,15 +8688,26 @@ int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum){ } } } - fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3); + fts5TestTerm(p, &term, 0, 0, cksum2, &cksum3, &bTestFail); fts5MultiIterFree(pIter); - if( p->rc==SQLITE_OK && bUseCksum && cksum!=cksum2 ) p->rc = FTS5_CORRUPT; - - fts5StructureRelease(pStruct); + if( p->rc==SQLITE_OK && bUseCksum && cksum!=cksum2 ){ + p->rc = FTS5_CORRUPT; + sqlite3Fts5ConfigErrmsg(p->pConfig, + "fts5: checksum mismatch for table \"%s\"", p->pConfig->zName + ); + } #ifdef SQLITE_DEBUG + /* In SQLITE_DEBUG builds, expensive extra checks were run as part of + ** the integrity-check above. If no other errors were detected, but one + ** of these tests failed, set the result to SQLITE_CORRUPT_VTAB here. */ + if( p->rc==SQLITE_OK && bTestFail ){ + p->rc = FTS5_CORRUPT; + } fts5BufferFree(&term); #endif + + fts5StructureRelease(pStruct); fts5BufferFree(&poslist); return fts5IndexReturn(p); } @@ -6311,13 +8718,14 @@ int sqlite3Fts5IndexIntegrityCheck(Fts5Index *p, u64 cksum, int bUseCksum){ ** function only. */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Decode a segment-data rowid from the %_data table. This function is ** the opposite of macro FTS5_SEGMENT_ROWID(). 
*/ static void fts5DecodeRowid( i64 iRowid, /* Rowid from %_data table */ + int *pbTombstone, /* OUT: Tombstone hash flag */ int *piSegid, /* OUT: Segment id */ int *pbDlidx, /* OUT: Dlidx flag */ int *piHeight, /* OUT: Height */ @@ -6333,13 +8741,16 @@ static void fts5DecodeRowid( iRowid >>= FTS5_DATA_DLI_B; *piSegid = (int)(iRowid & (((i64)1 << FTS5_DATA_ID_B) - 1)); + iRowid >>= FTS5_DATA_ID_B; + + *pbTombstone = (int)(iRowid & 0x0001); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ - int iSegid, iHeight, iPgno, bDlidx; /* Rowid compenents */ - fts5DecodeRowid(iKey, &iSegid, &bDlidx, &iHeight, &iPgno); + int iSegid, iHeight, iPgno, bDlidx, bTomb; /* Rowid components */ + fts5DecodeRowid(iKey, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); if( iSegid==0 ){ if( iKey==FTS5_AVERAGES_ROWID ){ @@ -6349,14 +8760,16 @@ static void fts5DebugRowid(int *pRc, Fts5Buffer *pBuf, i64 iKey){ } } else{ - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%ssegid=%d h=%d pgno=%d}", - bDlidx ? "dlidx " : "", iSegid, iHeight, iPgno + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "{%s%ssegid=%d h=%d pgno=%d}", + bDlidx ? "dlidx " : "", + bTomb ? "tombstone " : "", + iSegid, iHeight, iPgno ); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) static void fts5DebugStructure( int *pRc, /* IN/OUT: error code */ Fts5Buffer *pBuf, @@ -6371,16 +8784,22 @@ static void fts5DebugStructure( ); for(iSeg=0; iSegnSeg; iSeg++){ Fts5StructureSegment *pSeg = &pLvl->aSeg[iSeg]; - sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d}", + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " {id=%d leaves=%d..%d", pSeg->iSegid, pSeg->pgnoFirst, pSeg->pgnoLast ); + if( pSeg->iOrigin1>0 ){ + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " origin=%lld..%lld", + pSeg->iOrigin1, pSeg->iOrigin2 + ); + } + sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } sqlite3Fts5BufferAppendPrintf(pRc, pBuf, "}"); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. ** @@ -6405,9 +8824,9 @@ static void fts5DecodeStructure( fts5DebugStructure(pRc, pBuf, p); fts5StructureRelease(p); } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This is part of the fts5_decode() debugging aid. ** @@ -6430,9 +8849,9 @@ static void fts5DecodeAverages( zSpace = " "; } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** Buffer (a/n) is assumed to contain a list of serialized varints. Read ** each varint and append its string representation to buffer pBuf. Return @@ -6449,9 +8868,9 @@ static int fts5DecodePoslist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ } return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The start of buffer (a/n) contains the start of a doclist. The doclist ** may or may not finish within the buffer. 
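fts5DecodeRowid() above unpacks a %_data rowid field by field, least significant first: page number, height, doclist-index flag, segment id, and now a tombstone flag. A standalone sketch of the same unpacking; the bit widths below are assumptions for illustration only, the authoritative values are the FTS5_DATA_*_B macros in fts5_index.c:

```c
#include <stdint.h>

#define PAGE_BITS   31   /* Assumed width of the page-number field */
#define HEIGHT_BITS  5   /* Assumed width of the height field */
#define DLIDX_BITS   1   /* Doclist-index flag */
#define SEGID_BITS  16   /* Assumed width of the segment-id field */

static void decodeDataRowid(
  int64_t iRowid,
  int *pbTombstone, int *piSegid, int *pbDlidx, int *piHeight, int *piPgno
){
  *piPgno   = (int)(iRowid & ((1LL<<PAGE_BITS)-1));    iRowid >>= PAGE_BITS;
  *piHeight = (int)(iRowid & ((1LL<<HEIGHT_BITS)-1));  iRowid >>= HEIGHT_BITS;
  *pbDlidx  = (int)(iRowid & ((1LL<<DLIDX_BITS)-1));   iRowid >>= DLIDX_BITS;
  *piSegid  = (int)(iRowid & ((1LL<<SEGID_BITS)-1));   iRowid >>= SEGID_BITS;
  *pbTombstone = (int)(iRowid & 0x01);
}
```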
This function appends a text @@ -6484,9 +8903,9 @@ static int fts5DecodeDoclist(int *pRc, Fts5Buffer *pBuf, const u8 *a, int n){ return iOff; } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** This function is part of the fts5_decode() debugging function. It is ** only ever used with detail=none tables. @@ -6527,9 +8946,27 @@ static void fts5DecodeRowidList( sqlite3Fts5BufferAppendPrintf(pRc, pBuf, " %lld%s", iRowid, zApp); } } -#endif /* SQLITE_TEST */ +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ + +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) +static void fts5BufferAppendTerm(int *pRc, Fts5Buffer *pBuf, Fts5Buffer *pTerm){ + int ii; + fts5BufferGrow(pRc, pBuf, pTerm->n*2 + 1); + if( *pRc==SQLITE_OK ){ + for(ii=0; iin; ii++){ + if( pTerm->p[ii]==0x00 ){ + pBuf->p[pBuf->n++] = '\\'; + pBuf->p[pBuf->n++] = '0'; + }else{ + pBuf->p[pBuf->n++] = pTerm->p[ii]; + } + } + pBuf->p[pBuf->n] = 0x00; + } +} +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) /* ** The implementation of user-defined scalar function fts5_decode(). */ @@ -6540,6 +8977,7 @@ static void fts5DecodeFunction( ){ i64 iRowid; /* Rowid for record being decoded */ int iSegid,iHeight,iPgno,bDlidx;/* Rowid components */ + int bTomb; const u8 *aBlob; int n; /* Record to decode */ u8 *a = 0; Fts5Buffer s; /* Build up text to return here */ @@ -6557,12 +8995,12 @@ static void fts5DecodeFunction( ** buffer overreads even if the record is corrupt. */ n = sqlite3_value_bytes(apVal[1]); aBlob = sqlite3_value_blob(apVal[1]); - nSpace = n + FTS5_DATA_ZERO_PADDING; + nSpace = ((i64)n) + FTS5_DATA_ZERO_PADDING; a = (u8*)sqlite3Fts5MallocZero(&rc, nSpace); if( a==0 ) goto decode_out; if( n>0 ) memcpy(a, aBlob, n); - fts5DecodeRowid(iRowid, &iSegid, &bDlidx, &iHeight, &iPgno); + fts5DecodeRowid(iRowid, &bTomb, &iSegid, &bDlidx, &iHeight, &iPgno); fts5DebugRowid(&rc, &s, iRowid); if( bDlidx ){ @@ -6581,6 +9019,28 @@ static void fts5DecodeFunction( " %d(%lld)", lvl.iLeafPgno, lvl.iRowid ); } + }else if( bTomb ){ + u32 nElem = fts5GetU32(&a[4]); + int szKey = (aBlob[0]==4 || aBlob[0]==8) ? aBlob[0] : 8; + int nSlot = (n - 8) / szKey; + int ii; + sqlite3Fts5BufferAppendPrintf(&rc, &s, " nElem=%d", (int)nElem); + if( aBlob[1] ){ + sqlite3Fts5BufferAppendPrintf(&rc, &s, " 0"); + } + for(ii=0; iiestimatedCost = (double)100; + pIdxInfo->estimatedRows = 100; + pIdxInfo->idxNum = 0; + for(i=0, p=pIdxInfo->aConstraint; inConstraint; i++, p++){ + if( p->usable==0 ) continue; + if( p->op==SQLITE_INDEX_CONSTRAINT_EQ && p->iColumn==11 ){ + rc = SQLITE_OK; + pIdxInfo->aConstraintUsage[i].omit = 1; + pIdxInfo->aConstraintUsage[i].argvIndex = 1; + break; + } + } + return rc; +} + +/* +** This method is the destructor for bytecodevtab objects. +*/ +static int fts5structDisconnectMethod(sqlite3_vtab *pVtab){ + Fts5StructVtab *p = (Fts5StructVtab*)pVtab; + sqlite3_free(p); + return SQLITE_OK; +} + +/* +** Constructor for a new bytecodevtab_cursor object. +*/ +static int fts5structOpenMethod(sqlite3_vtab *p, sqlite3_vtab_cursor **ppCsr){ + int rc = SQLITE_OK; + Fts5StructVcsr *pNew = 0; + + pNew = sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + *ppCsr = (sqlite3_vtab_cursor*)pNew; + + return SQLITE_OK; +} + +/* +** Destructor for a bytecodevtab_cursor. 
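fts5BufferAppendTerm() above makes tokendata terms printable by escaping each embedded 0x00 byte as the two characters backslash and zero. The same transformation as a small sketch writing to a FILE* instead of an Fts5Buffer:

```c
#include <stdio.h>

/* Print an FTS5 term that may contain embedded 0x00 bytes, escaping each
** such byte as "\0" so the output remains a single readable line. */
static void printEscapedTerm(FILE *out, const unsigned char *p, int n){
  int i;
  for(i=0; i<n; i++){
    if( p[i]==0x00 ){
      fputc('\\', out);
      fputc('0', out);
    }else{
      fputc(p[i], out);
    }
  }
  fputc('\n', out);
}
```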
+*/ +static int fts5structCloseMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + fts5StructureRelease(pCsr->pStruct); + sqlite3_free(pCsr); + return SQLITE_OK; +} + + +/* +** Advance a bytecodevtab_cursor to its next row of output. +*/ +static int fts5structNextMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + + assert( pCsr->pStruct ); + pCsr->iSeg++; + pCsr->iRowid++; + while( pCsr->iLevelnLevel && pCsr->iSeg>=p->aLevel[pCsr->iLevel].nSeg ){ + pCsr->iLevel++; + pCsr->iSeg = 0; + } + if( pCsr->iLevel>=p->nLevel ){ + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + } + return SQLITE_OK; +} + +/* +** Return TRUE if the cursor has been moved off of the last +** row of output. +*/ +static int fts5structEofMethod(sqlite3_vtab_cursor *cur){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + return pCsr->pStruct==0; +} + +static int fts5structRowidMethod( + sqlite3_vtab_cursor *cur, + sqlite_int64 *piRowid +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + *piRowid = pCsr->iRowid; + return SQLITE_OK; +} + +/* +** Return values of columns for the row at which the bytecodevtab_cursor +** is currently pointing. +*/ +static int fts5structColumnMethod( + sqlite3_vtab_cursor *cur, /* The cursor */ + sqlite3_context *ctx, /* First argument to sqlite3_result_...() */ + int i /* Which column to return */ +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr*)cur; + Fts5Structure *p = pCsr->pStruct; + Fts5StructureSegment *pSeg = &p->aLevel[pCsr->iLevel].aSeg[pCsr->iSeg]; + + switch( i ){ + case 0: /* level */ + sqlite3_result_int(ctx, pCsr->iLevel); + break; + case 1: /* segment */ + sqlite3_result_int(ctx, pCsr->iSeg); + break; + case 2: /* merge */ + sqlite3_result_int(ctx, pCsr->iSeg < p->aLevel[pCsr->iLevel].nMerge); + break; + case 3: /* segid */ + sqlite3_result_int(ctx, pSeg->iSegid); + break; + case 4: /* leaf1 */ + sqlite3_result_int(ctx, pSeg->pgnoFirst); + break; + case 5: /* leaf2 */ + sqlite3_result_int(ctx, pSeg->pgnoLast); + break; + case 6: /* origin1 */ + sqlite3_result_int64(ctx, pSeg->iOrigin1); + break; + case 7: /* origin2 */ + sqlite3_result_int64(ctx, pSeg->iOrigin2); + break; + case 8: /* npgtombstone */ + sqlite3_result_int(ctx, pSeg->nPgTombstone); + break; + case 9: /* nentrytombstone */ + sqlite3_result_int64(ctx, pSeg->nEntryTombstone); + break; + case 10: /* nentry */ + sqlite3_result_int64(ctx, pSeg->nEntry); + break; + } + return SQLITE_OK; +} + +/* +** Initialize a cursor. +** +** idxNum==0 means show all subprograms +** idxNum==1 means show only the main bytecode and omit subprograms. +*/ +static int fts5structFilterMethod( + sqlite3_vtab_cursor *pVtabCursor, + int idxNum, const char *idxStr, + int argc, sqlite3_value **argv +){ + Fts5StructVcsr *pCsr = (Fts5StructVcsr *)pVtabCursor; + int rc = SQLITE_OK; + + const u8 *aBlob = 0; + int nBlob = 0; + + assert( argc==1 ); + fts5StructureRelease(pCsr->pStruct); + pCsr->pStruct = 0; + + nBlob = sqlite3_value_bytes(argv[0]); + aBlob = (const u8*)sqlite3_value_blob(argv[0]); + rc = fts5StructureDecode(aBlob, nBlob, 0, &pCsr->pStruct); + if( rc==SQLITE_OK ){ + pCsr->iLevel = 0; + pCsr->iRowid = 0; + pCsr->iSeg = -1; + rc = fts5structNextMethod(pVtabCursor); + } + + return rc; +} + +#endif /* SQLITE_TEST || SQLITE_FTS5_DEBUG */ /* ** This is called as part of registering the FTS5 module with database @@ -6785,7 +9474,7 @@ static void fts5RowidFunction( ** SQLite error code is returned instead. 
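The fts5_structure table-valued function implemented above (available only in SQLITE_TEST or SQLITE_FTS5_DEBUG builds) decodes a raw structure record passed as its hidden argument. A usage sketch from C; the FTS5 table name "ft" and the structure record living at id 10 of the %_data shadow table (FTS5_STRUCTURE_ROWID) are assumptions for illustration:

```c
#include <stdio.h>
#include "sqlite3.h"

/* Dump one line per segment of the structure record belonging to a
** hypothetical FTS5 table named "ft". Column names follow the comments
** in fts5structColumnMethod() above. */
static int dumpFts5Structure(sqlite3 *db){
  const char *zSql =
    "SELECT level, segment, segid, leaf1, leaf2, nentry "
    "FROM fts5_structure((SELECT block FROM ft_data WHERE id=10))";
  sqlite3_stmt *pStmt = 0;
  int rc = sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0);
  while( rc==SQLITE_OK && sqlite3_step(pStmt)==SQLITE_ROW ){
    printf("level=%d seg=%d segid=%d leaves=%d..%d nentry=%lld\n",
        sqlite3_column_int(pStmt, 0), sqlite3_column_int(pStmt, 1),
        sqlite3_column_int(pStmt, 2), sqlite3_column_int(pStmt, 3),
        sqlite3_column_int(pStmt, 4),
        (long long)sqlite3_column_int64(pStmt, 5));
  }
  sqlite3_finalize(pStmt);
  return rc;
}
```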
*/ int sqlite3Fts5IndexInit(sqlite3 *db){ -#ifdef SQLITE_TEST +#if defined(SQLITE_TEST) || defined(SQLITE_FTS5_DEBUG) int rc = sqlite3_create_function( db, "fts5_decode", 2, SQLITE_UTF8, 0, fts5DecodeFunction, 0, 0 ); @@ -6802,6 +9491,37 @@ int sqlite3Fts5IndexInit(sqlite3 *db){ db, "fts5_rowid", -1, SQLITE_UTF8, 0, fts5RowidFunction, 0, 0 ); } + + if( rc==SQLITE_OK ){ + static const sqlite3_module fts5structure_module = { + 0, /* iVersion */ + 0, /* xCreate */ + fts5structConnectMethod, /* xConnect */ + fts5structBestIndexMethod, /* xBestIndex */ + fts5structDisconnectMethod, /* xDisconnect */ + 0, /* xDestroy */ + fts5structOpenMethod, /* xOpen */ + fts5structCloseMethod, /* xClose */ + fts5structFilterMethod, /* xFilter */ + fts5structNextMethod, /* xNext */ + fts5structEofMethod, /* xEof */ + fts5structColumnMethod, /* xColumn */ + fts5structRowidMethod, /* xRowid */ + 0, /* xUpdate */ + 0, /* xBegin */ + 0, /* xSync */ + 0, /* xCommit */ + 0, /* xRollback */ + 0, /* xFindFunction */ + 0, /* xRename */ + 0, /* xSavepoint */ + 0, /* xRelease */ + 0, /* xRollbackTo */ + 0, /* xShadowName */ + 0 /* xIntegrity */ + }; + rc = sqlite3_create_module(db, "fts5_structure", &fts5structure_module, 0); + } return rc; #else return SQLITE_OK; diff --git a/ext/fts5/fts5_main.c b/ext/fts5/fts5_main.c index fd3a9066b7..f45b9ef906 100644 --- a/ext/fts5/fts5_main.c +++ b/ext/fts5/fts5_main.c @@ -83,8 +83,18 @@ struct Fts5Global { Fts5TokenizerModule *pTok; /* First in list of all tokenizer modules */ Fts5TokenizerModule *pDfltTok; /* Default tokenizer module */ Fts5Cursor *pCsr; /* First in list of all open cursors */ + u32 aLocaleHdr[4]; }; +/* +** Size of header on fts5_locale() values. And macro to access a buffer +** containing a copy of the header from an Fts5Config pointer. +*/ +#define FTS5_LOCALE_HDR_SIZE ((int)sizeof( ((Fts5Global*)0)->aLocaleHdr )) +#define FTS5_LOCALE_HDR(pConfig) ((const u8*)(pConfig->pGlobal->aLocaleHdr)) + +#define FTS5_INSTTOKEN_SUBTYPE 73 + /* ** Each auxiliary function registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part @@ -103,11 +113,28 @@ struct Fts5Auxiliary { ** Each tokenizer module registered with the FTS5 module is represented ** by an object of the following type. All such objects are stored as part ** of the Fts5Global.pTok list. +** +** bV2Native: +** True if the tokenizer was registered using xCreateTokenizer_v2(), false +** for xCreateTokenizer(). If this variable is true, then x2 is populated +** with the routines as supplied by the caller and x1 contains synthesized +** wrapper routines. In this case the user-data pointer passed to +** x1.xCreate should be a pointer to the Fts5TokenizerModule structure, +** not a copy of pUserData. +** +** Of course, if bV2Native is false, then x1 contains the real routines and +** x2 the synthesized ones. In this case a pointer to the Fts5TokenizerModule +** object should be passed to x2.xCreate. +** +** The synthesized wrapper routines are necessary for xFindTokenizer(_v2) +** calls. 
*/ struct Fts5TokenizerModule { char *zName; /* Name of tokenizer */ void *pUserData; /* User pointer passed to xCreate() */ - fts5_tokenizer x; /* Tokenizer functions */ + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ void (*xDestroy)(void*); /* Destructor function */ Fts5TokenizerModule *pNext; /* Next registered tokenizer module */ }; @@ -117,6 +144,8 @@ struct Fts5FullTable { Fts5Storage *pStorage; /* Document store */ Fts5Global *pGlobal; /* Global (connection wide) data */ Fts5Cursor *pSortCsr; /* Sort data from this cursor */ + int iSavepoint; /* Successful xSavepoint()+1 */ + #ifdef SQLITE_DEBUG struct Fts5TransactionState ts; #endif @@ -141,9 +170,11 @@ struct Fts5Sorter { i64 iRowid; /* Current rowid */ const u8 *aPoslist; /* Position lists for current row */ int nIdx; /* Number of entries in aIdx[] */ - int aIdx[1]; /* Offsets into aPoslist for current row */ + int aIdx[FLEXARRAY]; /* Offsets into aPoslist for current row */ }; +/* Size (int bytes) of an Fts5Sorter object with N indexes */ +#define SZ_FTS5SORTER(N) (offsetof(Fts5Sorter,nIdx)+((N+2)/2)*sizeof(i64)) /* ** Virtual-table cursor object. @@ -193,7 +224,7 @@ struct Fts5Cursor { Fts5Auxiliary *pAux; /* Currently executing extension function */ Fts5Auxdata *pAuxdata; /* First in linked list of saved aux-data */ - /* Cache used by auxiliary functions xInst() and xInstCount() */ + /* Cache used by auxiliary API functions xInst() and xInstCount() */ Fts5PoslistReader *aInstIter; /* One for each phrase */ int nInstAlloc; /* Size of aInst[] array (entries / 3) */ int nInstCount; /* Number of phrase instances */ @@ -260,7 +291,7 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ break; case FTS5_SYNC: - assert( p->ts.eState==1 ); + assert( p->ts.eState==1 || p->ts.eState==2 ); p->ts.eState = 2; break; @@ -275,21 +306,21 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ break; case FTS5_SAVEPOINT: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint>=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint; break; case FTS5_RELEASE: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=0 ); assert( iSavepoint<=p->ts.iSavepoint ); p->ts.iSavepoint = iSavepoint-1; break; case FTS5_ROLLBACKTO: - assert( p->ts.eState==1 ); + assert( p->ts.eState>=1 ); assert( iSavepoint>=-1 ); /* The following assert() can fail if another vtab strikes an error ** within an xSavepoint() call then SQLite calls xRollbackTo() - without @@ -304,10 +335,16 @@ static void fts5CheckTransactionState(Fts5FullTable *p, int op, int iSavepoint){ #endif /* -** Return true if pTab is a contentless table. +** Return true if pTab is a contentless table. If parameter bIncludeUnindexed +** is true, this includes contentless tables that store UNINDEXED columns +** only. 
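Fts5Sorter above now ends in a FLEXARRAY member and is sized with SZ_FTS5SORTER(), which measures up to the last fixed field with offsetof() and rounds the trailing space to an 8-byte boundary. The general flexible-array sizing idiom, as a minimal sketch with hypothetical names:

```c
#include <stddef.h>
#include <stdlib.h>

/* A structure ending in a flexible array member, allocated with space
** for exactly nElem trailing elements. */
typedef struct VarRec {
  int nUsed;          /* Number of trailing elements in use */
  int a[];            /* Flexible array member */
} VarRec;

static VarRec *varRecAlloc(int nElem){
  VarRec *p = malloc(offsetof(VarRec, a) + nElem*sizeof(int));
  if( p ) p->nUsed = 0;
  return p;
}
```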
*/ -static int fts5IsContentless(Fts5FullTable *pTab){ - return pTab->p.pConfig->eContent==FTS5_CONTENT_NONE; +static int fts5IsContentless(Fts5FullTable *pTab, int bIncludeUnindexed){ + int eContent = pTab->p.pConfig->eContent; + return ( + eContent==FTS5_CONTENT_NONE + || (bIncludeUnindexed && eContent==FTS5_CONTENT_UNINDEXED) + ); } /* @@ -375,8 +412,12 @@ static int fts5InitVtab( assert( (rc==SQLITE_OK && *pzErr==0) || pConfig==0 ); } if( rc==SQLITE_OK ){ + pConfig->pzErrmsg = pzErr; pTab->p.pConfig = pConfig; pTab->pGlobal = pGlobal; + if( bCreate || sqlite3Fts5TokenizerPreload(&pConfig->t) ){ + rc = sqlite3Fts5LoadTokenizer(pConfig); + } } /* Open the index sub-system */ @@ -398,13 +439,17 @@ static int fts5InitVtab( /* Load the initial configuration */ if( rc==SQLITE_OK ){ - assert( pConfig->pzErrmsg==0 ); - pConfig->pzErrmsg = pzErr; - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); - sqlite3Fts5IndexRollback(pTab->p.pIndex); - pConfig->pzErrmsg = 0; + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie-1); + } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, (int)1); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_vtab_config(db, SQLITE_VTAB_INNOCUOUS); } + if( pConfig ) pConfig->pzErrmsg = 0; if( rc!=SQLITE_OK ){ fts5FreeVtab(pTab); pTab = 0; @@ -466,16 +511,27 @@ static void fts5SetUniqueFlag(sqlite3_index_info *pIdxInfo){ #endif } +static void fts5SetEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){ +#if SQLITE_VERSION_NUMBER>=3008002 +#ifndef SQLITE_CORE + if( sqlite3_libversion_number()>=3008002 ) +#endif + { + pIdxInfo->estimatedRows = nRow; + } +#endif +} + static int fts5UsePatternMatch( Fts5Config *pConfig, struct sqlite3_index_constraint *p ){ assert( FTS5_PATTERN_GLOB==SQLITE_INDEX_CONSTRAINT_GLOB ); assert( FTS5_PATTERN_LIKE==SQLITE_INDEX_CONSTRAINT_LIKE ); - if( pConfig->ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ + if( pConfig->t.ePattern==FTS5_PATTERN_GLOB && p->op==FTS5_PATTERN_GLOB ){ return 1; } - if( pConfig->ePattern==FTS5_PATTERN_LIKE + if( pConfig->t.ePattern==FTS5_PATTERN_LIKE && (p->op==FTS5_PATTERN_LIKE || p->op==FTS5_PATTERN_GLOB) ){ return 1; @@ -522,10 +578,10 @@ static int fts5UsePatternMatch( ** This function ensures that there is at most one "r" or "=". And that if ** there exists an "=" then there is no "<" or ">". ** -** Costs are assigned as follows: +** If an unusable MATCH operator is present in the WHERE clause, then +** SQLITE_CONSTRAINT is returned. ** -** a) If an unusable MATCH operator is present in the WHERE clause, the -** cost is unconditionally set to 1e50 (a really big number). +** Costs are assigned as follows: ** ** a) If a MATCH operator is present, the cost depends on the other ** constraints also present. As follows: @@ -558,7 +614,7 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ int bSeenEq = 0; int bSeenGt = 0; int bSeenLt = 0; - int bSeenMatch = 0; + int nSeenMatch = 0; int bSeenRank = 0; @@ -589,21 +645,19 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* A MATCH operator or equivalent */ if( p->usable==0 || iCol<0 ){ /* As there exists an unusable MATCH constraint this is an - ** unusable plan. Set a prohibitively high cost. */ - pInfo->estimatedCost = 1e50; - assert( iIdxStr < pInfo->nConstraint*6 + 1 ); + ** unusable plan. Return SQLITE_CONSTRAINT. 
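As described above (and implemented just below), fts5BestIndexMethod() now returns SQLITE_CONSTRAINT for a plan that contains an unusable MATCH constraint, rather than assigning the old prohibitively high cost. A skeletal xBestIndex for a hypothetical virtual table that insists on a usable MATCH constraint, showing the same pattern:

```c
#include "sqlite3.h"

/* Hypothetical xBestIndex: reject any plan without a usable MATCH. */
static int xBestIndexRequireMatch(
  sqlite3_vtab *pVTab,
  sqlite3_index_info *pInfo
){
  int i;
  (void)pVTab;
  for(i=0; i<pInfo->nConstraint; i++){
    if( pInfo->aConstraint[i].op==SQLITE_INDEX_CONSTRAINT_MATCH ){
      if( pInfo->aConstraint[i].usable==0 ){
        return SQLITE_CONSTRAINT;          /* Reject this plan outright */
      }
      pInfo->aConstraintUsage[i].argvIndex = 1;
      pInfo->aConstraintUsage[i].omit = 1;
      pInfo->estimatedCost = 1000.0;
      return SQLITE_OK;
    }
  }
  return SQLITE_CONSTRAINT;                /* No MATCH at all */
}
```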
*/ idxStr[iIdxStr] = 0; - return SQLITE_OK; + return SQLITE_CONSTRAINT; }else{ if( iCol==nCol+1 ){ if( bSeenRank ) continue; idxStr[iIdxStr++] = 'r'; bSeenRank = 1; - }else if( iCol>=0 ){ - bSeenMatch = 1; + }else{ + nSeenMatch++; idxStr[iIdxStr++] = 'M'; sqlite3_snprintf(6, &idxStr[iIdxStr], "%d", iCol); - idxStr += strlen(&idxStr[iIdxStr]); + iIdxStr += (int)strlen(&idxStr[iIdxStr]); assert( idxStr[iIdxStr]=='\0' ); } pInfo->aConstraintUsage[i].argvIndex = ++iCons; @@ -617,10 +671,12 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ idxStr += strlen(&idxStr[iIdxStr]); pInfo->aConstraintUsage[i].argvIndex = ++iCons; assert( idxStr[iIdxStr]=='\0' ); + nSeenMatch++; }else if( bSeenEq==0 && p->op==SQLITE_INDEX_CONSTRAINT_EQ && iCol<0 ){ idxStr[iIdxStr++] = '='; bSeenEq = 1; pInfo->aConstraintUsage[i].argvIndex = ++iCons; + pInfo->aConstraintUsage[i].omit = 1; } } } @@ -647,12 +703,15 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ } idxStr[iIdxStr] = '\0'; - /* Set idxFlags flags for the ORDER BY clause */ + /* Set idxFlags flags for the ORDER BY clause + ** + ** Note that tokendata=1 tables cannot currently handle "ORDER BY rowid DESC". + */ if( pInfo->nOrderBy==1 ){ int iSort = pInfo->aOrderBy[0].iColumn; - if( iSort==(pConfig->nCol+1) && bSeenMatch ){ + if( iSort==(pConfig->nCol+1) && nSeenMatch>0 ){ idxFlags |= FTS5_BI_ORDER_RANK; - }else if( iSort==-1 ){ + }else if( iSort==-1 && (!pInfo->aOrderBy[0].desc || !pConfig->bTokendata) ){ idxFlags |= FTS5_BI_ORDER_ROWID; } if( BitFlagTest(idxFlags, FTS5_BI_ORDER_RANK|FTS5_BI_ORDER_ROWID) ){ @@ -665,14 +724,21 @@ static int fts5BestIndexMethod(sqlite3_vtab *pVTab, sqlite3_index_info *pInfo){ /* Calculate the estimated cost based on the flags set in idxFlags. */ if( bSeenEq ){ - pInfo->estimatedCost = bSeenMatch ? 100.0 : 10.0; - if( bSeenMatch==0 ) fts5SetUniqueFlag(pInfo); - }else if( bSeenLt && bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 500.0 : 250000.0; - }else if( bSeenLt || bSeenGt ){ - pInfo->estimatedCost = bSeenMatch ? 750.0 : 750000.0; + pInfo->estimatedCost = nSeenMatch ? 1000.0 : 25.0; + fts5SetUniqueFlag(pInfo); + fts5SetEstimatedRows(pInfo, 1); }else{ - pInfo->estimatedCost = bSeenMatch ? 1000.0 : 1000000.0; + if( bSeenLt && bSeenGt ){ + pInfo->estimatedCost = nSeenMatch ? 5000.0 : 750000.0; + }else if( bSeenLt || bSeenGt ){ + pInfo->estimatedCost = nSeenMatch ? 7500.0 : 2250000.0; + }else{ + pInfo->estimatedCost = nSeenMatch ? 
10000.0 : 3000000.0; + } + for(i=1; iestimatedCost *= 0.4; + } + fts5SetEstimatedRows(pInfo, (i64)(pInfo->estimatedCost / 4.0)); } pInfo->idxNum = idxFlags; @@ -804,7 +870,7 @@ static int fts5SorterNext(Fts5Cursor *pCsr){ rc = sqlite3_step(pSorter->pStmt); if( rc==SQLITE_DONE ){ rc = SQLITE_OK; - CsrFlagSet(pCsr, FTS5CSR_EOF); + CsrFlagSet(pCsr, FTS5CSR_EOF|FTS5CSR_REQUIRE_CONTENT); }else if( rc==SQLITE_ROW ){ const u8 *a; const u8 *aBlob; @@ -871,7 +937,9 @@ static int fts5CursorReseek(Fts5Cursor *pCsr, int *pbSkip){ int bDesc = pCsr->bDesc; i64 iRowid = sqlite3Fts5ExprRowid(pCsr->pExpr); - rc = sqlite3Fts5ExprFirst(pCsr->pExpr, pTab->p.pIndex, iRowid, bDesc); + rc = sqlite3Fts5ExprFirst( + pCsr->pExpr, pTab->p.pIndex, iRowid, pCsr->iLastRowid, bDesc + ); if( rc==SQLITE_OK && iRowid!=sqlite3Fts5ExprRowid(pCsr->pExpr) ){ *pbSkip = 1; } @@ -904,6 +972,16 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ ); assert( !CsrFlagTest(pCsr, FTS5CSR_EOF) ); + /* If this cursor uses FTS5_PLAN_MATCH and this is a tokendata=1 table, + ** clear any token mappings accumulated at the fts5_index.c level. In + ** other cases, specifically FTS5_PLAN_SOURCE and FTS5_PLAN_SORTED_MATCH, + ** we need to retain the mappings for the entire query. */ + if( pCsr->ePlan==FTS5_PLAN_MATCH + && ((Fts5Table*)pCursor->pVtab)->pConfig->bTokendata + ){ + sqlite3Fts5ExprClearTokens(pCsr->pExpr); + } + if( pCsr->ePlan<3 ){ int bSkip = 0; if( (rc = fts5CursorReseek(pCsr, &bSkip)) || bSkip ) return rc; @@ -938,6 +1016,7 @@ static int fts5NextMethod(sqlite3_vtab_cursor *pCursor){ } }else{ rc = SQLITE_OK; + CsrFlagSet(pCsr, FTS5CSR_REQUIRE_DOCSIZE); } break; } @@ -967,7 +1046,7 @@ static int fts5PrepareStatement( rc = sqlite3_prepare_v3(pConfig->db, zSql, -1, SQLITE_PREPARE_PERSISTENT, &pRet, 0); if( rc!=SQLITE_OK ){ - *pConfig->pzErrmsg = sqlite3_mprintf("%s", sqlite3_errmsg(pConfig->db)); + sqlite3Fts5ConfigErrmsg(pConfig, "%s", sqlite3_errmsg(pConfig->db)); } sqlite3_free(zSql); } @@ -991,7 +1070,7 @@ static int fts5CursorFirstSorted( const char *zRankArgs = pCsr->zRankArgs; nPhrase = sqlite3Fts5ExprPhraseCount(pCsr->pExpr); - nByte = sizeof(Fts5Sorter) + sizeof(int) * (nPhrase-1); + nByte = SZ_FTS5SORTER(nPhrase); pSorter = (Fts5Sorter*)sqlite3_malloc64(nByte); if( pSorter==0 ) return SQLITE_NOMEM; memset(pSorter, 0, (size_t)nByte); @@ -1032,7 +1111,9 @@ static int fts5CursorFirstSorted( static int fts5CursorFirst(Fts5FullTable *pTab, Fts5Cursor *pCsr, int bDesc){ int rc; Fts5Expr *pExpr = pCsr->pExpr; - rc = sqlite3Fts5ExprFirst(pExpr, pTab->p.pIndex, pCsr->iFirstRowid, bDesc); + rc = sqlite3Fts5ExprFirst( + pExpr, pTab->p.pIndex, pCsr->iFirstRowid, pCsr->iLastRowid, bDesc + ); if( sqlite3Fts5ExprEof(pExpr) ){ CsrFlagSet(pCsr, FTS5CSR_EOF); } @@ -1191,6 +1272,145 @@ static i64 fts5GetRowidLimit(sqlite3_value *pVal, i64 iDefault){ return iDefault; } +/* +** Set the error message on the virtual table passed as the first argument. +*/ +static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ + va_list ap; /* ... printf arguments */ + va_start(ap, zFormat); + sqlite3_free(p->p.base.zErrMsg); + p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); + va_end(ap); +} + +/* +** Arrange for subsequent calls to sqlite3Fts5Tokenize() to use the locale +** specified by pLocale/nLocale. The buffer indicated by pLocale must remain +** valid until after the final call to sqlite3Fts5Tokenize() that will use +** the locale. 
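/* Illustrative sketch only, not part of the upstream change: the contract
** described above implies a set/tokenize/clear pattern, which is exactly how
** fts5ApiTokenize_v2() later in this file uses these helpers. The wrapper
** name fts5TokenizeWithLocale() is hypothetical, and the fts5 internals
** (fts5Int.h, plus the two locale helpers defined just below) are assumed. */
static int fts5TokenizeWithLocale(
  Fts5Config *pConfig,            /* Table configuration */
  const char *zLoc, int nLoc,     /* Locale to tokenize with (may be NULL/0) */
  const char *zText, int nText,   /* Text to tokenize */
  void *pCtx,                     /* First argument passed to xToken() */
  int (*xToken)(void*, int, const char*, int, int, int)
){
  int rc;
  sqlite3Fts5SetLocale(pConfig, zLoc, nLoc);   /* zLoc is borrowed, not copied */
  rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX, zText, nText, pCtx, xToken);
  sqlite3Fts5ClearLocale(pConfig);             /* zLoc may be released after this */
  return rc;
}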
+*/ +static void sqlite3Fts5SetLocale( + Fts5Config *pConfig, + const char *zLocale, + int nLocale +){ + Fts5TokenizerConfig *pT = &pConfig->t; + pT->pLocale = zLocale; + pT->nLocale = nLocale; +} + +/* +** Clear any locale configured by an earlier call to sqlite3Fts5SetLocale(). +*/ +void sqlite3Fts5ClearLocale(Fts5Config *pConfig){ + sqlite3Fts5SetLocale(pConfig, 0, 0); +} + +/* +** Return true if the value passed as the only argument is an +** fts5_locale() value. +*/ +int sqlite3Fts5IsLocaleValue(Fts5Config *pConfig, sqlite3_value *pVal){ + int ret = 0; + if( sqlite3_value_type(pVal)==SQLITE_BLOB ){ + /* Call sqlite3_value_bytes() after sqlite3_value_blob() in this case. + ** If the blob was created using zeroblob(), then sqlite3_value_blob() + ** may call malloc(). If this malloc() fails, then the values returned + ** by both value_blob() and value_bytes() will be 0. If value_bytes() were + ** called first, then the NULL pointer returned by value_blob() might + ** be dereferenced. */ + const u8 *pBlob = sqlite3_value_blob(pVal); + int nBlob = sqlite3_value_bytes(pVal); + if( nBlob>FTS5_LOCALE_HDR_SIZE + && 0==memcmp(pBlob, FTS5_LOCALE_HDR(pConfig), FTS5_LOCALE_HDR_SIZE) + ){ + ret = 1; + } + } + return ret; +} + +/* +** Value pVal is guaranteed to be an fts5_locale() value, according to +** sqlite3Fts5IsLocaleValue(). This function extracts the text and locale +** from the value and returns them separately. +** +** If successful, SQLITE_OK is returned and (*ppText) and (*ppLoc) set +** to point to buffers containing the text and locale, as utf-8, +** respectively. In this case output parameters (*pnText) and (*pnLoc) are +** set to the sizes in bytes of these two buffers. +** +** Or, if an error occurs, then an SQLite error code is returned. The final +** value of the four output parameters is undefined in this case. +*/ +int sqlite3Fts5DecodeLocaleValue( + sqlite3_value *pVal, + const char **ppText, + int *pnText, + const char **ppLoc, + int *pnLoc +){ + const char *p = sqlite3_value_blob(pVal); + int n = sqlite3_value_bytes(pVal); + int nLoc = 0; + + assert( sqlite3_value_type(pVal)==SQLITE_BLOB ); + assert( n>FTS5_LOCALE_HDR_SIZE ); + + for(nLoc=FTS5_LOCALE_HDR_SIZE; p[nLoc]; nLoc++){ + if( nLoc==(n-1) ){ + return SQLITE_MISMATCH; + } + } + *ppLoc = &p[FTS5_LOCALE_HDR_SIZE]; + *pnLoc = nLoc - FTS5_LOCALE_HDR_SIZE; + + *ppText = &p[nLoc+1]; + *pnText = n - nLoc - 1; + return SQLITE_OK; +} + +/* +** Argument pVal is the text of a full-text search expression. It may or +** may not have been wrapped by fts5_locale(). This function extracts +** the text of the expression, and sets output variable (*pzText) to +** point to a nul-terminated buffer containing the expression. +** +** If pVal was an fts5_locale() value, then sqlite3Fts5SetLocale() is called +** to set the tokenizer to use the specified locale. +** +** If output variable (*pbFreeAndReset) is set to true, then the caller +** is required to (a) call sqlite3Fts5ClearLocale() to reset the tokenizer +** locale, and (b) call sqlite3_free() to free (*pzText). 
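/* Illustrative sketch only, not part of the upstream change: a minimal caller
** that honours the contract described above. It mirrors how fts5FilterMethod()
** consumes the extracted expression text further down in this file. The helper
** name fts5ParseMatchArg() is hypothetical; fts5ExtractExprText() is the
** function defined immediately below. */
static int fts5ParseMatchArg(
  Fts5FullTable *pTab,            /* Table the MATCH applies to */
  Fts5Cursor *pCsr,               /* Cursor being filtered */
  sqlite3_value *pVal,            /* RHS of the MATCH operator */
  int iCol                        /* Column on the LHS of the MATCH operator */
){
  Fts5Config *pConfig = pTab->p.pConfig;
  char *zText = 0;
  int bFreeAndReset = 0;
  int rc = fts5ExtractExprText(pConfig, pVal, &zText, &bFreeAndReset);

  if( rc==SQLITE_OK ){
    Fts5Expr *pExpr = 0;
    if( zText==0 ) zText = "";
    rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, &pTab->p.base.zErrMsg);
    if( rc==SQLITE_OK ){
      rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr);   /* takes ownership of pExpr */
    }
  }
  if( bFreeAndReset ){
    sqlite3_free(zText);              /* (b) free the extracted text */
    sqlite3Fts5ClearLocale(pConfig);  /* (a) reset the tokenizer locale */
  }
  return rc;
}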
+*/ +static int fts5ExtractExprText( + Fts5Config *pConfig, /* Fts5 configuration */ + sqlite3_value *pVal, /* Value to extract expression text from */ + char **pzText, /* OUT: nul-terminated buffer of text */ + int *pbFreeAndReset /* OUT: Free (*pzText) and clear locale */ +){ + int rc = SQLITE_OK; + + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + *pzText = sqlite3Fts5Mprintf(&rc, "%.*s", nText, pText); + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + } + *pbFreeAndReset = 1; + }else{ + *pzText = (char*)sqlite3_value_text(pVal); + *pbFreeAndReset = 0; + } + + return rc; +} + + /* ** This is the xFilter interface for the virtual table. See ** the virtual table xFilter method documentation for additional @@ -1221,17 +1441,12 @@ static int fts5FilterMethod( sqlite3_value *pRowidGe = 0; /* rowid >= ? expression (or NULL) */ int iCol; /* Column on LHS of MATCH operator */ char **pzErrmsg = pConfig->pzErrmsg; + int bPrefixInsttoken = pConfig->bPrefixInsttoken; int i; int iIdxStr = 0; Fts5Expr *pExpr = 0; - if( pConfig->bLock ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "recursively defined fts5 content table" - ); - return SQLITE_ERROR; - } - + assert( pConfig->bLock==0 ); if( pCsr->ePlan ){ fts5FreeCursorComponents(pCsr); memset(&pCsr->ePlan, 0, sizeof(Fts5Cursor) - ((u8*)&pCsr->ePlan-(u8*)pCsr)); @@ -1255,8 +1470,17 @@ static int fts5FilterMethod( pRank = apVal[i]; break; case 'M': { - const char *zText = (const char*)sqlite3_value_text(apVal[i]); + char *zText = 0; + int bFreeAndReset = 0; + int bInternal = 0; + + rc = fts5ExtractExprText(pConfig, apVal[i], &zText, &bFreeAndReset); + if( rc!=SQLITE_OK ) goto filter_out; if( zText==0 ) zText = ""; + if( sqlite3_value_subtype(apVal[i])==FTS5_INSTTOKEN_SUBTYPE ){ + pConfig->bPrefixInsttoken = 1; + } + iCol = 0; do{ iCol = iCol*10 + (idxStr[iIdxStr]-'0'); @@ -1268,7 +1492,7 @@ static int fts5FilterMethod( ** indicates that the MATCH expression is not a full text query, ** but a request for an internal parameter. */ rc = fts5SpecialMatch(pTab, pCsr, &zText[1]); - goto filter_out; + bInternal = 1; }else{ char **pzErr = &pTab->p.base.zErrMsg; rc = sqlite3Fts5ExprNew(pConfig, 0, iCol, zText, &pExpr, pzErr); @@ -1276,9 +1500,15 @@ static int fts5FilterMethod( rc = sqlite3Fts5ExprAnd(&pCsr->pExpr, pExpr); pExpr = 0; } - if( rc!=SQLITE_OK ) goto filter_out; } + if( bFreeAndReset ){ + sqlite3_free(zText); + sqlite3Fts5ClearLocale(pConfig); + } + + if( bInternal || rc!=SQLITE_OK ) goto filter_out; + break; } case 'L': @@ -1329,6 +1559,9 @@ static int fts5FilterMethod( pCsr->iFirstRowid = fts5GetRowidLimit(pRowidGe, SMALLEST_INT64); } + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + if( rc!=SQLITE_OK ) goto filter_out; + if( pTab->pSortCsr ){ /* If pSortCsr is non-NULL, then this call is being made as part of ** processing for a "... 
MATCH ORDER BY rank" query (ePlan is @@ -1351,6 +1584,7 @@ static int fts5FilterMethod( pCsr->pExpr = pTab->pSortCsr->pExpr; rc = fts5CursorFirst(pTab, pCsr, bDesc); }else if( pCsr->pExpr ){ + assert( rc==SQLITE_OK ); rc = fts5CursorParseRank(pConfig, pCsr, pRank); if( rc==SQLITE_OK ){ if( bOrderByRank ){ @@ -1362,9 +1596,7 @@ static int fts5FilterMethod( } } }else if( pConfig->zContent==0 ){ - *pConfig->pzErrmsg = sqlite3_mprintf( - "%s: table does not support scanning", pConfig->zName - ); + fts5SetVtabError(pTab,"%s: table does not support scanning",pConfig->zName); rc = SQLITE_ERROR; }else{ /* This is either a full-table scan (ePlan==FTS5_PLAN_SCAN) or a lookup @@ -1388,6 +1620,7 @@ static int fts5FilterMethod( filter_out: sqlite3Fts5ExprFree(pExpr); pConfig->pzErrmsg = pzErrmsg; + pConfig->bPrefixInsttoken = bPrefixInsttoken; return rc; } @@ -1407,9 +1640,13 @@ static i64 fts5CursorRowid(Fts5Cursor *pCsr){ assert( pCsr->ePlan==FTS5_PLAN_MATCH || pCsr->ePlan==FTS5_PLAN_SORTED_MATCH || pCsr->ePlan==FTS5_PLAN_SOURCE + || pCsr->ePlan==FTS5_PLAN_SCAN + || pCsr->ePlan==FTS5_PLAN_ROWID ); if( pCsr->pSorter ){ return pCsr->pSorter->iRowid; + }else if( pCsr->ePlan>=FTS5_PLAN_SCAN ){ + return sqlite3_column_int64(pCsr->pStmt, 0); }else{ return sqlite3Fts5ExprRowid(pCsr->pExpr); } @@ -1426,25 +1663,16 @@ static int fts5RowidMethod(sqlite3_vtab_cursor *pCursor, sqlite_int64 *pRowid){ int ePlan = pCsr->ePlan; assert( CsrFlagTest(pCsr, FTS5CSR_EOF)==0 ); - switch( ePlan ){ - case FTS5_PLAN_SPECIAL: - *pRowid = 0; - break; - - case FTS5_PLAN_SOURCE: - case FTS5_PLAN_MATCH: - case FTS5_PLAN_SORTED_MATCH: - *pRowid = fts5CursorRowid(pCsr); - break; - - default: - *pRowid = sqlite3_column_int64(pCsr->pStmt, 0); - break; + if( ePlan==FTS5_PLAN_SPECIAL ){ + *pRowid = 0; + }else{ + *pRowid = fts5CursorRowid(pCsr); } return SQLITE_OK; } + /* ** If the cursor requires seeking (bSeekRequired flag is set), seek it. ** Return SQLITE_OK if no error occurs, or an SQLite error code otherwise. @@ -1481,8 +1709,13 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ rc = sqlite3_reset(pCsr->pStmt); if( rc==SQLITE_OK ){ rc = FTS5_CORRUPT; + fts5SetVtabError((Fts5FullTable*)pTab, + "fts5: missing row %lld from content table %s", + fts5CursorRowid(pCsr), + pTab->pConfig->zContent + ); }else if( pTab->pConfig->pzErrmsg ){ - *pTab->pConfig->pzErrmsg = sqlite3_mprintf( + fts5SetVtabError((Fts5FullTable*)pTab, "%s", sqlite3_errmsg(pTab->pConfig->db) ); } @@ -1491,14 +1724,6 @@ static int fts5SeekCursor(Fts5Cursor *pCsr, int bErrormsg){ return rc; } -static void fts5SetVtabError(Fts5FullTable *p, const char *zFormat, ...){ - va_list ap; /* ... printf arguments */ - va_start(ap, zFormat); - assert( p->p.base.zErrMsg==0 ); - p->p.base.zErrMsg = sqlite3_vmprintf(zFormat, ap); - va_end(ap); -} - /* ** This function is called to handle an FTS INSERT command. 
In other words, ** an INSERT statement of the form: @@ -1522,6 +1747,7 @@ static int fts5SpecialInsert( Fts5Config *pConfig = pTab->p.pConfig; int rc = SQLITE_OK; int bError = 0; + int bLoadConfig = 0; if( 0==sqlite3_stricmp("delete-all", zCmd) ){ if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ @@ -1533,8 +1759,9 @@ static int fts5SpecialInsert( }else{ rc = sqlite3Fts5StorageDeleteAll(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("rebuild", zCmd) ){ - if( pConfig->eContent==FTS5_CONTENT_NONE ){ + if( fts5IsContentless(pTab, 1) ){ fts5SetVtabError(pTab, "'rebuild' may not be used with a contentless fts5 table" ); @@ -1542,6 +1769,7 @@ static int fts5SpecialInsert( }else{ rc = sqlite3Fts5StorageRebuild(pTab->pStorage); } + bLoadConfig = 1; }else if( 0==sqlite3_stricmp("optimize", zCmd) ){ rc = sqlite3Fts5StorageOptimize(pTab->pStorage); }else if( 0==sqlite3_stricmp("merge", zCmd) ){ @@ -1554,8 +1782,13 @@ static int fts5SpecialInsert( }else if( 0==sqlite3_stricmp("prefix-index", zCmd) ){ pConfig->bPrefixIndex = sqlite3_value_int(pVal); #endif + }else if( 0==sqlite3_stricmp("flush", zCmd) ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); }else{ - rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } if( rc==SQLITE_OK ){ rc = sqlite3Fts5ConfigSetValue(pTab->p.pConfig, zCmd, pVal, &bError); } @@ -1567,6 +1800,12 @@ static int fts5SpecialInsert( } } } + + if( rc==SQLITE_OK && bLoadConfig ){ + pTab->p.pConfig->iCookie--; + rc = sqlite3Fts5IndexLoadConfig(pTab->p.pIndex); + } + return rc; } @@ -1578,7 +1817,7 @@ static int fts5SpecialDelete( int eType1 = sqlite3_value_type(apVal[1]); if( eType1==SQLITE_INTEGER ){ sqlite3_int64 iDel = sqlite3_value_int64(apVal[1]); - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2]); + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, &apVal[2], 0); } return rc; } @@ -1591,7 +1830,7 @@ static void fts5StorageInsert( ){ int rc = *pRc; if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, piRowid); + rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, 0, apVal, piRowid); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal, *piRowid); @@ -1599,6 +1838,67 @@ static void fts5StorageInsert( *pRc = rc; } +/* +** +** This function is called when the user attempts an UPDATE on a contentless +** table. Parameter bRowidModified is true if the UPDATE statement modifies +** the rowid value. Parameter apVal[] contains the new values for each user +** defined column of the fts5 table. pConfig is the configuration object of the +** table being updated (guaranteed to be contentless). The contentless_delete=1 +** and contentless_unindexed=1 options may or may not be set. +** +** This function returns SQLITE_OK if the UPDATE can go ahead, or an SQLite +** error code if it cannot. In this case an error message is also loaded into +** pConfig. Output parameter (*pbContent) is set to true if the caller should +** update the %_content table only - not the FTS index or any other shadow +** table. This occurs when an UPDATE modifies only UNINDEXED columns of the +** table. +** +** An UPDATE may proceed if: +** +** * The only columns modified are UNINDEXED columns, or +** +** * The contentless_delete=1 option was specified and all of the indexed +** columns (not a subset) have been modified. 
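/* Editorial illustration, not part of the upstream change: how the two rules
** above apply to a hypothetical contentless table
**
**   CREATE VIRTUAL TABLE t USING fts5(
**     a, b, tag UNINDEXED,
**     content='', contentless_delete=1, contentless_unindexed=1
**   );
**
**   UPDATE t SET tag='seen' WHERE rowid=1;     -- ok: UNINDEXED column only
**   UPDATE t SET a='x', b='y' WHERE rowid=1;   -- ok: every indexed column set
**   UPDATE t SET a='x' WHERE rowid=1;          -- error: subset of indexed columns
**   UPDATE t SET rowid=2 WHERE rowid=1;        -- error: rowid change without new
**                                              --        values for both a and b
**
** Without contentless_delete=1 only the first statement would be accepted. */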
+*/ +static int fts5ContentlessUpdate( + Fts5Config *pConfig, + sqlite3_value **apVal, + int bRowidModified, + int *pbContent +){ + int ii; + int bSeenIndex = 0; /* Have seen modified indexed column */ + int bSeenIndexNC = 0; /* Have seen unmodified indexed column */ + int rc = SQLITE_OK; + + for(ii=0; iinCol; ii++){ + if( pConfig->abUnindexed[ii]==0 ){ + if( sqlite3_value_nochange(apVal[ii]) ){ + bSeenIndexNC++; + }else{ + bSeenIndex++; + } + } + } + + if( bSeenIndex==0 && bRowidModified==0 ){ + *pbContent = 1; + }else{ + if( bSeenIndexNC || pConfig->bContentlessDelete==0 ){ + rc = SQLITE_ERROR; + sqlite3Fts5ConfigErrmsg(pConfig, + (pConfig->bContentlessDelete ? + "%s a subset of columns on fts5 contentless-delete table: %s" : + "%s contentless fts5 table: %s") + , "cannot UPDATE", pConfig->zName + ); + } + } + + return rc; +} + /* ** This function is the implementation of the xUpdate callback used by ** FTS3 virtual tables. It is invoked by SQLite each time a row is to be @@ -1625,7 +1925,7 @@ static int fts5UpdateMethod( int rc = SQLITE_OK; /* Return code */ /* A transaction must be open when this is called. */ - assert( pTab->ts.eState==1 ); + assert( pTab->ts.eState==1 || pTab->ts.eState==2 ); assert( pVtab->zErrMsg==0 ); assert( nArg==1 || nArg==(2+pConfig->nCol+2) ); @@ -1633,6 +1933,11 @@ static int fts5UpdateMethod( || sqlite3_value_type(apVal[0])==SQLITE_NULL ); assert( pTab->p.pConfig->pzErrmsg==0 ); + if( pConfig->pgsz==0 ){ + rc = sqlite3Fts5ConfigLoad(pTab->p.pConfig, pTab->p.pConfig->iCookie); + if( rc!=SQLITE_OK ) return rc; + } + pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; /* Put any active cursors into REQUIRE_SEEK state. */ @@ -1647,7 +1952,14 @@ static int fts5UpdateMethod( if( pConfig->eContent!=FTS5_CONTENT_NORMAL && 0==sqlite3_stricmp("delete", z) ){ - rc = fts5SpecialDelete(pTab, apVal); + if( pConfig->bContentlessDelete ){ + fts5SetVtabError(pTab, + "'delete' may not be used with a contentless_delete=1 table" + ); + rc = SQLITE_ERROR; + }else{ + rc = fts5SpecialDelete(pTab, apVal); + } }else{ rc = fts5SpecialInsert(pTab, z, apVal[2 + pConfig->nCol + 1]); } @@ -1664,74 +1976,111 @@ static int fts5UpdateMethod( ** Cases 3 and 4 may violate the rowid constraint. */ int eConflict = SQLITE_ABORT; - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL || pConfig->bContentlessDelete ){ eConflict = sqlite3_vtab_on_conflict(pConfig->db); } assert( eType0==SQLITE_INTEGER || eType0==SQLITE_NULL ); assert( nArg!=1 || eType0==SQLITE_INTEGER ); - /* Filter out attempts to run UPDATE or DELETE on contentless tables. - ** This is not suported. */ - if( eType0==SQLITE_INTEGER && fts5IsContentless(pTab) ){ - pTab->p.base.zErrMsg = sqlite3_mprintf( - "cannot %s contentless fts5 table: %s", - (nArg>1 ? "UPDATE" : "DELETE from"), pConfig->zName - ); - rc = SQLITE_ERROR; - } - /* DELETE */ - else if( nArg==1 ){ - i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0); + if( nArg==1 ){ + /* It is only possible to DELETE from a contentless table if the + ** contentless_delete=1 flag is set. 
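/* Editorial illustration, not part of the upstream change: the effect of the
** check below on DELETE statements against contentless tables.
**
**   CREATE VIRTUAL TABLE t1 USING fts5(a, content='');
**   DELETE FROM t1 WHERE rowid=5;    -- error: contentless_delete=1 not set
**
**   CREATE VIRTUAL TABLE t2 USING fts5(a, content='', contentless_delete=1);
**   DELETE FROM t2 WHERE rowid=5;    -- ok
**
** Note that for t2 the legacy special command
** INSERT INTO t2(t2, rowid, a) VALUES('delete', ...) is rejected instead,
** per the contentless_delete check in fts5UpdateMethod() above. */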
*/ + if( fts5IsContentless(pTab, 1) && pConfig->bContentlessDelete==0 ){ + fts5SetVtabError(pTab, + "cannot DELETE from contentless fts5 table: %s", pConfig->zName + ); + rc = SQLITE_ERROR; + }else{ + i64 iDel = sqlite3_value_int64(apVal[0]); /* Rowid to delete */ + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iDel, 0, 0); + } } /* INSERT or UPDATE */ else{ int eType1 = sqlite3_value_numeric_type(apVal[1]); - if( eType1!=SQLITE_INTEGER && eType1!=SQLITE_NULL ){ - rc = SQLITE_MISMATCH; + /* It is an error to write an fts5_locale() value to a table without + ** the locale=1 option. */ + if( pConfig->bLocale==0 ){ + int ii; + for(ii=0; iinCol; ii++){ + sqlite3_value *pVal = apVal[ii+2]; + if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + fts5SetVtabError(pTab, "fts5_locale() requires locale=1"); + rc = SQLITE_MISMATCH; + goto update_out; + } + } } - else if( eType0!=SQLITE_INTEGER ){ - /* If this is a REPLACE, first remove the current entry (if any) */ + if( eType0!=SQLITE_INTEGER ){ + /* An INSERT statement. If the conflict-mode is REPLACE, first remove + ** the current entry (if any). */ if( eConflict==SQLITE_REPLACE && eType1==SQLITE_INTEGER ){ i64 iNew = sqlite3_value_int64(apVal[1]); /* Rowid to delete */ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); } /* UPDATE */ else{ + Fts5Storage *pStorage = pTab->pStorage; i64 iOld = sqlite3_value_int64(apVal[0]); /* Old rowid */ i64 iNew = sqlite3_value_int64(apVal[1]); /* New rowid */ - if( eType1==SQLITE_INTEGER && iOld!=iNew ){ + int bContent = 0; /* Content only update */ + + /* If this is a contentless table (including contentless_unindexed=1 + ** tables), check if the UPDATE may proceed. */ + if( fts5IsContentless(pTab, 1) ){ + rc = fts5ContentlessUpdate(pConfig, &apVal[2], iOld!=iNew, &bContent); + if( rc!=SQLITE_OK ) goto update_out; + } + + if( eType1!=SQLITE_INTEGER ){ + rc = SQLITE_MISMATCH; + }else if( iOld!=iNew ){ + assert( bContent==0 ); if( eConflict==SQLITE_REPLACE ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iNew, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iNew, 0, 0); } fts5StorageInsert(&rc, pTab, apVal, pRowid); }else{ - rc = sqlite3Fts5StorageContentInsert(pTab->pStorage, apVal, pRowid); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 0, apVal, pRowid); + } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 0); } if( rc==SQLITE_OK ){ - rc = sqlite3Fts5StorageIndexInsert(pTab->pStorage, apVal,*pRowid); + rc = sqlite3Fts5StorageIndexInsert(pStorage, apVal, *pRowid); } } + }else if( bContent ){ + /* This occurs when an UPDATE on a contentless table affects *only* + ** UNINDEXED columns. This is a no-op for contentless_unindexed=0 + ** tables, or a write to the %_content table only for =1 tables. 
*/ + assert( fts5IsContentless(pTab, 1) ); + rc = sqlite3Fts5StorageFindDeleteRow(pStorage, iOld); + if( rc==SQLITE_OK ){ + rc = sqlite3Fts5StorageContentInsert(pStorage, 1, apVal, pRowid); + } }else{ - rc = sqlite3Fts5StorageDelete(pTab->pStorage, iOld, 0); + rc = sqlite3Fts5StorageDelete(pStorage, iOld, 0, 1); fts5StorageInsert(&rc, pTab, apVal, pRowid); } + sqlite3Fts5StorageReleaseDeleteRow(pStorage); } } } + update_out: pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -1744,8 +2093,7 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_SYNC, 0); pTab->p.pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - fts5TripCursors(pTab); - rc = sqlite3Fts5StorageSync(pTab->pStorage); + rc = sqlite3Fts5FlushToDisk(&pTab->p); pTab->p.pConfig->pzErrmsg = 0; return rc; } @@ -1754,9 +2102,11 @@ static int fts5SyncMethod(sqlite3_vtab *pVtab){ ** Implementation of xBegin() method. */ static int fts5BeginMethod(sqlite3_vtab *pVtab){ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); - fts5NewTransaction((Fts5FullTable*)pVtab); - return SQLITE_OK; + int rc = fts5NewTransaction((Fts5FullTable*)pVtab); + if( rc==SQLITE_OK ){ + fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_BEGIN, 0); + } + return rc; } /* @@ -1779,6 +2129,7 @@ static int fts5RollbackMethod(sqlite3_vtab *pVtab){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; fts5CheckTransactionState(pTab, FTS5_ROLLBACK, 0); rc = sqlite3Fts5StorageRollback(pTab->pStorage); + pTab->p.pConfig->pgsz = 0; return rc; } @@ -1810,17 +2161,40 @@ static int fts5ApiRowCount(Fts5Context *pCtx, i64 *pnRow){ return sqlite3Fts5StorageRowCount(pTab->pStorage, pnRow); } -static int fts5ApiTokenize( +/* +** Implementation of xTokenize_v2() API. +*/ +static int fts5ApiTokenize_v2( Fts5Context *pCtx, const char *pText, int nText, + const char *pLoc, int nLoc, void *pUserData, int (*xToken)(void*, int, const char*, int, int, int) ){ Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); - return sqlite3Fts5Tokenize( - pTab->pConfig, FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken + int rc = SQLITE_OK; + + sqlite3Fts5SetLocale(pTab->pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pTab->pConfig, + FTS5_TOKENIZE_AUX, pText, nText, pUserData, xToken ); + sqlite3Fts5SetLocale(pTab->pConfig, 0, 0); + + return rc; +} + +/* +** Implementation of xTokenize() API. This is just xTokenize_v2() with NULL/0 +** passed as the locale. +*/ +static int fts5ApiTokenize( + Fts5Context *pCtx, + const char *pText, int nText, + void *pUserData, + int (*xToken)(void*, int, const char*, int, int, int) +){ + return fts5ApiTokenize_v2(pCtx, pText, nText, 0, 0, pUserData, xToken); } static int fts5ApiPhraseCount(Fts5Context *pCtx){ @@ -1833,6 +2207,49 @@ static int fts5ApiPhraseSize(Fts5Context *pCtx, int iPhrase){ return sqlite3Fts5ExprPhraseSize(pCsr->pExpr, iPhrase); } +/* +** Argument pStmt is an SQL statement of the type used by Fts5Cursor. This +** function extracts the text value of column iCol of the current row. +** Additionally, if there is an associated locale, it invokes +** sqlite3Fts5SetLocale() to configure the tokenizer. In all cases the caller +** should invoke sqlite3Fts5ClearLocale() to clear the locale at some point +** after this function returns. +** +** If successful, (*ppText) is set to point to a buffer containing the text +** value as utf-8 and SQLITE_OK returned. (*pnText) is set to the size of that +** buffer in bytes. 
It is not guaranteed to be nul-terminated. If an error +** occurs, an SQLite error code is returned. The final values of the two +** output parameters are undefined in this case. +*/ +static int fts5TextFromStmt( + Fts5Config *pConfig, + sqlite3_stmt *pStmt, + int iCol, + const char **ppText, + int *pnText +){ + sqlite3_value *pVal = sqlite3_column_value(pStmt, iCol+1); + const char *pLoc = 0; + int nLoc = 0; + int rc = SQLITE_OK; + + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, ppText, pnText, &pLoc, &nLoc); + }else{ + *ppText = (const char*)sqlite3_value_text(pVal); + *pnText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pConfig->eContent==FTS5_CONTENT_NORMAL ){ + pLoc = (const char*)sqlite3_column_text(pStmt, iCol+1+pConfig->nCol); + nLoc = sqlite3_column_bytes(pStmt, iCol+1+pConfig->nCol); + } + } + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + return rc; +} + static int fts5ApiColumnText( Fts5Context *pCtx, int iCol, @@ -1841,46 +2258,69 @@ static int fts5ApiColumnText( ){ int rc = SQLITE_OK; Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; - if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab)) - || pCsr->ePlan==FTS5_PLAN_SPECIAL - ){ + Fts5Table *pTab = (Fts5Table*)(pCsr->base.pVtab); + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); + if( iCol<0 || iCol>=pTab->pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( fts5IsContentless((Fts5FullTable*)(pCsr->base.pVtab), 0) ){ *pz = 0; *pn = 0; }else{ rc = fts5SeekCursor(pCsr, 0); if( rc==SQLITE_OK ){ - *pz = (const char*)sqlite3_column_text(pCsr->pStmt, iCol+1); - *pn = sqlite3_column_bytes(pCsr->pStmt, iCol+1); + rc = fts5TextFromStmt(pTab->pConfig, pCsr->pStmt, iCol, pz, pn); + sqlite3Fts5ClearLocale(pTab->pConfig); } } return rc; } +/* +** This is called by various API functions - xInst, xPhraseFirst, +** xPhraseFirstColumn etc. - to obtain the position list for phrase iPhrase +** of the current row. This function works for both detail=full tables (in +** which case the position-list was read from the fts index) or for other +** detail= modes if the row content is available. 
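/* Illustrative sketch only, not part of the upstream change: fts5CsrPoslist()
** (defined below) is what ultimately feeds position data to auxiliary
** functions. A hypothetical auxiliary callback that walks every phrase hit of
** the current row through the public extension API might look like this; the
** Fts5ExtensionApi/Fts5PhraseIter definitions from fts5.h are assumed. */
static void fts5CountHitsAux(
  const Fts5ExtensionApi *pApi,   /* Extension API callbacks */
  Fts5Context *pFts,              /* First argument to pass back to pApi */
  sqlite3_context *pCtx,          /* SQL function context */
  int nVal,                       /* Number of trailing arguments */
  sqlite3_value **apVal           /* Trailing arguments (unused here) */
){
  int nHit = 0;
  int iPhrase;
  (void)nVal; (void)apVal;
  for(iPhrase=0; iPhrase<pApi->xPhraseCount(pFts); iPhrase++){
    Fts5PhraseIter iter;
    int iCol = 0, iOff = 0;
    for(pApi->xPhraseFirst(pFts, iPhrase, &iter, &iCol, &iOff);
        iCol>=0;
        pApi->xPhraseNext(pFts, &iter, &iCol, &iOff)
    ){
      nHit++;                     /* one hit is one (column, offset) pair */
    }
  }
  sqlite3_result_int(pCtx, nHit);
}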
+*/ static int fts5CsrPoslist( - Fts5Cursor *pCsr, - int iPhrase, - const u8 **pa, - int *pn + Fts5Cursor *pCsr, /* Fts5 cursor object */ + int iPhrase, /* Phrase to find position list for */ + const u8 **pa, /* OUT: Pointer to position list buffer */ + int *pn /* OUT: Size of (*pa) in bytes */ ){ Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; int rc = SQLITE_OK; int bLive = (pCsr->pSorter==0); - if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ - + if( iPhrase<0 || iPhrase>=sqlite3Fts5ExprPhraseCount(pCsr->pExpr) ){ + rc = SQLITE_RANGE; + }else if( pConfig->eDetail!=FTS5_DETAIL_FULL + && fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + ){ + *pa = 0; + *pn = 0; + return SQLITE_OK; + }else if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_POSLIST) ){ if( pConfig->eDetail!=FTS5_DETAIL_FULL ){ Fts5PoslistPopulator *aPopulator; int i; + aPopulator = sqlite3Fts5ExprClearPoslists(pCsr->pExpr, bLive); if( aPopulator==0 ) rc = SQLITE_NOMEM; + if( rc==SQLITE_OK ){ + rc = fts5SeekCursor(pCsr, 0); + } for(i=0; inCol && rc==SQLITE_OK; i++){ - int n; const char *z; - rc = fts5ApiColumnText((Fts5Context*)pCsr, i, &z, &n); + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ rc = sqlite3Fts5ExprPopulatePoslists( pConfig, pCsr->pExpr, aPopulator, i, z, n ); } + sqlite3Fts5ClearLocale(pConfig); } sqlite3_free(aPopulator); @@ -1891,13 +2331,18 @@ static int fts5CsrPoslist( CsrFlagClear(pCsr, FTS5CSR_REQUIRE_POSLIST); } - if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ - Fts5Sorter *pSorter = pCsr->pSorter; - int i1 = (iPhrase==0 ? 0 : pSorter->aIdx[iPhrase-1]); - *pn = pSorter->aIdx[iPhrase] - i1; - *pa = &pSorter->aPoslist[i1]; + if( rc==SQLITE_OK ){ + if( pCsr->pSorter && pConfig->eDetail==FTS5_DETAIL_FULL ){ + Fts5Sorter *pSorter = pCsr->pSorter; + int i1 = (iPhrase==0 ? 
0 : pSorter->aIdx[iPhrase-1]); + *pn = pSorter->aIdx[iPhrase] - i1; + *pa = &pSorter->aPoslist[i1]; + }else{ + *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + } }else{ - *pn = sqlite3Fts5ExprPoslist(pCsr->pExpr, iPhrase, pa); + *pa = 0; + *pn = 0; } return rc; @@ -1968,7 +2413,8 @@ static int fts5CacheInstArray(Fts5Cursor *pCsr){ aInst[0] = iBest; aInst[1] = FTS5_POS2COLUMN(aIter[iBest].iPos); aInst[2] = FTS5_POS2OFFSET(aIter[iBest].iPos); - if( aInst[1]<0 || aInst[1]>=nCol ){ + assert( aInst[1]>=0 ); + if( aInst[1]>=nCol ){ rc = FTS5_CORRUPT; break; } @@ -2006,12 +2452,6 @@ static int fts5ApiInst( ){ if( iIdx<0 || iIdx>=pCsr->nInstCount ){ rc = SQLITE_RANGE; -#if 0 - }else if( fts5IsOffsetless((Fts5Table*)pCsr->base.pVtab) ){ - *piPhrase = pCsr->aInst[iIdx*3]; - *piCol = pCsr->aInst[iIdx*3 + 2]; - *piOff = -1; -#endif }else{ *piPhrase = pCsr->aInst[iIdx*3]; *piCol = pCsr->aInst[iIdx*3 + 1]; @@ -2052,7 +2492,7 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ if( pConfig->bColumnsize ){ i64 iRowid = fts5CursorRowid(pCsr); rc = sqlite3Fts5StorageDocsize(pTab->pStorage, iRowid, pCsr->aColumnSize); - }else if( pConfig->zContent==0 ){ + }else if( !pConfig->zContent || pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ int i; for(i=0; inCol; i++){ if( pConfig->abUnindexed[i]==0 ){ @@ -2061,17 +2501,19 @@ static int fts5ApiColumnSize(Fts5Context *pCtx, int iCol, int *pnToken){ } }else{ int i; + rc = fts5SeekCursor(pCsr, 0); for(i=0; rc==SQLITE_OK && inCol; i++){ if( pConfig->abUnindexed[i]==0 ){ - const char *z; int n; - void *p = (void*)(&pCsr->aColumnSize[i]); + const char *z = 0; + int n = 0; pCsr->aColumnSize[i] = 0; - rc = fts5ApiColumnText(pCtx, i, &z, &n); + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, i, &z, &n); if( rc==SQLITE_OK ){ - rc = sqlite3Fts5Tokenize( - pConfig, FTS5_TOKENIZE_AUX, z, n, p, fts5ColumnSizeCb + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_AUX, + z, n, (void*)&pCsr->aColumnSize[i], fts5ColumnSizeCb ); } + sqlite3Fts5ClearLocale(pConfig); } } } @@ -2151,11 +2593,10 @@ static void *fts5ApiGetAuxdata(Fts5Context *pCtx, int bClear){ } static void fts5ApiPhraseNext( - Fts5Context *pUnused, + Fts5Context *pCtx, Fts5PhraseIter *pIter, int *piCol, int *piOff ){ - UNUSED_PARAM(pUnused); if( pIter->a>=pIter->b ){ *piCol = -1; *piOff = -1; @@ -2163,8 +2604,12 @@ static void fts5ApiPhraseNext( int iVal; pIter->a += fts5GetVarint32(pIter->a, iVal); if( iVal==1 ){ + /* Avoid returning a (*piCol) value that is too large for the table, + ** even if the position-list is corrupt. The caller might not be + ** expecting it. */ + int nCol = ((Fts5Table*)(((Fts5Cursor*)pCtx)->base.pVtab))->pConfig->nCol; pIter->a += fts5GetVarint32(pIter->a, iVal); - *piCol = iVal; + *piCol = (iVal>=nCol ? nCol-1 : iVal); *piOff = 0; pIter->a += fts5GetVarint32(pIter->a, iVal); } @@ -2266,13 +2711,96 @@ static int fts5ApiPhraseFirstColumn( return rc; } +/* +** xQueryToken() API implemenetation. +*/ +static int fts5ApiQueryToken( + Fts5Context* pCtx, + int iPhrase, + int iToken, + const char **ppOut, + int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + return sqlite3Fts5ExprQueryToken(pCsr->pExpr, iPhrase, iToken, ppOut, pnOut); +} + +/* +** xInstToken() API implemenetation. 
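/* Illustrative sketch only, not part of the upstream change: xQueryToken() and
** xInstToken() are exposed to auxiliary functions through the extended
** Fts5ExtensionApi (this patch bumps its iVersion to 4). A hypothetical loop
** that retrieves the text of every token of every phrase in the query
** expression, assuming pApi/pFts were passed to an auxiliary function: */
static int fts5DumpQueryTokens(const Fts5ExtensionApi *pApi, Fts5Context *pFts){
  int rc = SQLITE_OK;
  int iPhrase;
  for(iPhrase=0; rc==SQLITE_OK && iPhrase<pApi->xPhraseCount(pFts); iPhrase++){
    int iTok;
    for(iTok=0; rc==SQLITE_OK && iTok<pApi->xPhraseSize(pFts, iPhrase); iTok++){
      const char *pTok = 0;
      int nTok = 0;
      rc = pApi->xQueryToken(pFts, iPhrase, iTok, &pTok, &nTok);
      /* On success pTok/nTok describe token iTok of phrase iPhrase as it
      ** appeared in the query expression. xInstToken() is analogous but
      ** addresses token iToken of match instance iIdx of the current row. */
    }
  }
  return rc;
}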
+*/ +static int fts5ApiInstToken( + Fts5Context *pCtx, + int iIdx, + int iToken, + const char **ppOut, int *pnOut +){ + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + int rc = SQLITE_OK; + if( CsrFlagTest(pCsr, FTS5CSR_REQUIRE_INST)==0 + || SQLITE_OK==(rc = fts5CacheInstArray(pCsr)) + ){ + if( iIdx<0 || iIdx>=pCsr->nInstCount ){ + rc = SQLITE_RANGE; + }else{ + int iPhrase = pCsr->aInst[iIdx*3]; + int iCol = pCsr->aInst[iIdx*3 + 1]; + int iOff = pCsr->aInst[iIdx*3 + 2]; + i64 iRowid = fts5CursorRowid(pCsr); + rc = sqlite3Fts5ExprInstToken( + pCsr->pExpr, iRowid, iPhrase, iCol, iOff, iToken, ppOut, pnOut + ); + } + } + return rc; +} + static int fts5ApiQueryPhrase(Fts5Context*, int, void*, int(*)(const Fts5ExtensionApi*, Fts5Context*, void*) ); +/* +** The xColumnLocale() API. +*/ +static int fts5ApiColumnLocale( + Fts5Context *pCtx, + int iCol, + const char **pzLocale, + int *pnLocale +){ + int rc = SQLITE_OK; + Fts5Cursor *pCsr = (Fts5Cursor*)pCtx; + Fts5Config *pConfig = ((Fts5Table*)(pCsr->base.pVtab))->pConfig; + + *pzLocale = 0; + *pnLocale = 0; + + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); + if( iCol<0 || iCol>=pConfig->nCol ){ + rc = SQLITE_RANGE; + }else if( + pConfig->abUnindexed[iCol]==0 + && 0==fts5IsContentless((Fts5FullTable*)pCsr->base.pVtab, 1) + && pConfig->bLocale + ){ + rc = fts5SeekCursor(pCsr, 0); + if( rc==SQLITE_OK ){ + const char *zDummy = 0; + int nDummy = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &zDummy, &nDummy); + if( rc==SQLITE_OK ){ + *pzLocale = pConfig->t.pLocale; + *pnLocale = pConfig->t.nLocale; + } + sqlite3Fts5ClearLocale(pConfig); + } + } + + return rc; +} + static const Fts5ExtensionApi sFts5Api = { - 2, /* iVersion */ + 4, /* iVersion */ fts5ApiUserData, fts5ApiColumnCount, fts5ApiRowCount, @@ -2292,6 +2820,10 @@ static const Fts5ExtensionApi sFts5Api = { fts5ApiPhraseNext, fts5ApiPhraseFirstColumn, fts5ApiPhraseNextColumn, + fts5ApiQueryToken, + fts5ApiInstToken, + fts5ApiColumnLocale, + fts5ApiTokenize_v2 }; /* @@ -2342,6 +2874,7 @@ static void fts5ApiInvoke( sqlite3_value **argv ){ assert( pCsr->pAux==0 ); + assert( pCsr->ePlan!=FTS5_PLAN_SPECIAL ); pCsr->pAux = pAux; pAux->xFunc(&sFts5Api, (Fts5Context*)pCsr, context, argc, argv); pCsr->pAux = 0; @@ -2355,6 +2888,21 @@ static Fts5Cursor *fts5CursorFromCsrid(Fts5Global *pGlobal, i64 iCsrId){ return pCsr; } +/* +** Parameter zFmt is a printf() style formatting string. This function +** formats it using the trailing arguments and returns the result as +** an error message to the context passed as the first argument. +*/ +static void fts5ResultError(sqlite3_context *pCtx, const char *zFmt, ...){ + char *zErr = 0; + va_list ap; + va_start(ap, zFmt); + zErr = sqlite3_vmprintf(zFmt, ap); + sqlite3_result_error(pCtx, zErr, -1); + sqlite3_free(zErr); + va_end(ap); +} + static void fts5ApiCallback( sqlite3_context *context, int argc, @@ -2370,12 +2918,13 @@ static void fts5ApiCallback( iCsrId = sqlite3_value_int64(argv[0]); pCsr = fts5CursorFromCsrid(pAux->pGlobal, iCsrId); - if( pCsr==0 || pCsr->ePlan==0 ){ - char *zErr = sqlite3_mprintf("no such cursor: %lld", iCsrId); - sqlite3_result_error(context, zErr, -1); - sqlite3_free(zErr); + if( pCsr==0 || (pCsr->ePlan==0 || pCsr->ePlan==FTS5_PLAN_SPECIAL) ){ + fts5ResultError(context, "no such cursor: %lld", iCsrId); }else{ + sqlite3_vtab *pTab = pCsr->base.pVtab; fts5ApiInvoke(pAux, pCsr, context, argc-1, &argv[1]); + sqlite3_free(pTab->zErrMsg); + pTab->zErrMsg = 0; } } @@ -2493,8 +3042,8 @@ static int fts5ColumnMethod( ** auxiliary function. 
*/ sqlite3_result_int64(pCtx, pCsr->iCsrId); }else if( iCol==pConfig->nCol+1 ){ - /* The value of the "rank" column. */ + if( pCsr->ePlan==FTS5_PLAN_SOURCE ){ fts5PoslistBlob(pCtx, pCsr); }else if( @@ -2505,14 +3054,32 @@ static int fts5ColumnMethod( fts5ApiInvoke(pCsr->pRank, pCsr, pCtx, pCsr->nRankArg, pCsr->apRankArg); } } - }else if( !fts5IsContentless(pTab) ){ - pConfig->pzErrmsg = &pTab->p.base.zErrMsg; - rc = fts5SeekCursor(pCsr, 1); - if( rc==SQLITE_OK ){ - sqlite3_result_value(pCtx, sqlite3_column_value(pCsr->pStmt, iCol+1)); + }else{ + if( !sqlite3_vtab_nochange(pCtx) && pConfig->eContent!=FTS5_CONTENT_NONE ){ + pConfig->pzErrmsg = &pTab->p.base.zErrMsg; + rc = fts5SeekCursor(pCsr, 1); + if( rc==SQLITE_OK ){ + sqlite3_value *pVal = sqlite3_column_value(pCsr->pStmt, iCol+1); + if( pConfig->bLocale + && pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + const char *z = 0; + int n = 0; + rc = fts5TextFromStmt(pConfig, pCsr->pStmt, iCol, &z, &n); + if( rc==SQLITE_OK ){ + sqlite3_result_text(pCtx, z, n, SQLITE_TRANSIENT); + } + sqlite3Fts5ClearLocale(pConfig); + }else{ + sqlite3_result_value(pCtx, pVal); + } + } + + pConfig->pzErrmsg = 0; } - pConfig->pzErrmsg = 0; } + return rc; } @@ -2550,8 +3117,10 @@ static int fts5RenameMethod( sqlite3_vtab *pVtab, /* Virtual table handle */ const char *zName /* New name of table */ ){ + int rc; Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - return sqlite3Fts5StorageRename(pTab->pStorage, zName); + rc = sqlite3Fts5StorageRename(pTab->pStorage, zName); + return rc; } int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ @@ -2565,9 +3134,15 @@ int sqlite3Fts5FlushToDisk(Fts5Table *pTab){ ** Flush the contents of the pending-terms table to disk. */ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_SAVEPOINT, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + + fts5CheckTransactionState(pTab, FTS5_SAVEPOINT, iSavepoint); + rc = sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint+1; + } + return rc; } /* @@ -2576,9 +3151,16 @@ static int fts5SavepointMethod(sqlite3_vtab *pVtab, int iSavepoint){ ** This is a no-op. 
*/ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ - fts5CheckTransactionState((Fts5FullTable*)pVtab, FTS5_RELEASE, iSavepoint); - return sqlite3Fts5FlushToDisk((Fts5Table*)pVtab); + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc = SQLITE_OK; + fts5CheckTransactionState(pTab, FTS5_RELEASE, iSavepoint); + if( (iSavepoint+1)iSavepoint ){ + rc = sqlite3Fts5FlushToDisk(&pTab->p); + if( rc==SQLITE_OK ){ + pTab->iSavepoint = iSavepoint; + } + } + return rc; } /* @@ -2588,10 +3170,14 @@ static int fts5ReleaseMethod(sqlite3_vtab *pVtab, int iSavepoint){ */ static int fts5RollbackToMethod(sqlite3_vtab *pVtab, int iSavepoint){ Fts5FullTable *pTab = (Fts5FullTable*)pVtab; - UNUSED_PARAM(iSavepoint); /* Call below is a no-op for NDEBUG builds */ + int rc = SQLITE_OK; fts5CheckTransactionState(pTab, FTS5_ROLLBACKTO, iSavepoint); fts5TripCursors(pTab); - return sqlite3Fts5StorageRollback(pTab->pStorage); + if( (iSavepoint+1)<=pTab->iSavepoint ){ + pTab->p.pConfig->pgsz = 0; + rc = sqlite3Fts5StorageRollback(pTab->pStorage); + } + return rc; } /* @@ -2633,47 +3219,210 @@ static int fts5CreateAux( } /* -** Register a new tokenizer. This is the implementation of the -** fts5_api.xCreateTokenizer() method. +** This function is used by xCreateTokenizer_v2() and xCreateTokenizer(). +** It allocates and partially populates a new Fts5TokenizerModule object. +** The new object is already linked into the Fts5Global context before +** returning. +** +** If successful, SQLITE_OK is returned and a pointer to the new +** Fts5TokenizerModule object returned via output parameter (*ppNew). All +** that is required is for the caller to fill in the methods in +** Fts5TokenizerModule.x1 and x2, and to set Fts5TokenizerModule.bV2Native +** as appropriate. +** +** If an error occurs, an SQLite error code is returned and the final value +** of (*ppNew) undefined. */ -static int fts5CreateTokenizer( - fts5_api *pApi, /* Global context (one per db handle) */ +static int fts5NewTokenizerModule( + Fts5Global *pGlobal, /* Global context (one per db handle) */ const char *zName, /* Name of new function */ void *pUserData, /* User data for aux. 
function */ - fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ - void(*xDestroy)(void*) /* Destructor for pUserData */ + void(*xDestroy)(void*), /* Destructor for pUserData */ + Fts5TokenizerModule **ppNew ){ - Fts5Global *pGlobal = (Fts5Global*)pApi; - Fts5TokenizerModule *pNew; - sqlite3_int64 nName; /* Size of zName and its \0 terminator */ - sqlite3_int64 nByte; /* Bytes of space to allocate */ int rc = SQLITE_OK; + Fts5TokenizerModule *pNew; + sqlite3_int64 nName; /* Size of zName and its \0 terminator */ + sqlite3_int64 nByte; /* Bytes of space to allocate */ nName = strlen(zName) + 1; nByte = sizeof(Fts5TokenizerModule) + nName; - pNew = (Fts5TokenizerModule*)sqlite3_malloc64(nByte); + *ppNew = pNew = (Fts5TokenizerModule*)sqlite3Fts5MallocZero(&rc, nByte); if( pNew ){ - memset(pNew, 0, (size_t)nByte); pNew->zName = (char*)&pNew[1]; memcpy(pNew->zName, zName, nName); pNew->pUserData = pUserData; - pNew->x = *pTokenizer; pNew->xDestroy = xDestroy; pNew->pNext = pGlobal->pTok; pGlobal->pTok = pNew; if( pNew->pNext==0 ){ pGlobal->pDfltTok = pNew; } + } + + return rc; +} + +/* +** An instance of this type is used as the Fts5Tokenizer object for +** wrapper tokenizers - those that provide access to a v1 tokenizer via +** the fts5_tokenizer_v2 API, and those that provide access to a v2 tokenizer +** via the fts5_tokenizer API. +*/ +typedef struct Fts5VtoVTokenizer Fts5VtoVTokenizer; +struct Fts5VtoVTokenizer { + int bV2Native; /* True if v2 native tokenizer */ + fts5_tokenizer x1; /* Tokenizer functions */ + fts5_tokenizer_v2 x2; /* V2 tokenizer functions */ + Fts5Tokenizer *pReal; +}; + +/* +** Create a wrapper tokenizer. The context argument pCtx points to the +** Fts5TokenizerModule object. +*/ +static int fts5VtoVCreate( + void *pCtx, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + Fts5TokenizerModule *pMod = (Fts5TokenizerModule*)pCtx; + Fts5VtoVTokenizer *pNew = 0; + int rc = SQLITE_OK; + + pNew = (Fts5VtoVTokenizer*)sqlite3Fts5MallocZero(&rc, sizeof(*pNew)); + if( rc==SQLITE_OK ){ + pNew->x1 = pMod->x1; + pNew->x2 = pMod->x2; + pNew->bV2Native = pMod->bV2Native; + if( pMod->bV2Native ){ + rc = pMod->x2.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + }else{ + rc = pMod->x1.xCreate(pMod->pUserData, azArg, nArg, &pNew->pReal); + } + if( rc!=SQLITE_OK ){ + sqlite3_free(pNew); + pNew = 0; + } + } + + *ppOut = (Fts5Tokenizer*)pNew; + return rc; +} + +/* +** Delete an Fts5VtoVTokenizer wrapper tokenizer. +*/ +static void fts5VtoVDelete(Fts5Tokenizer *pTok){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + if( p ){ + if( p->bV2Native ){ + p->x2.xDelete(p->pReal); + }else{ + p->x1.xDelete(p->pReal); + } + sqlite3_free(p); + } +} + + +/* +** xTokenizer method for a wrapper tokenizer that offers the v1 interface +** (no support for locales). +*/ +static int fts5V1toV2Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native ); + return p->x2.xTokenize(p->pReal, pCtx, flags, pText, nText, 0, 0, xToken); +} + +/* +** xTokenizer method for a wrapper tokenizer that offers the v2 interface +** (with locale support). 
+*/ +static int fts5V2toV1Tokenize( + Fts5Tokenizer *pTok, + void *pCtx, int flags, + const char *pText, int nText, + const char *pLocale, int nLocale, + int (*xToken)(void*, int, const char*, int, int, int) +){ + Fts5VtoVTokenizer *p = (Fts5VtoVTokenizer*)pTok; + assert( p->bV2Native==0 ); + UNUSED_PARAM2(pLocale,nLocale); + return p->x1.xTokenize(p->pReal, pCtx, flags, pText, nText, xToken); +} + +/* +** Register a new tokenizer. This is the implementation of the +** fts5_api.xCreateTokenizer_v2() method. +*/ +static int fts5CreateTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer_v2 *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5Global *pGlobal = (Fts5Global*)pApi; + int rc = SQLITE_OK; + + if( pTokenizer->iVersion>2 ){ + rc = SQLITE_ERROR; }else{ - rc = SQLITE_NOMEM; + Fts5TokenizerModule *pNew = 0; + rc = fts5NewTokenizerModule(pGlobal, zName, pUserData, xDestroy, &pNew); + if( pNew ){ + pNew->x2 = *pTokenizer; + pNew->bV2Native = 1; + pNew->x1.xCreate = fts5VtoVCreate; + pNew->x1.xTokenize = fts5V1toV2Tokenize; + pNew->x1.xDelete = fts5VtoVDelete; + } } return rc; } +/* +** The fts5_api.xCreateTokenizer() method. +*/ +static int fts5CreateTokenizer( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of new function */ + void *pUserData, /* User data for aux. function */ + fts5_tokenizer *pTokenizer, /* Tokenizer implementation */ + void(*xDestroy)(void*) /* Destructor for pUserData */ +){ + Fts5TokenizerModule *pNew = 0; + int rc = SQLITE_OK; + + rc = fts5NewTokenizerModule( + (Fts5Global*)pApi, zName, pUserData, xDestroy, &pNew + ); + if( pNew ){ + pNew->x1 = *pTokenizer; + pNew->x2.xCreate = fts5VtoVCreate; + pNew->x2.xTokenize = fts5V2toV1Tokenize; + pNew->x2.xDelete = fts5VtoVDelete; + } + return rc; +} + +/* +** Search the global context passed as the first argument for a tokenizer +** module named zName. If found, return a pointer to the Fts5TokenizerModule +** object. Otherwise, return NULL. +*/ static Fts5TokenizerModule *fts5LocateTokenizer( - Fts5Global *pGlobal, - const char *zName + Fts5Global *pGlobal, /* Global (one per db handle) object */ + const char *zName /* Name of tokenizer module to find */ ){ Fts5TokenizerModule *pMod = 0; @@ -2688,6 +3437,36 @@ static Fts5TokenizerModule *fts5LocateTokenizer( return pMod; } +/* +** Find a tokenizer. This is the implementation of the +** fts5_api.xFindTokenizer_v2() method. +*/ +static int fts5FindTokenizer_v2( + fts5_api *pApi, /* Global context (one per db handle) */ + const char *zName, /* Name of tokenizer */ + void **ppUserData, + fts5_tokenizer_v2 **ppTokenizer /* Populate this object */ +){ + int rc = SQLITE_OK; + Fts5TokenizerModule *pMod; + + pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); + if( pMod ){ + if( pMod->bV2Native ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *ppTokenizer = &pMod->x2; + }else{ + *ppTokenizer = 0; + *ppUserData = 0; + rc = SQLITE_ERROR; + } + + return rc; +} + /* ** Find a tokenizer. This is the implementation of the ** fts5_api.xFindTokenizer() method. 
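/* Illustrative sketch only, not part of the upstream change: registering a
** locale-aware tokenizer through the new xCreateTokenizer_v2() entry point
** added above. myCreate()/myDelete()/myTokenize() and the tokenizer name
** "mytok" are hypothetical; the v2 xTokenize() shape (text plus a
** pLocale/nLocale pair) matches the wrapper methods above, and the fts5_api
** pointer is the one obtained via the fts5() SQL function. The structure is
** copied by the registration call, so a local variable is sufficient. */
static int myCreate(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut);
static void myDelete(Fts5Tokenizer*);
static int myTokenize(
  Fts5Tokenizer*, void *pCtx, int flags,
  const char *pText, int nText,
  const char *pLocale, int nLocale,
  int (*xToken)(void*, int, const char*, int, int, int)
);

static int registerMyTokenizer(fts5_api *pApi){
  fts5_tokenizer_v2 tok;
  tok.iVersion = 2;               /* versions greater than 2 are rejected */
  tok.xCreate = myCreate;
  tok.xDelete = myDelete;
  tok.xTokenize = myTokenize;
  return pApi->xCreateTokenizer_v2(pApi, "mytok", 0, &tok, 0);
}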
@@ -2703,53 +3482,75 @@ static int fts5FindTokenizer( pMod = fts5LocateTokenizer((Fts5Global*)pApi, zName); if( pMod ){ - *pTokenizer = pMod->x; - *ppUserData = pMod->pUserData; + if( pMod->bV2Native==0 ){ + *ppUserData = pMod->pUserData; + }else{ + *ppUserData = (void*)pMod; + } + *pTokenizer = pMod->x1; }else{ - memset(pTokenizer, 0, sizeof(fts5_tokenizer)); + memset(pTokenizer, 0, sizeof(*pTokenizer)); + *ppUserData = 0; rc = SQLITE_ERROR; } return rc; } -int sqlite3Fts5GetTokenizer( - Fts5Global *pGlobal, - const char **azArg, - int nArg, - Fts5Config *pConfig, - char **pzErr -){ - Fts5TokenizerModule *pMod; +/* +** Attempt to instantiate the tokenizer. +*/ +int sqlite3Fts5LoadTokenizer(Fts5Config *pConfig){ + const char **azArg = pConfig->t.azArg; + const int nArg = pConfig->t.nArg; + Fts5TokenizerModule *pMod = 0; int rc = SQLITE_OK; - pMod = fts5LocateTokenizer(pGlobal, nArg==0 ? 0 : azArg[0]); + pMod = fts5LocateTokenizer(pConfig->pGlobal, nArg==0 ? 0 : azArg[0]); if( pMod==0 ){ assert( nArg>0 ); rc = SQLITE_ERROR; - *pzErr = sqlite3_mprintf("no such tokenizer: %s", azArg[0]); + sqlite3Fts5ConfigErrmsg(pConfig, "no such tokenizer: %s", azArg[0]); }else{ - rc = pMod->x.xCreate( - pMod->pUserData, (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->pTok + int (*xCreate)(void*, const char**, int, Fts5Tokenizer**) = 0; + if( pMod->bV2Native ){ + xCreate = pMod->x2.xCreate; + pConfig->t.pApi2 = &pMod->x2; + }else{ + pConfig->t.pApi1 = &pMod->x1; + xCreate = pMod->x1.xCreate; + } + + rc = xCreate(pMod->pUserData, + (azArg?&azArg[1]:0), (nArg?nArg-1:0), &pConfig->t.pTok ); - pConfig->pTokApi = &pMod->x; + if( rc!=SQLITE_OK ){ - if( pzErr ) *pzErr = sqlite3_mprintf("error in tokenizer constructor"); - }else{ - pConfig->ePattern = sqlite3Fts5TokenizerPattern( - pMod->x.xCreate, pConfig->pTok + if( rc!=SQLITE_NOMEM ){ + sqlite3Fts5ConfigErrmsg(pConfig, "error in tokenizer constructor"); + } + }else if( pMod->bV2Native==0 ){ + pConfig->t.ePattern = sqlite3Fts5TokenizerPattern( + pMod->x1.xCreate, pConfig->t.pTok ); } } if( rc!=SQLITE_OK ){ - pConfig->pTokApi = 0; - pConfig->pTok = 0; + pConfig->t.pApi1 = 0; + pConfig->t.pApi2 = 0; + pConfig->t.pTok = 0; } return rc; } + +/* +** xDestroy callback passed to sqlite3_create_module(). This is invoked +** when the db handle is being closed. Free memory associated with +** tokenizers and aux functions registered with this db handle. +*/ static void fts5ModuleDestroy(void *pCtx){ Fts5TokenizerModule *pTok, *pNextTok; Fts5Auxiliary *pAux, *pNextAux; @@ -2770,6 +3571,10 @@ static void fts5ModuleDestroy(void *pCtx){ sqlite3_free(pGlobal); } +/* +** Implementation of the fts5() function used by clients to obtain the +** API pointer. +*/ static void fts5Fts5Func( sqlite3_context *pCtx, /* Function call context */ int nArg, /* Number of args */ @@ -2796,6 +3601,81 @@ static void fts5SourceIdFunc( sqlite3_result_text(pCtx, "--FTS5-SOURCE-ID--", -1, SQLITE_TRANSIENT); } +/* +** Implementation of fts5_locale(LOCALE, TEXT) function. +** +** If parameter LOCALE is NULL, or a zero-length string, then a copy of +** TEXT is returned. Otherwise, both LOCALE and TEXT are interpreted as +** text, and the value returned is a blob consisting of: +** +** * The 4 bytes 0x00, 0xE0, 0xB2, 0xEb (FTS5_LOCALE_HEADER). +** * The LOCALE, as utf-8 text, followed by +** * 0x00, followed by +** * The TEXT, as utf-8 text. +** +** There is no final nul-terminator following the TEXT value. 
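/* Editorial illustration, not part of the upstream change: given the format
** described above, fts5_locale('en_US', 'hello') produces a blob of
** FTS5_LOCALE_HDR_SIZE + 5 + 1 + 5 bytes laid out as
**
**   [ locale header ][ 'e' 'n' '_' 'U' 'S' ][ 0x00 ][ 'h' 'e' 'l' 'l' 'o' ]
**
** Such a value is only accepted by tables created with the locale=1 option,
** e.g. (hypothetical schema):
**
**   CREATE VIRTUAL TABLE t USING fts5(a, locale=1);
**   INSERT INTO t(rowid, a) VALUES(1, fts5_locale('en_US', 'hello'));
**
** whereas fts5_locale(NULL, 'hello') or fts5_locale('', 'hello') simply
** return the text value 'hello'. */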
+*/ +static void fts5LocaleFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + const char *zLocale = 0; + i64 nLocale = 0; + const char *zText = 0; + i64 nText = 0; + + assert( nArg==2 ); + UNUSED_PARAM(nArg); + + zLocale = (const char*)sqlite3_value_text(apArg[0]); + nLocale = sqlite3_value_bytes(apArg[0]); + + zText = (const char*)sqlite3_value_text(apArg[1]); + nText = sqlite3_value_bytes(apArg[1]); + + if( zLocale==0 || zLocale[0]=='\0' ){ + sqlite3_result_text(pCtx, zText, nText, SQLITE_TRANSIENT); + }else{ + Fts5Global *p = (Fts5Global*)sqlite3_user_data(pCtx); + u8 *pBlob = 0; + u8 *pCsr = 0; + i64 nBlob = 0; + + nBlob = FTS5_LOCALE_HDR_SIZE + nLocale + 1 + nText; + pBlob = (u8*)sqlite3_malloc64(nBlob); + if( pBlob==0 ){ + sqlite3_result_error_nomem(pCtx); + return; + } + + pCsr = pBlob; + memcpy(pCsr, (const u8*)p->aLocaleHdr, FTS5_LOCALE_HDR_SIZE); + pCsr += FTS5_LOCALE_HDR_SIZE; + memcpy(pCsr, zLocale, nLocale); + pCsr += nLocale; + (*pCsr++) = 0x00; + if( zText ) memcpy(pCsr, zText, nText); + assert( &pCsr[nText]==&pBlob[nBlob] ); + + sqlite3_result_blob(pCtx, pBlob, nBlob, sqlite3_free); + } +} + +/* +** Implementation of fts5_insttoken() function. +*/ +static void fts5InsttokenFunc( + sqlite3_context *pCtx, /* Function call context */ + int nArg, /* Number of args */ + sqlite3_value **apArg /* Function arguments */ +){ + assert( nArg==1 ); + (void)nArg; + sqlite3_result_value(pCtx, apArg[0]); + sqlite3_result_subtype(pCtx, FTS5_INSTTOKEN_SUBTYPE); +} + /* ** Return true if zName is the extension on one of the shadow tables used ** by this module. @@ -2811,9 +3691,48 @@ static int fts5ShadowName(const char *zName){ return 0; } +/* +** Run an integrity check on the FTS5 data structures. Return a string +** if anything is found amiss. Return a NULL pointer if everything is +** OK. +*/ +static int fts5IntegrityMethod( + sqlite3_vtab *pVtab, /* the FTS5 virtual table to check */ + const char *zSchema, /* Name of schema in which this table lives */ + const char *zTabname, /* Name of the table itself */ + int isQuick, /* True if this is a quick-check */ + char **pzErr /* Write error message here */ +){ + Fts5FullTable *pTab = (Fts5FullTable*)pVtab; + int rc; + + assert( pzErr!=0 && *pzErr==0 ); + UNUSED_PARAM(isQuick); + assert( pTab->p.pConfig->pzErrmsg==0 ); + pTab->p.pConfig->pzErrmsg = pzErr; + rc = sqlite3Fts5StorageIntegrity(pTab->pStorage, 0); + if( *pzErr==0 && rc!=SQLITE_OK ){ + if( (rc&0xff)==SQLITE_CORRUPT ){ + *pzErr = sqlite3_mprintf("malformed inverted index for FTS5 table %s.%s", + zSchema, zTabname); + rc = (*pzErr) ? 
SQLITE_OK : SQLITE_NOMEM; + }else{ + *pzErr = sqlite3_mprintf("unable to validate the inverted index for" + " FTS5 table %s.%s: %s", + zSchema, zTabname, sqlite3_errstr(rc)); + } + }else if( (rc&0xff)==SQLITE_CORRUPT ){ + rc = SQLITE_OK; + } + sqlite3Fts5IndexCloseReader(pTab->p.pIndex); + pTab->p.pConfig->pzErrmsg = 0; + + return rc; +} + static int fts5Init(sqlite3 *db){ static const sqlite3_module fts5Mod = { - /* iVersion */ 3, + /* iVersion */ 4, /* xCreate */ fts5CreateMethod, /* xConnect */ fts5ConnectMethod, /* xBestIndex */ fts5BestIndexMethod, @@ -2836,7 +3755,8 @@ static int fts5Init(sqlite3 *db){ /* xSavepoint */ fts5SavepointMethod, /* xRelease */ fts5ReleaseMethod, /* xRollbackTo */ fts5RollbackToMethod, - /* xShadowName */ fts5ShadowName + /* xShadowName */ fts5ShadowName, + /* xIntegrity */ fts5IntegrityMethod }; int rc; @@ -2849,10 +3769,22 @@ static int fts5Init(sqlite3 *db){ void *p = (void*)pGlobal; memset(pGlobal, 0, sizeof(Fts5Global)); pGlobal->db = db; - pGlobal->api.iVersion = 2; + pGlobal->api.iVersion = 3; pGlobal->api.xCreateFunction = fts5CreateAux; pGlobal->api.xCreateTokenizer = fts5CreateTokenizer; pGlobal->api.xFindTokenizer = fts5FindTokenizer; + pGlobal->api.xCreateTokenizer_v2 = fts5CreateTokenizer_v2; + pGlobal->api.xFindTokenizer_v2 = fts5FindTokenizer_v2; + + /* Initialize pGlobal->aLocaleHdr[] to a 128-bit pseudo-random vector. + ** The constants below were generated randomly. */ + sqlite3_randomness(sizeof(pGlobal->aLocaleHdr), pGlobal->aLocaleHdr); + pGlobal->aLocaleHdr[0] ^= 0xF924976D; + pGlobal->aLocaleHdr[1] ^= 0x16596E13; + pGlobal->aLocaleHdr[2] ^= 0x7C80BEAA; + pGlobal->aLocaleHdr[3] ^= 0x9B03A67F; + assert( sizeof(pGlobal->aLocaleHdr)==16 ); + rc = sqlite3_create_module_v2(db, "fts5", &fts5Mod, p, fts5ModuleDestroy); if( rc==SQLITE_OK ) rc = sqlite3Fts5IndexInit(db); if( rc==SQLITE_OK ) rc = sqlite3Fts5ExprInit(pGlobal, db); @@ -2866,7 +3798,23 @@ static int fts5Init(sqlite3 *db){ } if( rc==SQLITE_OK ){ rc = sqlite3_create_function( - db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0 + db, "fts5_source_id", 0, + SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS, + p, fts5SourceIdFunc, 0, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_locale", 2, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE|SQLITE_SUBTYPE, + p, fts5LocaleFunc, 0, 0 + ); + } + if( rc==SQLITE_OK ){ + rc = sqlite3_create_function( + db, "fts5_insttoken", 1, + SQLITE_UTF8|SQLITE_INNOCUOUS|SQLITE_RESULT_SUBTYPE, + p, fts5InsttokenFunc, 0, 0 ); } } @@ -2876,8 +3824,8 @@ static int fts5Init(sqlite3 *db){ ** its entry point to enable the matchinfo() demo. */ #ifdef SQLITE_FTS5_ENABLE_TEST_MI if( rc==SQLITE_OK ){ - extern int sqlite3Fts5TestRegisterMatchinfo(sqlite3*); - rc = sqlite3Fts5TestRegisterMatchinfo(db); + extern int sqlite3Fts5TestRegisterMatchinfoAPI(fts5_api*); + rc = sqlite3Fts5TestRegisterMatchinfoAPI(&pGlobal->api); } #endif diff --git a/ext/fts5/fts5_storage.c b/ext/fts5/fts5_storage.c index 02b98d9e44..76820e85b3 100644 --- a/ext/fts5/fts5_storage.c +++ b/ext/fts5/fts5_storage.c @@ -16,13 +16,40 @@ #include "fts5Int.h" +/* +** pSavedRow: +** SQL statement FTS5_STMT_LOOKUP2 is a copy of FTS5_STMT_LOOKUP, it +** does a by-rowid lookup to retrieve a single row from the %_content +** table or equivalent external-content table/view. +** +** However, FTS5_STMT_LOOKUP2 is only used when retrieving the original +** values for a row being UPDATEd. 
In that case, the SQL statement is +** not reset and pSavedRow is set to point at it. This is so that the +** insert operation that follows the delete may access the original +** row values for any new values for which sqlite3_value_nochange() returns +** true. i.e. if the user executes: +** +** CREATE VIRTUAL TABLE ft USING fts5(a, b, c, locale=1); +** ... +** UPDATE fts SET a=?, b=? WHERE rowid=?; +** +** then the value passed to the xUpdate() method of this table as the +** new.c value is an sqlite3_value_nochange() value. So in this case it +** must be read from the saved row stored in Fts5Storage.pSavedRow. +** +** This is necessary - using sqlite3_value_nochange() instead of just having +** SQLite pass the original value back via xUpdate() - so as not to discard +** any locale information associated with such values. +** +*/ struct Fts5Storage { Fts5Config *pConfig; Fts5Index *pIndex; int bTotalsValid; /* True if nTotalRow/aTotalSize[] are valid */ i64 nTotalRow; /* Total number of rows in FTS table */ i64 *aTotalSize; /* Total sizes of each column */ - sqlite3_stmt *aStmt[11]; + sqlite3_stmt *pSavedRow; + sqlite3_stmt *aStmt[12]; }; @@ -36,14 +63,15 @@ struct Fts5Storage { # error "FTS5_STMT_LOOKUP mismatch" #endif -#define FTS5_STMT_INSERT_CONTENT 3 -#define FTS5_STMT_REPLACE_CONTENT 4 -#define FTS5_STMT_DELETE_CONTENT 5 -#define FTS5_STMT_REPLACE_DOCSIZE 6 -#define FTS5_STMT_DELETE_DOCSIZE 7 -#define FTS5_STMT_LOOKUP_DOCSIZE 8 -#define FTS5_STMT_REPLACE_CONFIG 9 -#define FTS5_STMT_SCAN 10 +#define FTS5_STMT_LOOKUP2 3 +#define FTS5_STMT_INSERT_CONTENT 4 +#define FTS5_STMT_REPLACE_CONTENT 5 +#define FTS5_STMT_DELETE_CONTENT 6 +#define FTS5_STMT_REPLACE_DOCSIZE 7 +#define FTS5_STMT_DELETE_DOCSIZE 8 +#define FTS5_STMT_LOOKUP_DOCSIZE 9 +#define FTS5_STMT_REPLACE_CONFIG 10 +#define FTS5_STMT_SCAN 11 /* ** Prepare the two insert statements - Fts5Storage.pInsertContent and @@ -73,14 +101,15 @@ static int fts5StorageGetStmt( "SELECT %s FROM %s T WHERE T.%Q >= ? AND T.%Q <= ? ORDER BY T.%Q ASC", "SELECT %s FROM %s T WHERE T.%Q <= ? AND T.%Q >= ? 
ORDER BY T.%Q DESC", "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP */ + "SELECT %s FROM %s T WHERE T.%Q=?", /* LOOKUP2 */ "INSERT INTO %Q.'%q_content' VALUES(%s)", /* INSERT_CONTENT */ "REPLACE INTO %Q.'%q_content' VALUES(%s)", /* REPLACE_CONTENT */ "DELETE FROM %Q.'%q_content' WHERE id=?", /* DELETE_CONTENT */ - "REPLACE INTO %Q.'%q_docsize' VALUES(?,?)", /* REPLACE_DOCSIZE */ + "REPLACE INTO %Q.'%q_docsize' VALUES(?,?%s)", /* REPLACE_DOCSIZE */ "DELETE FROM %Q.'%q_docsize' WHERE id=?", /* DELETE_DOCSIZE */ - "SELECT sz FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ + "SELECT sz%s FROM %Q.'%q_docsize' WHERE id=?", /* LOOKUP_DOCSIZE */ "REPLACE INTO %Q.'%q_config' VALUES(?,?)", /* REPLACE_CONFIG */ "SELECT %s FROM %s AS T", /* SCAN */ @@ -88,6 +117,8 @@ static int fts5StorageGetStmt( Fts5Config *pC = p->pConfig; char *zSql = 0; + assert( ArraySize(azStmt)==ArraySize(p->aStmt) ); + switch( eStmt ){ case FTS5_STMT_SCAN: zSql = sqlite3_mprintf(azStmt[eStmt], @@ -104,6 +135,7 @@ static int fts5StorageGetStmt( break; case FTS5_STMT_LOOKUP: + case FTS5_STMT_LOOKUP2: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zContentExprlist, pC->zContent, pC->zContentRowid ); @@ -111,23 +143,51 @@ static int fts5StorageGetStmt( case FTS5_STMT_INSERT_CONTENT: case FTS5_STMT_REPLACE_CONTENT: { - int nCol = pC->nCol + 1; - char *zBind; + char *zBind = 0; int i; - zBind = sqlite3_malloc64(1 + nCol*2); - if( zBind ){ - for(i=0; ieContent==FTS5_CONTENT_NORMAL + || pC->eContent==FTS5_CONTENT_UNINDEXED + ); + + /* Add bindings for the "c*" columns - those that store the actual + ** table content. If eContent==NORMAL, then there is one binding + ** for each column. Or, if eContent==UNINDEXED, then there are only + ** bindings for the UNINDEXED columns. */ + for(i=0; rc==SQLITE_OK && i<(pC->nCol+1); i++){ + if( !i || pC->eContent==FTS5_CONTENT_NORMAL || pC->abUnindexed[i-1] ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z%s?%d", zBind, zBind?",":"",i+1); } - zBind[i*2-1] = '\0'; - zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, zBind); - sqlite3_free(zBind); } + + /* Add bindings for any "l*" columns. Only non-UNINDEXED columns + ** require these. */ + if( pC->bLocale && pC->eContent==FTS5_CONTENT_NORMAL ){ + for(i=0; rc==SQLITE_OK && inCol; i++){ + if( pC->abUnindexed[i]==0 ){ + zBind = sqlite3Fts5Mprintf(&rc, "%z,?%d", zBind, pC->nCol+i+2); + } + } + } + + zSql = sqlite3Fts5Mprintf(&rc, azStmt[eStmt], pC->zDb, pC->zName,zBind); + sqlite3_free(zBind); break; } + case FTS5_STMT_REPLACE_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName, + (pC->bContentlessDelete ? ",?" : "") + ); + break; + + case FTS5_STMT_LOOKUP_DOCSIZE: + zSql = sqlite3_mprintf(azStmt[eStmt], + (pC->bContentlessDelete ? 
",origin" : ""), + pC->zDb, pC->zName + ); + break; + default: zSql = sqlite3_mprintf(azStmt[eStmt], pC->zDb, pC->zName); break; @@ -137,7 +197,7 @@ static int fts5StorageGetStmt( rc = SQLITE_NOMEM; }else{ int f = SQLITE_PREPARE_PERSISTENT; - if( eStmt>FTS5_STMT_LOOKUP ) f |= SQLITE_PREPARE_NO_VTAB; + if( eStmt>FTS5_STMT_LOOKUP2 ) f |= SQLITE_PREPARE_NO_VTAB; p->pConfig->bLock++; rc = sqlite3_prepare_v3(pC->db, zSql, -1, f, &p->aStmt[eStmt], 0); p->pConfig->bLock--; @@ -145,6 +205,11 @@ static int fts5StorageGetStmt( if( rc!=SQLITE_OK && pzErrMsg ){ *pzErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pC->db)); } + if( rc==SQLITE_ERROR && eStmt>FTS5_STMT_LOOKUP2 && eStmtpIndex = pIndex; if( bCreate ){ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ int nDefn = 32 + pConfig->nCol*10; - char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 10); + char *zDefn = sqlite3_malloc64(32 + (sqlite3_int64)pConfig->nCol * 20); if( zDefn==0 ){ rc = SQLITE_NOMEM; }else{ @@ -308,8 +375,20 @@ int sqlite3Fts5StorageOpen( sqlite3_snprintf(nDefn, zDefn, "id INTEGER PRIMARY KEY"); iOff = (int)strlen(zDefn); for(i=0; inCol; i++){ - sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); - iOff += (int)strlen(&zDefn[iOff]); + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->abUnindexed[i] + ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", c%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } + if( pConfig->bLocale ){ + for(i=0; inCol; i++){ + if( pConfig->abUnindexed[i]==0 ){ + sqlite3_snprintf(nDefn-iOff, &zDefn[iOff], ", l%d", i); + iOff += (int)strlen(&zDefn[iOff]); + } + } } rc = sqlite3Fts5CreateTable(pConfig, "content", zDefn, 0, pzErr); } @@ -317,9 +396,11 @@ int sqlite3Fts5StorageOpen( } if( rc==SQLITE_OK && pConfig->bColumnsize ){ - rc = sqlite3Fts5CreateTable( - pConfig, "docsize", "id INTEGER PRIMARY KEY, sz BLOB", 0, pzErr - ); + const char *zCols = "id INTEGER PRIMARY KEY, sz BLOB"; + if( pConfig->bContentlessDelete ){ + zCols = "id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER"; + } + rc = sqlite3Fts5CreateTable(pConfig, "docsize", zCols, 0, pzErr); } if( rc==SQLITE_OK ){ rc = sqlite3Fts5CreateTable( @@ -384,58 +465,129 @@ static int fts5StorageInsertCallback( return sqlite3Fts5IndexWrite(pIdx, pCtx->iCol, pCtx->szCol-1, pToken, nToken); } +/* +** This function is used as part of an UPDATE statement that modifies the +** rowid of a row. In that case, this function is called first to set +** Fts5Storage.pSavedRow to point to a statement that may be used to +** access the original values of the row being deleted - iDel. +** +** SQLITE_OK is returned if successful, or an SQLite error code otherwise. +** It is not considered an error if row iDel does not exist. In this case +** pSavedRow is not set and SQLITE_OK returned. +*/ +int sqlite3Fts5StorageFindDeleteRow(Fts5Storage *p, i64 iDel){ + int rc = SQLITE_OK; + sqlite3_stmt *pSeek = 0; + + assert( p->pSavedRow==0 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+1, &pSeek, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + rc = sqlite3_reset(pSeek); + }else{ + p->pSavedRow = pSeek; + } + } + + return rc; +} + /* ** If a row with rowid iDel is present in the %_content table, add the ** delete-markers to the FTS index necessary to delete it. Do not actually ** remove the %_content row at this time though. 
+** +** If parameter bSaveRow is true, then Fts5Storage.pSavedRow is left +** pointing to a statement (FTS5_STMT_LOOKUP2) that may be used to access +** the original values of the row being deleted. This is used by UPDATE +** statements. */ static int fts5StorageDeleteFromIndex( Fts5Storage *p, i64 iDel, - sqlite3_value **apVal + sqlite3_value **apVal, + int bSaveRow /* True to set pSavedRow */ ){ Fts5Config *pConfig = p->pConfig; sqlite3_stmt *pSeek = 0; /* SELECT to read row iDel from %_data */ - int rc; /* Return code */ + int rc = SQLITE_OK; /* Return code */ int rc2; /* sqlite3_reset() return code */ int iCol; Fts5InsertCtx ctx; + assert( bSaveRow==0 || apVal==0 ); + assert( bSaveRow==0 || bSaveRow==1 ); + assert( FTS5_STMT_LOOKUP2==FTS5_STMT_LOOKUP+1 ); + if( apVal==0 ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP, &pSeek, 0); - if( rc!=SQLITE_OK ) return rc; - sqlite3_bind_int64(pSeek, 1, iDel); - if( sqlite3_step(pSeek)!=SQLITE_ROW ){ - return sqlite3_reset(pSeek); + if( p->pSavedRow && bSaveRow ){ + pSeek = p->pSavedRow; + p->pSavedRow = 0; + }else{ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP+bSaveRow, &pSeek, 0); + if( rc!=SQLITE_OK ) return rc; + sqlite3_bind_int64(pSeek, 1, iDel); + if( sqlite3_step(pSeek)!=SQLITE_ROW ){ + return sqlite3_reset(pSeek); + } } } ctx.pStorage = p; ctx.iCol = -1; - rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); for(iCol=1; rc==SQLITE_OK && iCol<=pConfig->nCol; iCol++){ if( pConfig->abUnindexed[iCol-1]==0 ){ - const char *zText; - int nText; + sqlite3_value *pVal = 0; + sqlite3_value *pFree = 0; + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + assert( pSeek==0 || apVal==0 ); assert( pSeek!=0 || apVal!=0 ); if( pSeek ){ - zText = (const char*)sqlite3_column_text(pSeek, iCol); - nText = sqlite3_column_bytes(pSeek, iCol); - }else if( ALWAYS(apVal) ){ - zText = (const char*)sqlite3_value_text(apVal[iCol-1]); - nText = sqlite3_value_bytes(apVal[iCol-1]); + pVal = sqlite3_column_value(pSeek, iCol); }else{ - continue; + pVal = apVal[iCol-1]; } - ctx.szCol = 0; - rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, - zText, nText, (void*)&ctx, fts5StorageInsertCallback - ); - p->aTotalSize[iCol-1] -= (i64)ctx.szCol; - if( p->aTotalSize[iCol-1]<0 ){ - rc = FTS5_CORRUPT; + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + if( sqlite3_value_type(pVal)!=SQLITE_TEXT ){ + /* Make a copy of the value to work with. This is because the call + ** to sqlite3_value_text() below forces the type of the value to + ** SQLITE_TEXT, and we may need to use it again later. 
*/ + pFree = pVal = sqlite3_value_dup(pVal); + if( pVal==0 ){ + rc = SQLITE_NOMEM; + } + } + if( rc==SQLITE_OK ){ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale && pSeek ){ + pLoc = (const char*)sqlite3_column_text(pSeek, iCol+pConfig->nCol); + nLoc = sqlite3_column_bytes(pSeek, iCol + pConfig->nCol); + } + } } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + ctx.szCol = 0; + rc = sqlite3Fts5Tokenize(pConfig, FTS5_TOKENIZE_DOCUMENT, + pText, nText, (void*)&ctx, fts5StorageInsertCallback + ); + p->aTotalSize[iCol-1] -= (i64)ctx.szCol; + if( rc==SQLITE_OK && p->aTotalSize[iCol-1]<0 ){ + rc = FTS5_CORRUPT; + } + sqlite3Fts5ClearLocale(pConfig); + } + sqlite3_value_free(pFree); } } if( rc==SQLITE_OK && p->nTotalRow<1 ){ @@ -444,11 +596,62 @@ static int fts5StorageDeleteFromIndex( p->nTotalRow--; } - rc2 = sqlite3_reset(pSeek); - if( rc==SQLITE_OK ) rc = rc2; + if( rc==SQLITE_OK && bSaveRow ){ + assert( p->pSavedRow==0 ); + p->pSavedRow = pSeek; + }else{ + rc2 = sqlite3_reset(pSeek); + if( rc==SQLITE_OK ) rc = rc2; + } return rc; } +/* +** Reset any saved statement pSavedRow. Zero pSavedRow as well. This +** should be called by the xUpdate() method of the fts5 table before +** returning from any operation that may have set Fts5Storage.pSavedRow. +*/ +void sqlite3Fts5StorageReleaseDeleteRow(Fts5Storage *pStorage){ + assert( pStorage->pSavedRow==0 + || pStorage->pSavedRow==pStorage->aStmt[FTS5_STMT_LOOKUP2] + ); + sqlite3_reset(pStorage->pSavedRow); + pStorage->pSavedRow = 0; +} + +/* +** This function is called to process a DELETE on a contentless_delete=1 +** table. It adds the tombstone required to delete the entry with rowid +** iDel. If successful, SQLITE_OK is returned. Or, if an error occurs, +** an SQLite error code. +*/ +static int fts5StorageContentlessDelete(Fts5Storage *p, i64 iDel){ + i64 iOrigin = 0; + sqlite3_stmt *pLookup = 0; + int rc = SQLITE_OK; + + assert( p->pConfig->bContentlessDelete ); + assert( p->pConfig->eContent==FTS5_CONTENT_NONE + || p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ); + + /* Look up the origin of the document in the %_docsize table. Store + ** this in stack variable iOrigin. */ + rc = fts5StorageGetStmt(p, FTS5_STMT_LOOKUP_DOCSIZE, &pLookup, 0); + if( rc==SQLITE_OK ){ + sqlite3_bind_int64(pLookup, 1, iDel); + if( SQLITE_ROW==sqlite3_step(pLookup) ){ + iOrigin = sqlite3_column_int64(pLookup, 1); + } + rc = sqlite3_reset(pLookup); + } + + if( rc==SQLITE_OK && iOrigin!=0 ){ + rc = sqlite3Fts5IndexContentlessDelete(p->pIndex, iOrigin, iDel); + } + + return rc; +} /* ** Insert a record into the %_docsize table. Specifically, do: @@ -469,6 +672,13 @@ static int fts5StorageInsertDocsize( rc = fts5StorageGetStmt(p, FTS5_STMT_REPLACE_DOCSIZE, &pReplace, 0); if( rc==SQLITE_OK ){ sqlite3_bind_int64(pReplace, 1, iRowid); + if( p->pConfig->bContentlessDelete ){ + i64 iOrigin = 0; + rc = sqlite3Fts5IndexGetOrigin(p->pIndex, &iOrigin); + sqlite3_bind_int64(pReplace, 3, iOrigin); + } + } + if( rc==SQLITE_OK ){ sqlite3_bind_blob(pReplace, 2, pBuf->p, pBuf->n, SQLITE_STATIC); sqlite3_step(pReplace); rc = sqlite3_reset(pReplace); @@ -526,7 +736,12 @@ static int fts5StorageSaveTotals(Fts5Storage *p){ /* ** Remove a row from the FTS table. 
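For orientation, here is a minimal sketch of the order in which the xUpdate() path is expected to drive these storage routines for an UPDATE that keeps its rowid, given the bSaveRow/pSavedRow plumbing introduced above. This is illustrative only: the wrapper name is invented, error handling is reduced to early exits, and the existing sqlite3Fts5StorageIndexInsert(p, apVal, iRowid) signature is assumed; the real driver is the xUpdate() method in fts5.c.

static int exampleUpdateKeepRowid(
  Fts5Storage *pStorage,           /* Storage object for the fts5 table */
  i64 iRowid,                      /* Rowid being updated */
  sqlite3_value **apVal            /* Values passed to xUpdate() */
){
  i64 iNew = 0;

  /* Remove the old row, saving it so that columns for which
  ** sqlite3_value_nochange() is true can be re-read below. */
  int rc = sqlite3Fts5StorageDelete(pStorage, iRowid, 0, 1);

  /* Write the new %_content row (plain INSERT - the old row was removed
  ** above), then the new index entries. Unchanged columns, and any locale
  ** values, are read back from Fts5Storage.pSavedRow. */
  if( rc==SQLITE_OK ){
    rc = sqlite3Fts5StorageContentInsert(pStorage, 0, apVal, &iNew);
  }
  if( rc==SQLITE_OK ){
    rc = sqlite3Fts5StorageIndexInsert(pStorage, apVal, iNew);
  }

  /* Always release the saved row before returning control to SQLite. */
  sqlite3Fts5StorageReleaseDeleteRow(pStorage);
  return rc;
}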
*/ -int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ +int sqlite3Fts5StorageDelete( + Fts5Storage *p, /* Storage object */ + i64 iDel, /* Rowid to delete from table */ + sqlite3_value **apVal, /* Optional - values to remove from index */ + int bSaveRow /* If true, set pSavedRow for deleted row */ +){ Fts5Config *pConfig = p->pConfig; int rc; sqlite3_stmt *pDel = 0; @@ -536,7 +751,21 @@ int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ /* Delete the index records */ if( rc==SQLITE_OK ){ - rc = fts5StorageDeleteFromIndex(p, iDel, apVal); + rc = sqlite3Fts5IndexBeginWrite(p->pIndex, 1, iDel); + } + + if( rc==SQLITE_OK ){ + if( p->pConfig->bContentlessDelete ){ + rc = fts5StorageContentlessDelete(p, iDel); + if( rc==SQLITE_OK + && bSaveRow + && p->pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ + rc = sqlite3Fts5StorageFindDeleteRow(p, iDel); + } + }else{ + rc = fts5StorageDeleteFromIndex(p, iDel, apVal, bSaveRow); + } } /* Delete the %_docsize record */ @@ -550,7 +779,9 @@ int sqlite3Fts5StorageDelete(Fts5Storage *p, i64 iDel, sqlite3_value **apVal){ } /* Delete the %_content record */ - if( pConfig->eContent==FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent==FTS5_CONTENT_NORMAL + || pConfig->eContent==FTS5_CONTENT_UNINDEXED + ){ if( rc==SQLITE_OK ){ rc = fts5StorageGetStmt(p, FTS5_STMT_DELETE_CONTENT, &pDel, 0); } @@ -582,8 +813,13 @@ int sqlite3Fts5StorageDeleteAll(Fts5Storage *p){ ); if( rc==SQLITE_OK && pConfig->bColumnsize ){ rc = fts5ExecPrintf(pConfig->db, 0, - "DELETE FROM %Q.'%q_docsize';", - pConfig->zDb, pConfig->zName + "DELETE FROM %Q.'%q_docsize';", pConfig->zDb, pConfig->zName + ); + } + + if( rc==SQLITE_OK && pConfig->eContent==FTS5_CONTENT_UNINDEXED ){ + rc = fts5ExecPrintf(pConfig->db, 0, + "DELETE FROM %Q.'%q_content';", pConfig->zDb, pConfig->zName ); } @@ -613,7 +849,7 @@ int sqlite3Fts5StorageRebuild(Fts5Storage *p){ } if( rc==SQLITE_OK ){ - rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, 0); + rc = fts5StorageGetStmt(p, FTS5_STMT_SCAN, &pScan, pConfig->pzErrmsg); } while( rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pScan) ){ @@ -624,14 +860,36 @@ int sqlite3Fts5StorageRebuild(Fts5Storage *p){ for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_column_text(pScan, ctx.iCol+1); - int nText = sqlite3_column_bytes(pScan, ctx.iCol+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pLoc in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = sqlite3_column_value(pScan, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + if( pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } 
} sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -697,6 +955,7 @@ static int fts5StorageNewRowid(Fts5Storage *p, i64 *piRowid){ */ int sqlite3Fts5StorageContentInsert( Fts5Storage *p, + int bReplace, /* True to use REPLACE instead of INSERT */ sqlite3_value **apVal, i64 *piRowid ){ @@ -704,7 +963,9 @@ int sqlite3Fts5StorageContentInsert( int rc = SQLITE_OK; /* Insert the new row into the %_content table. */ - if( pConfig->eContent!=FTS5_CONTENT_NORMAL ){ + if( pConfig->eContent!=FTS5_CONTENT_NORMAL + && pConfig->eContent!=FTS5_CONTENT_UNINDEXED + ){ if( sqlite3_value_type(apVal[1])==SQLITE_INTEGER ){ *piRowid = sqlite3_value_int64(apVal[1]); }else{ @@ -713,9 +974,52 @@ int sqlite3Fts5StorageContentInsert( }else{ sqlite3_stmt *pInsert = 0; /* Statement to write %_content table */ int i; /* Counter variable */ - rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT, &pInsert, 0); - for(i=1; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ - rc = sqlite3_bind_value(pInsert, i, apVal[i]); + + assert( FTS5_STMT_INSERT_CONTENT+1==FTS5_STMT_REPLACE_CONTENT ); + assert( bReplace==0 || bReplace==1 ); + rc = fts5StorageGetStmt(p, FTS5_STMT_INSERT_CONTENT+bReplace, &pInsert, 0); + if( pInsert ) sqlite3_clear_bindings(pInsert); + + /* Bind the rowid value */ + sqlite3_bind_value(pInsert, 1, apVal[1]); + + /* Loop through values for user-defined columns. i=2 is the leftmost + ** user-defined column. As is column 1 of pSavedRow. */ + for(i=2; rc==SQLITE_OK && i<=pConfig->nCol+1; i++){ + int bUnindexed = pConfig->abUnindexed[i-2]; + if( pConfig->eContent==FTS5_CONTENT_NORMAL || bUnindexed ){ + sqlite3_value *pVal = apVal[i]; + + if( sqlite3_value_nochange(pVal) && p->pSavedRow ){ + /* This is an UPDATE statement, and user-defined column (i-2) was not + ** modified. Retrieve the value from Fts5Storage.pSavedRow. 
*/ + pVal = sqlite3_column_value(p->pSavedRow, i-1); + if( pConfig->bLocale && bUnindexed==0 ){ + sqlite3_bind_value(pInsert, pConfig->nCol + i, + sqlite3_column_value(p->pSavedRow, pConfig->nCol + i - 1) + ); + } + }else if( sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + const char *pText = 0; + const char *pLoc = 0; + int nText = 0; + int nLoc = 0; + assert( pConfig->bLocale ); + + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + if( rc==SQLITE_OK ){ + sqlite3_bind_text(pInsert, i, pText, nText, SQLITE_TRANSIENT); + if( bUnindexed==0 ){ + int iLoc = pConfig->nCol + i; + sqlite3_bind_text(pInsert, iLoc, pLoc, nLoc, SQLITE_TRANSIENT); + } + } + + continue; + } + + rc = sqlite3_bind_value(pInsert, i, pVal); + } } if( rc==SQLITE_OK ){ sqlite3_step(pInsert); @@ -750,14 +1054,38 @@ int sqlite3Fts5StorageIndexInsert( for(ctx.iCol=0; rc==SQLITE_OK && ctx.iColnCol; ctx.iCol++){ ctx.szCol = 0; if( pConfig->abUnindexed[ctx.iCol]==0 ){ - const char *zText = (const char*)sqlite3_value_text(apVal[ctx.iCol+2]); - int nText = sqlite3_value_bytes(apVal[ctx.iCol+2]); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageInsertCallback - ); + int nText = 0; /* Size of pText in bytes */ + const char *pText = 0; /* Pointer to buffer containing text value */ + int nLoc = 0; /* Size of pText in bytes */ + const char *pLoc = 0; /* Pointer to buffer containing text value */ + + sqlite3_value *pVal = apVal[ctx.iCol+2]; + if( p->pSavedRow && sqlite3_value_nochange(pVal) ){ + pVal = sqlite3_column_value(p->pSavedRow, ctx.iCol+1); + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = ctx.iCol + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(p->pSavedRow, iCol); + nLoc = sqlite3_column_bytes(p->pSavedRow, iCol); + } + }else{ + pVal = apVal[ctx.iCol+2]; + } + + if( pConfig->bLocale && sqlite3Fts5IsLocaleValue(pConfig, pVal) ){ + rc = sqlite3Fts5DecodeLocaleValue(pVal, &pText, &nText, &pLoc, &nLoc); + }else{ + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, pText, nText, (void*)&ctx, + fts5StorageInsertCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } } sqlite3Fts5BufferAppendVarint(&rc, &buf, ctx.szCol); p->aTotalSize[ctx.iCol] += (i64)ctx.szCol; @@ -921,29 +1249,61 @@ int sqlite3Fts5StorageIntegrity(Fts5Storage *p, int iArg){ rc = sqlite3Fts5TermsetNew(&ctx.pTermset); } for(i=0; rc==SQLITE_OK && inCol; i++){ - if( pConfig->abUnindexed[i] ) continue; - ctx.iCol = i; - ctx.szCol = 0; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - rc = sqlite3Fts5TermsetNew(&ctx.pTermset); - } - if( rc==SQLITE_OK ){ - const char *zText = (const char*)sqlite3_column_text(pScan, i+1); - int nText = sqlite3_column_bytes(pScan, i+1); - rc = sqlite3Fts5Tokenize(pConfig, - FTS5_TOKENIZE_DOCUMENT, - zText, nText, - (void*)&ctx, - fts5StorageIntegrityCallback - ); - } - if( rc==SQLITE_OK && pConfig->bColumnsize && ctx.szCol!=aColSize[i] ){ - rc = FTS5_CORRUPT; - } - aTotalSize[i] += ctx.szCol; - if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ - sqlite3Fts5TermsetFree(ctx.pTermset); - ctx.pTermset = 0; + if( pConfig->abUnindexed[i]==0 ){ + const char *pText = 0; + int nText = 0; + const char *pLoc = 0; + int nLoc = 0; + sqlite3_value *pVal = sqlite3_column_value(pScan, i+1); + + if( pConfig->eContent==FTS5_CONTENT_EXTERNAL + && sqlite3Fts5IsLocaleValue(pConfig, 
pVal) + ){ + rc = sqlite3Fts5DecodeLocaleValue( + pVal, &pText, &nText, &pLoc, &nLoc + ); + }else{ + if( pConfig->eContent==FTS5_CONTENT_NORMAL && pConfig->bLocale ){ + int iCol = i + 1 + pConfig->nCol; + pLoc = (const char*)sqlite3_column_text(pScan, iCol); + nLoc = sqlite3_column_bytes(pScan, iCol); + } + pText = (const char*)sqlite3_value_text(pVal); + nText = sqlite3_value_bytes(pVal); + } + + ctx.iCol = i; + ctx.szCol = 0; + + if( rc==SQLITE_OK && pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + rc = sqlite3Fts5TermsetNew(&ctx.pTermset); + } + + if( rc==SQLITE_OK ){ + sqlite3Fts5SetLocale(pConfig, pLoc, nLoc); + rc = sqlite3Fts5Tokenize(pConfig, + FTS5_TOKENIZE_DOCUMENT, + pText, nText, + (void*)&ctx, + fts5StorageIntegrityCallback + ); + sqlite3Fts5ClearLocale(pConfig); + } + + /* If this is not a columnsize=0 database, check that the number + ** of tokens in the value matches the aColSize[] value read from + ** the %_docsize table. */ + if( rc==SQLITE_OK + && pConfig->bColumnsize + && ctx.szCol!=aColSize[i] + ){ + rc = FTS5_CORRUPT; + } + aTotalSize[i] += ctx.szCol; + if( pConfig->eDetail==FTS5_DETAIL_COLUMNS ){ + sqlite3Fts5TermsetFree(ctx.pTermset); + ctx.pTermset = 0; + } } } sqlite3Fts5TermsetFree(ctx.pTermset); @@ -1124,7 +1484,9 @@ int sqlite3Fts5StorageSync(Fts5Storage *p){ i64 iLastRowid = sqlite3_last_insert_rowid(p->pConfig->db); if( p->bTotalsValid ){ rc = fts5StorageSaveTotals(p); - p->bTotalsValid = 0; + if( rc==SQLITE_OK ){ + p->bTotalsValid = 0; + } } if( rc==SQLITE_OK ){ rc = sqlite3Fts5IndexSync(p->pIndex); diff --git a/ext/fts5/fts5_tcl.c b/ext/fts5/fts5_tcl.c index 80c600dbb1..25cd5c0633 100644 --- a/ext/fts5/fts5_tcl.c +++ b/ext/fts5/fts5_tcl.c @@ -14,20 +14,14 @@ #ifdef SQLITE_TEST -#if defined(INCLUDE_SQLITE_TCL_H) -# include "sqlite_tcl.h" -#else -# include "tcl.h" -# ifndef SQLITE_TCLAPI -# define SQLITE_TCLAPI -# endif -#endif +#include "tclsqlite.h" #ifdef SQLITE_ENABLE_FTS5 #include "fts5.h" #include #include +#include #ifdef SQLITE_DEBUG extern int sqlite3_fts5_may_be_corrupt; @@ -103,14 +97,14 @@ static int SQLITE_TCLAPI f5tDbAndApi( rc = sqlite3_prepare_v2(db, "SELECT fts5(?1)", -1, &pStmt, 0); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), 0); + Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), (char*)0); return TCL_ERROR; } sqlite3_bind_pointer(pStmt, 1, (void*)&pApi, "fts5_api_ptr", 0); sqlite3_step(pStmt); if( sqlite3_finalize(pStmt)!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), 0); + Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), (char*)0); return TCL_ERROR; } @@ -244,6 +238,10 @@ static int SQLITE_TCLAPI xF5tApi( { "xGetAuxdataInt", 1, "CLEAR" }, /* 15 */ { "xPhraseForeach", 4, "IPHRASE COLVAR OFFVAR SCRIPT" }, /* 16 */ { "xPhraseColumnForeach", 3, "IPHRASE COLVAR SCRIPT" }, /* 17 */ + + { "xQueryToken", 2, "IPHRASE ITERM" }, /* 18 */ + { "xInstToken", 2, "IDX ITERM" }, /* 19 */ + { "xColumnLocale", 1, "COL" }, /* 20 */ { 0, 0, 0} }; @@ -294,12 +292,12 @@ static int SQLITE_TCLAPI xF5tApi( break; } CASE(3, "xTokenize") { - int nText; + Tcl_Size nText; char *zText = Tcl_GetStringFromObj(objv[2], &nText); F5tFunction ctx; ctx.interp = interp; ctx.pScript = objv[3]; - rc = p->pApi->xTokenize(p->pFts, zText, nText, &ctx, xTokenizeCb); + rc = p->pApi->xTokenize(p->pFts, zText, (int)nText, &ctx, xTokenizeCb); if( rc==SQLITE_OK ){ Tcl_ResetResult(interp); } @@ -395,7 +393,7 @@ static int SQLITE_TCLAPI xF5tApi( CASE(12, "xSetAuxdata") { F5tAuxData *pData = 
(F5tAuxData*)sqlite3_malloc(sizeof(F5tAuxData)); if( pData==0 ){ - Tcl_AppendResult(interp, "out of memory", 0); + Tcl_AppendResult(interp, "out of memory", (char*)0); return TCL_ERROR; } pData->pObj = objv[2]; @@ -455,7 +453,7 @@ static int SQLITE_TCLAPI xF5tApi( rc = p->pApi->xPhraseFirst(p->pFts, iPhrase, &iter, &iCol, &iOff); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, sqlite3ErrName(rc), 0); + Tcl_AppendResult(interp, sqlite3ErrName(rc), (char*)0); return TCL_ERROR; } for( ;iCol>=0; p->pApi->xPhraseNext(p->pFts, &iter, &iCol, &iOff) ){ @@ -500,6 +498,52 @@ static int SQLITE_TCLAPI xF5tApi( break; } + CASE(18, "xQueryToken") { + const char *pTerm = 0; + int nTerm = 0; + int iPhrase = 0; + int iTerm = 0; + + if( Tcl_GetIntFromObj(interp, objv[2], &iPhrase) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[3], &iTerm) ) return TCL_ERROR; + rc = p->pApi->xQueryToken(p->pFts, iPhrase, iTerm, &pTerm, &nTerm); + if( rc==SQLITE_OK ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(pTerm, nTerm)); + } + + break; + } + + CASE(19, "xInstToken") { + const char *pTerm = 0; + int nTerm = 0; + int iIdx = 0; + int iTerm = 0; + + if( Tcl_GetIntFromObj(interp, objv[2], &iIdx) ) return TCL_ERROR; + if( Tcl_GetIntFromObj(interp, objv[3], &iTerm) ) return TCL_ERROR; + rc = p->pApi->xInstToken(p->pFts, iIdx, iTerm, &pTerm, &nTerm); + if( rc==SQLITE_OK ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(pTerm, nTerm)); + } + + break; + } + + CASE(20, "xColumnLocale") { + const char *z = 0; + int n = 0; + int iCol; + if( Tcl_GetIntFromObj(interp, objv[2], &iCol) ){ + return TCL_ERROR; + } + rc = p->pApi->xColumnLocale(p->pFts, iCol, &z, &n); + if( rc==SQLITE_OK && z ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(z, n)); + } + break; + } + default: assert( 0 ); break; @@ -570,15 +614,16 @@ static void xF5tFunction( sqlite3_result_error(pCtx, Tcl_GetStringResult(p->interp), -1); }else{ Tcl_Obj *pVar = Tcl_GetObjResult(p->interp); - int n; const char *zType = (pVar->typePtr ? pVar->typePtr->name : ""); char c = zType[0]; if( c=='b' && strcmp(zType,"bytearray")==0 && pVar->bytes==0 ){ /* Only return a BLOB type if the Tcl variable is a bytearray and ** has no string representation. 
*/ - unsigned char *data = Tcl_GetByteArrayFromObj(pVar, &n); - sqlite3_result_blob(pCtx, data, n, SQLITE_TRANSIENT); + Tcl_Size nn; + unsigned char *data = Tcl_GetByteArrayFromObj(pVar, &nn); + sqlite3_result_blob(pCtx, data, (int)nn, SQLITE_TRANSIENT); }else if( c=='b' && strcmp(zType,"boolean")==0 ){ + int n; Tcl_GetIntFromObj(0, pVar, &n); sqlite3_result_int(pCtx, n); }else if( c=='d' && strcmp(zType,"double")==0 ){ @@ -591,8 +636,9 @@ static void xF5tFunction( Tcl_GetWideIntFromObj(0, pVar, &v); sqlite3_result_int64(pCtx, v); }else{ - unsigned char *data = (unsigned char *)Tcl_GetStringFromObj(pVar, &n); - sqlite3_result_text(pCtx, (char *)data, n, SQLITE_TRANSIENT); + Tcl_Size nn; + unsigned char *data = (unsigned char *)Tcl_GetStringFromObj(pVar, &nn); + sqlite3_result_text(pCtx, (char*)data, (int)nn, SQLITE_TRANSIENT); } } } @@ -638,7 +684,7 @@ static int SQLITE_TCLAPI f5tCreateFunction( pApi, zName, (void*)pCtx, xF5tFunction, xF5tDestroy ); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), 0); + Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), (char*)0); return TCL_ERROR; } @@ -684,8 +730,9 @@ static int SQLITE_TCLAPI f5tTokenize( int objc, Tcl_Obj *CONST objv[] ){ - char *zText; - int nText; + char *pCopy = 0; + char *zText = 0; + Tcl_Size nText = 0; sqlite3 *db = 0; fts5_api *pApi = 0; Fts5Tokenizer *pTok = 0; @@ -694,7 +741,7 @@ static int SQLITE_TCLAPI f5tTokenize( void *pUserdata; int rc; - int nArg; + Tcl_Size nArg; const char **azArg; F5tTokenizeCtx ctx; @@ -705,7 +752,7 @@ static int SQLITE_TCLAPI f5tTokenize( if( objc==5 ){ char *zOpt = Tcl_GetString(objv[1]); if( strcmp("-subst", zOpt) ){ - Tcl_AppendResult(interp, "unrecognized option: ", zOpt, 0); + Tcl_AppendResult(interp, "unrecognized option: ", zOpt, (char*)0); return TCL_ERROR; } } @@ -714,7 +761,7 @@ static int SQLITE_TCLAPI f5tTokenize( return TCL_ERROR; } if( nArg==0 ){ - Tcl_AppendResult(interp, "no such tokenizer: ", 0); + Tcl_AppendResult(interp, "no such tokenizer: ", (char*)0); Tcl_Free((void*)azArg); return TCL_ERROR; } @@ -722,32 +769,43 @@ static int SQLITE_TCLAPI f5tTokenize( rc = pApi->xFindTokenizer(pApi, azArg[0], &pUserdata, &tokenizer); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "no such tokenizer: ", azArg[0], 0); + Tcl_AppendResult(interp, "no such tokenizer: ", azArg[0], (char*)0); return TCL_ERROR; } - rc = tokenizer.xCreate(pUserdata, &azArg[1], nArg-1, &pTok); + rc = tokenizer.xCreate(pUserdata, &azArg[1], (int)(nArg-1), &pTok); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error in tokenizer.xCreate()", 0); + Tcl_AppendResult(interp, "error in tokenizer.xCreate()", (char*)0); return TCL_ERROR; } + if( nText>0 ){ + pCopy = sqlite3_malloc(nText); + if( pCopy==0 ){ + tokenizer.xDelete(pTok); + Tcl_AppendResult(interp, "error in sqlite3_malloc()", (char*)0); + return TCL_ERROR; + }else{ + memcpy(pCopy, zText, nText); + } + } + pRet = Tcl_NewObj(); Tcl_IncrRefCount(pRet); ctx.bSubst = (objc==5); ctx.pRet = pRet; - ctx.zInput = zText; + ctx.zInput = pCopy; rc = tokenizer.xTokenize( - pTok, (void*)&ctx, FTS5_TOKENIZE_DOCUMENT, zText, nText, xTokenizeCb2 + pTok, (void*)&ctx, FTS5_TOKENIZE_DOCUMENT, pCopy,(int)nText, xTokenizeCb2 ); tokenizer.xDelete(pTok); + sqlite3_free(pCopy); if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error in tokenizer.xTokenize()", 0); + Tcl_AppendResult(interp, "error in tokenizer.xTokenize()", (char*)0); Tcl_DecrRefCount(pRet); return TCL_ERROR; } - Tcl_Free((void*)azArg); Tcl_SetObjResult(interp, pRet); 
Tcl_DecrRefCount(pRet); @@ -766,18 +824,32 @@ typedef struct F5tTokenizerInstance F5tTokenizerInstance; struct F5tTokenizerContext { void *pCtx; int (*xToken)(void*, int, const char*, int, int, int); + F5tTokenizerInstance *pInst; }; struct F5tTokenizerModule { Tcl_Interp *interp; Tcl_Obj *pScript; + void *pParentCtx; + fts5_tokenizer_v2 parent_v2; + fts5_tokenizer parent; F5tTokenizerContext *pContext; }; +/* +** zLocale: +** Within a call to xTokenize_v2(), pLocale/nLocale store the locale +** passed to the call by fts5. This can be retrieved by a Tcl tokenize +** script using [sqlite3_fts5_locale]. +*/ struct F5tTokenizerInstance { Tcl_Interp *interp; Tcl_Obj *pScript; + F5tTokenizerModule *pModule; + Fts5Tokenizer *pParent; F5tTokenizerContext *pContext; + const char *pLocale; + int nLocale; }; static int f5tTokenizerCreate( @@ -786,11 +858,20 @@ static int f5tTokenizerCreate( int nArg, Fts5Tokenizer **ppOut ){ + Fts5Tokenizer *pParent = 0; F5tTokenizerModule *pMod = (F5tTokenizerModule*)pCtx; Tcl_Obj *pEval; int rc = TCL_OK; int i; + assert( pMod->parent_v2.xCreate==0 || pMod->parent.xCreate==0 ); + if( pMod->parent_v2.xCreate ){ + rc = pMod->parent_v2.xCreate(pMod->pParentCtx, 0, 0, &pParent); + } + if( pMod->parent.xCreate ){ + rc = pMod->parent.xCreate(pMod->pParentCtx, 0, 0, &pParent); + } + pEval = Tcl_DuplicateObj(pMod->pScript); Tcl_IncrRefCount(pEval); for(i=0; rc==TCL_OK && iinterp = pMod->interp; pInst->pScript = Tcl_GetObjResult(pMod->interp); pInst->pContext = pMod->pContext; + pInst->pParent = pParent; + pInst->pModule = pMod; Tcl_IncrRefCount(pInst->pScript); *ppOut = (Fts5Tokenizer*)pInst; } @@ -820,11 +903,21 @@ static int f5tTokenizerCreate( static void f5tTokenizerDelete(Fts5Tokenizer *p){ F5tTokenizerInstance *pInst = (F5tTokenizerInstance*)p; - Tcl_DecrRefCount(pInst->pScript); - ckfree((char *)pInst); + if( pInst ){ + if( pInst->pParent ){ + if( pInst->pModule->parent_v2.xDelete ){ + pInst->pModule->parent_v2.xDelete(pInst->pParent); + }else{ + pInst->pModule->parent.xDelete(pInst->pParent); + } + } + Tcl_DecrRefCount(pInst->pScript); + ckfree((char *)pInst); + } } -static int f5tTokenizerTokenize( + +static int f5tTokenizerReallyTokenize( Fts5Tokenizer *p, void *pCtx, int flags, @@ -832,6 +925,7 @@ static int f5tTokenizerTokenize( int (*xToken)(void*, int, const char*, int, int, int) ){ F5tTokenizerInstance *pInst = (F5tTokenizerInstance*)p; + F5tTokenizerInstance *pOldInst = 0; void *pOldCtx; int (*xOldToken)(void*, int, const char*, int, int, int); Tcl_Obj *pEval; @@ -840,9 +934,11 @@ static int f5tTokenizerTokenize( pOldCtx = pInst->pContext->pCtx; xOldToken = pInst->pContext->xToken; + pOldInst = pInst->pContext->pInst; pInst->pContext->pCtx = pCtx; pInst->pContext->xToken = xToken; + pInst->pContext->pInst = pInst; assert( flags==FTS5_TOKENIZE_DOCUMENT @@ -878,9 +974,105 @@ static int f5tTokenizerTokenize( pInst->pContext->pCtx = pOldCtx; pInst->pContext->xToken = xOldToken; + pInst->pContext->pInst = pOldInst; return rc; } +typedef struct CallbackCtx CallbackCtx; +struct CallbackCtx { + Fts5Tokenizer *p; + void *pCtx; + int flags; + int (*xToken)(void*, int, const char*, int, int, int); +}; + +static int f5tTokenizeCallback( + void *pCtx, + int tflags, + const char *z, int n, + int iStart, int iEnd +){ + CallbackCtx *p = (CallbackCtx*)pCtx; + return f5tTokenizerReallyTokenize(p->p, p->pCtx, p->flags, z, n, p->xToken); +} + +static int f5tTokenizerTokenize_v2( + Fts5Tokenizer *p, + void *pCtx, + int flags, + const char *pText, int nText, + const char *pLoc, 
int nLoc, + int (*xToken)(void*, int, const char*, int, int, int) +){ + int rc = SQLITE_OK; + F5tTokenizerInstance *pInst = (F5tTokenizerInstance*)p; + + pInst->pLocale = pLoc; + pInst->nLocale = nLoc; + + if( pInst->pParent ){ + CallbackCtx ctx; + ctx.p = p; + ctx.pCtx = pCtx; + ctx.flags = flags; + ctx.xToken = xToken; + if( pInst->pModule->parent_v2.xTokenize ){ + rc = pInst->pModule->parent_v2.xTokenize( + pInst->pParent, (void*)&ctx, flags, pText, nText, + pLoc, nLoc, f5tTokenizeCallback + ); + }else{ + rc = pInst->pModule->parent.xTokenize( + pInst->pParent, (void*)&ctx, flags, pText, nText, f5tTokenizeCallback + ); + } + }else{ + rc = f5tTokenizerReallyTokenize(p, pCtx, flags, pText, nText, xToken); + } + + pInst->pLocale = 0; + pInst->nLocale = 0; + return rc; +} +static int f5tTokenizerTokenize( + Fts5Tokenizer *p, + void *pCtx, + int flags, + const char *pText, int nText, + int (*xToken)(void*, int, const char*, int, int, int) +){ + return f5tTokenizerTokenize_v2(p, pCtx, flags, pText, nText, 0, 0, xToken); +} + +/* +** sqlite3_fts5_locale +*/ +static int SQLITE_TCLAPI f5tTokenizerLocale( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + F5tTokenizerContext *p = (F5tTokenizerContext*)clientData; + + if( objc!=1 ){ + Tcl_WrongNumArgs(interp, 1, objv, ""); + return TCL_ERROR; + } + + if( p->xToken==0 ){ + Tcl_AppendResult(interp, + "sqlite3_fts5_locale may only be used by tokenizer callback", (char*)0 + ); + return TCL_ERROR; + } + + Tcl_SetObjResult(interp, + Tcl_NewStringObj(p->pInst->pLocale, p->pInst->nLocale) + ); + return TCL_OK; +} + /* ** sqlite3_fts5_token ?-colocated? TEXT START END */ @@ -893,13 +1085,13 @@ static int SQLITE_TCLAPI f5tTokenizerReturn( F5tTokenizerContext *p = (F5tTokenizerContext*)clientData; int iStart; int iEnd; - int nToken; + Tcl_Size nToken; int tflags = 0; char *zToken; int rc; if( objc==5 ){ - int nArg; + Tcl_Size nArg; char *zArg = Tcl_GetStringFromObj(objv[1], &nArg); if( nArg<=10 && nArg>=2 && memcmp("-colocated", zArg, nArg)==0 ){ tflags |= FTS5_TOKEN_COLOCATED; @@ -919,12 +1111,12 @@ static int SQLITE_TCLAPI f5tTokenizerReturn( if( p->xToken==0 ){ Tcl_AppendResult(interp, - "sqlite3_fts5_token may only be used by tokenizer callback", 0 + "sqlite3_fts5_token may only be used by tokenizer callback", (char*)0 ); return TCL_ERROR; } - rc = p->xToken(p->pCtx, tflags, zToken, nToken, iStart, iEnd); + rc = p->xToken(p->pCtx, tflags, zToken, (int)nToken, iStart, iEnd); Tcl_SetResult(interp, (char*)sqlite3ErrName(rc), TCL_VOLATILE); return rc==SQLITE_OK ? TCL_OK : TCL_ERROR; @@ -966,32 +1158,112 @@ static int SQLITE_TCLAPI f5tCreateTokenizer( fts5_api *pApi; char *zName; Tcl_Obj *pScript; - fts5_tokenizer t; F5tTokenizerModule *pMod; - int rc; - - if( objc!=4 ){ - Tcl_WrongNumArgs(interp, 1, objv, "DB NAME SCRIPT"); + int rc = SQLITE_OK; + int bV2 = 0; /* True to use _v2 API */ + int iVersion = 2; /* Value for _v2.iVersion */ + const char *zParent = 0; /* Name of parent tokenizer, if any */ + int ii = 0; + + if( objc<4 ){ + Tcl_WrongNumArgs(interp, 1, objv, "?OPTIONS? DB NAME SCRIPT"); return TCL_ERROR; } - if( f5tDbAndApi(interp, objv[1], &db, &pApi) ){ - return TCL_ERROR; + + /* Parse any options. Set stack variables bV2 and zParent. 
*/ + for(ii=1; iiinterp = interp; pMod->pScript = pScript; - pMod->pContext = pContext; Tcl_IncrRefCount(pScript); - rc = pApi->xCreateTokenizer(pApi, zName, (void*)pMod, &t, f5tDelTokenizer); + pMod->pContext = pContext; + if( zParent ){ + if( bV2 ){ + fts5_tokenizer_v2 *pParent = 0; + rc = pApi->xFindTokenizer_v2(pApi, zParent, &pMod->pParentCtx, &pParent); + if( rc==SQLITE_OK ){ + memcpy(&pMod->parent_v2, pParent, sizeof(fts5_tokenizer_v2)); + pMod->parent_v2.xDelete(0); + } + }else{ + rc = pApi->xFindTokenizer(pApi, zParent, &pMod->pParentCtx,&pMod->parent); + if( rc==SQLITE_OK ){ + pMod->parent.xDelete(0); + } + } + } + + if( rc==SQLITE_OK ){ + void *pModCtx = (void*)pMod; + if( bV2==0 ){ + fts5_tokenizer t; + t.xCreate = f5tTokenizerCreate; + t.xTokenize = f5tTokenizerTokenize; + t.xDelete = f5tTokenizerDelete; + rc = pApi->xCreateTokenizer(pApi, zName, pModCtx, &t, f5tDelTokenizer); + }else{ + fts5_tokenizer_v2 t2; + memset(&t2, 0, sizeof(t2)); + t2.iVersion = iVersion; + t2.xCreate = f5tTokenizerCreate; + t2.xTokenize = f5tTokenizerTokenize_v2; + t2.xDelete = f5tTokenizerDelete; + rc = pApi->xCreateTokenizer_v2(pApi, zName, pModCtx, &t2,f5tDelTokenizer); + } + } + if( rc!=SQLITE_OK ){ - Tcl_AppendResult(interp, "error in fts5_api.xCreateTokenizer()", 0); + Tcl_AppendResult(interp, ( + bV2 ? "error in fts5_api.xCreateTokenizer_v2()" + : "error in fts5_api.xCreateTokenizer()" + ), (char*)0); return TCL_ERROR; } @@ -1048,7 +1320,7 @@ static int SQLITE_TCLAPI f5tTokenHash( Tcl_Obj *CONST objv[] ){ char *z; - int n; + Tcl_Size n; unsigned int iVal; int nSlot; @@ -1061,7 +1333,7 @@ static int SQLITE_TCLAPI f5tTokenHash( } z = Tcl_GetStringFromObj(objv[2], &n); - iVal = f5t_fts5HashKey(nSlot, z, n); + iVal = f5t_fts5HashKey(nSlot, z, (int)n); Tcl_SetObjResult(interp, Tcl_NewIntObj(iVal)); return TCL_OK; } @@ -1117,6 +1389,313 @@ static int SQLITE_TCLAPI f5tRegisterTok( return TCL_OK; } +typedef struct OriginTextCtx OriginTextCtx; +struct OriginTextCtx { + sqlite3 *db; + fts5_api *pApi; +}; + +typedef struct OriginTextTokenizer OriginTextTokenizer; +struct OriginTextTokenizer { + Fts5Tokenizer *pTok; /* Underlying tokenizer object */ + fts5_tokenizer tokapi; /* API implementation for pTok */ +}; + +/* +** Delete the OriginTextCtx object indicated by the only argument. 
+*/ +static void f5tOrigintextTokenizerDelete(void *pCtx){ + OriginTextCtx *p = (OriginTextCtx*)pCtx; + ckfree((char*)p); +} + +static int f5tOrigintextCreate( + void *pCtx, + const char **azArg, + int nArg, + Fts5Tokenizer **ppOut +){ + OriginTextCtx *p = (OriginTextCtx*)pCtx; + OriginTextTokenizer *pTok = 0; + void *pTokCtx = 0; + int rc = SQLITE_OK; + + pTok = (OriginTextTokenizer*)sqlite3_malloc(sizeof(OriginTextTokenizer)); + if( pTok==0 ){ + rc = SQLITE_NOMEM; + }else if( nArg<1 ){ + rc = SQLITE_ERROR; + }else{ + /* Locate the underlying tokenizer */ + rc = p->pApi->xFindTokenizer(p->pApi, azArg[0], &pTokCtx, &pTok->tokapi); + } + + /* Create the new tokenizer instance */ + if( rc==SQLITE_OK ){ + rc = pTok->tokapi.xCreate(pTokCtx, &azArg[1], nArg-1, &pTok->pTok); + } + + if( rc!=SQLITE_OK ){ + sqlite3_free(pTok); + pTok = 0; + } + *ppOut = (Fts5Tokenizer*)pTok; + return rc; +} + +static void f5tOrigintextDelete(Fts5Tokenizer *pTokenizer){ + OriginTextTokenizer *p = (OriginTextTokenizer*)pTokenizer; + if( p->pTok ){ + p->tokapi.xDelete(p->pTok); + } + sqlite3_free(p); +} + +typedef struct OriginTextCb OriginTextCb; +struct OriginTextCb { + void *pCtx; + const char *pText; + int nText; + int (*xToken)(void *, int, const char *, int, int, int); + + char *aBuf; /* Buffer to use */ + int nBuf; /* Allocated size of aBuf[] */ +}; + +static int xOriginToken( + void *pCtx, /* Copy of 2nd argument to xTokenize() */ + int tflags, /* Mask of FTS5_TOKEN_* flags */ + const char *pToken, /* Pointer to buffer containing token */ + int nToken, /* Size of token in bytes */ + int iStart, /* Byte offset of token within input text */ + int iEnd /* Byte offset of end of token within input */ +){ + OriginTextCb *p = (OriginTextCb*)pCtx; + int ret = 0; + + if( nToken==(iEnd-iStart) && 0==memcmp(pToken, &p->pText[iStart], nToken) ){ + /* Token exactly matches document text. Pass it through as is. */ + ret = p->xToken(p->pCtx, tflags, pToken, nToken, iStart, iEnd); + }else{ + int nReq = nToken + 1 + (iEnd-iStart); + if( nReq>p->nBuf ){ + sqlite3_free(p->aBuf); + p->aBuf = sqlite3_malloc(nReq*2); + if( p->aBuf==0 ) return SQLITE_NOMEM; + p->nBuf = nReq*2; + } + + memcpy(p->aBuf, pToken, nToken); + p->aBuf[nToken] = '\0'; + memcpy(&p->aBuf[nToken+1], &p->pText[iStart], iEnd-iStart); + ret = p->xToken(p->pCtx, tflags, p->aBuf, nReq, iStart, iEnd); + } + + return ret; +} + + +static int f5tOrigintextTokenize( + Fts5Tokenizer *pTokenizer, + void *pCtx, + int flags, /* Mask of FTS5_TOKENIZE_* flags */ + const char *pText, int nText, + int (*xToken)(void *, int, const char *, int, int, int) +){ + OriginTextTokenizer *p = (OriginTextTokenizer*)pTokenizer; + OriginTextCb cb; + int ret; + + memset(&cb, 0, sizeof(cb)); + cb.pCtx = pCtx; + cb.pText = pText; + cb.nText = nText; + cb.xToken = xToken; + + ret = p->tokapi.xTokenize(p->pTok,(void*)&cb,flags,pText,nText,xOriginToken); + sqlite3_free(cb.aBuf); + return ret; +} + +/* +** sqlite3_fts5_register_origintext DB +** +** Description... 
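To make the compound-token format produced by xOriginToken() above concrete, a minimal worked example, assuming the wrapped tokenizer is unicode61 with default options and the document text is "Hello":

/* unicode61 reports the folded token:
**     pToken="hello", nToken=5, iStart=0, iEnd=5
** The token no longer matches the original document bytes, so origintext
** forwards a buffer holding the token, a nul byte, and the original text:
**     "hello\0Hello", nToken=5+1+5=11, iStart=0, iEnd=5
** A token that already matches the document text is passed through as is. */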
+*/ +static int SQLITE_TCLAPI f5tRegisterOriginText( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db = 0; + fts5_api *pApi = 0; + int rc; + fts5_tokenizer tok = {0, 0, 0}; + OriginTextCtx *pCtx = 0; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + if( f5tDbAndApi(interp, objv[1], &db, &pApi) ) return TCL_ERROR; + + pCtx = (OriginTextCtx*)ckalloc(sizeof(OriginTextCtx)); + pCtx->db = db; + pCtx->pApi = pApi; + + tok.xCreate = f5tOrigintextCreate; + tok.xDelete = f5tOrigintextDelete; + tok.xTokenize = f5tOrigintextTokenize; + rc = pApi->xCreateTokenizer( + pApi, "origintext", (void*)pCtx, &tok, f5tOrigintextTokenizerDelete + ); + + Tcl_ResetResult(interp); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), (void*)0); + return TCL_ERROR; + } + return TCL_OK; +} + +/* +** This function is used to DROP an fts5 table. It works even if the data +** structures fts5 stores within the database are corrupt, which sometimes +** prevents a straight "DROP TABLE" command from succeeding. +** +** The first parameter is the database handle to use for the DROP TABLE +** operation. The second is the name of the database to drop the fts5 table +** from (i.e. "main", "temp" or the name of an attached database). The +** third parameter is the name of the fts5 table to drop. +** +** SQLITE_OK is returned if the table is successfully dropped. Or, if an +** error occurs, an SQLite error code. +*/ +static int sqlite3_fts5_drop_corrupt_table( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Database name ("main", "temp" etc.) */ + const char *zTab /* Name of fts5 table to drop */ +){ + int rc = SQLITE_OK; + int bDef = 0; + + rc = sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, -1, &bDef); + if( rc==SQLITE_OK ){ + char *zScript = sqlite3_mprintf( + "DELETE FROM %Q.'%q_data';" + "DELETE FROM %Q.'%q_config';" + "INSERT INTO %Q.'%q_data' VALUES(10, X'0000000000');" + "INSERT INTO %Q.'%q_config' VALUES('version', 4);" + "DROP TABLE %Q.'%q';", + zDb, zTab, zDb, zTab, zDb, zTab, zDb, zTab, zDb, zTab + ); + + if( zScript==0 ){ + rc = SQLITE_NOMEM; + }else{ + if( bDef ) sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 0, 0); + rc = sqlite3_exec(db, zScript, 0, 0, 0); + if( bDef ) sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 1, 0); + sqlite3_free(zScript); + } + } + + return rc; +} + +/* +** sqlite3_fts5_drop_corrupt_table DB DATABASE TABLE +** +** Description... +*/ +static int SQLITE_TCLAPI f5tDropCorruptTable( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db = 0; + const char *zDb = 0; + const char *zTab = 0; + int rc = SQLITE_OK; + + if( objc!=4 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB DATABASE TABLE"); + return TCL_ERROR; + } + if( f5tDbPointer(interp, objv[1], &db) ){ + return TCL_ERROR; + } + zDb = Tcl_GetString(objv[2]); + zTab = Tcl_GetString(objv[3]); + + rc = sqlite3_fts5_drop_corrupt_table(db, zDb, zTab); + if( rc!=SQLITE_OK ){ + Tcl_AppendResult(interp, "error: ", sqlite3_errmsg(db), (void*)0); + return TCL_ERROR; + } + + return TCL_OK; +} + +/* +** Free a buffer returned to SQLite by the str() function. +*/ +void f5tFree(void *p){ + char *x = (char *)p; + ckfree(&x[-8]); +} + +/* +** Implementation of str(). 
+*/ +void f5tStrFunc(sqlite3_context *pCtx, int nArg, sqlite3_value **apArg){ + const char *zText = 0; + assert( nArg==1 ); + + zText = (const char*)sqlite3_value_text(apArg[0]); + if( zText ){ + sqlite3_int64 nText = strlen(zText); + char *zCopy = (char*)ckalloc(nText+8); + if( zCopy==0 ){ + sqlite3_result_error_nomem(pCtx); + }else{ + zCopy += 8; + memcpy(zCopy, zText, nText); + sqlite3_result_text64(pCtx, zCopy, nText, f5tFree, SQLITE_UTF8); + } + } +} + +/* +** sqlite3_fts5_register_str DB +** +** Register the str() function with database handle DB. str() interprets +** its only argument as text and returns a copy of the value in a +** non-nul-terminated buffer. +*/ +static int SQLITE_TCLAPI f5tRegisterStr( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db = 0; + + if( objc!=2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB"); + return TCL_ERROR; + } + if( f5tDbPointer(interp, objv[1], &db) ){ + return TCL_ERROR; + } + + sqlite3_create_function(db, "str", 1, SQLITE_UTF8, 0, f5tStrFunc, 0, 0); + + return TCL_OK; +} + /* ** Entry point. */ @@ -1128,12 +1707,16 @@ int Fts5tcl_Init(Tcl_Interp *interp){ } aCmd[] = { { "sqlite3_fts5_create_tokenizer", f5tCreateTokenizer, 1 }, { "sqlite3_fts5_token", f5tTokenizerReturn, 1 }, + { "sqlite3_fts5_locale", f5tTokenizerLocale, 1 }, { "sqlite3_fts5_tokenize", f5tTokenize, 0 }, { "sqlite3_fts5_create_function", f5tCreateFunction, 0 }, { "sqlite3_fts5_may_be_corrupt", f5tMayBeCorrupt, 0 }, { "sqlite3_fts5_token_hash", f5tTokenHash, 0 }, { "sqlite3_fts5_register_matchinfo", f5tRegisterMatchinfo, 0 }, - { "sqlite3_fts5_register_fts5tokenize", f5tRegisterTok, 0 } + { "sqlite3_fts5_register_fts5tokenize", f5tRegisterTok, 0 }, + { "sqlite3_fts5_register_origintext",f5tRegisterOriginText, 0 }, + { "sqlite3_fts5_drop_corrupt_table", f5tDropCorruptTable, 0 }, + { "sqlite3_fts5_register_str", f5tRegisterStr, 0 } }; int i; F5tTokenizerContext *pContext; diff --git a/ext/fts5/fts5_test_mi.c b/ext/fts5/fts5_test_mi.c index 6f2d6e7ea2..e8648a4d22 100644 --- a/ext/fts5/fts5_test_mi.c +++ b/ext/fts5/fts5_test_mi.c @@ -14,7 +14,7 @@ ** versions of FTS5. It contains the implementation of an FTS5 auxiliary ** function very similar to the FTS4 function matchinfo(): ** -** https://www.sqlite.org/fts3.html#matchinfo +** https://sqlite.org/fts3.html#matchinfo ** ** Known differences are that: ** @@ -393,17 +393,13 @@ static void fts5MatchinfoFunc( } } -int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){ - int rc; /* Return code */ - fts5_api *pApi; /* FTS5 API functions */ - - /* Extract the FTS5 API pointer from the database handle. The - ** fts5_api_from_db() function above is copied verbatim from the - ** FTS5 documentation. Refer there for details. */ - rc = fts5_api_from_db(db, &pApi); - if( rc!=SQLITE_OK ) return rc; +/* +** Register "matchinfo" with global API object pApi. +*/ +int sqlite3Fts5TestRegisterMatchinfoAPI(fts5_api *pApi){ + int rc; - /* If fts5_api_from_db() returns NULL, then either FTS5 is not registered + /* If fts5_api_from_db() returned NULL, then either FTS5 is not registered ** with this database handle, or an error (OOM perhaps?) has occurred. ** ** Also check that the fts5_api object is version 2 or newer. @@ -418,4 +414,20 @@ int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){ return rc; } +/* +** Register "matchinfo" with database handle db. 
+*/ +int sqlite3Fts5TestRegisterMatchinfo(sqlite3 *db){ + int rc; /* Return code */ + fts5_api *pApi; /* FTS5 API functions */ + + /* Extract the FTS5 API pointer from the database handle. The + ** fts5_api_from_db() function above is copied verbatim from the + ** FTS5 documentation. Refer there for details. */ + rc = fts5_api_from_db(db, &pApi); + if( rc!=SQLITE_OK ) return rc; + + return sqlite3Fts5TestRegisterMatchinfoAPI(pApi); +} + #endif /* SQLITE_ENABLE_FTS5 */ diff --git a/ext/fts5/fts5_test_tok.c b/ext/fts5/fts5_test_tok.c index a5d839da66..994d304dc6 100644 --- a/ext/fts5/fts5_test_tok.c +++ b/ext/fts5/fts5_test_tok.c @@ -472,7 +472,8 @@ int sqlite3Fts5TestRegisterTok(sqlite3 *db, fts5_api *pApi){ 0, /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ - 0 /* xShadowName */ + 0, /* xShadowName */ + 0 /* xIntegrity */ }; int rc; /* Return code */ diff --git a/ext/fts5/fts5_tokenize.c b/ext/fts5/fts5_tokenize.c index e61d6b1edd..b8a1136465 100644 --- a/ext/fts5/fts5_tokenize.c +++ b/ext/fts5/fts5_tokenize.c @@ -198,7 +198,7 @@ static const unsigned char sqlite3Utf8Trans1[] = { c = *(zIn++); \ if( c>=0xc0 ){ \ c = sqlite3Utf8Trans1[c-0xc0]; \ - while( zIn!=zTerm && (*zIn & 0xc0)==0x80 ){ \ + while( zIn=0xc0 ){ \ + while( (((unsigned char)*zIn) & 0xc0)==0x80 ){ zIn++; } \ + } \ +} + typedef struct Unicode61Tokenizer Unicode61Tokenizer; struct Unicode61Tokenizer { unsigned char aTokenChar[128]; /* ASCII range token characters */ @@ -380,7 +386,6 @@ static int fts5UnicodeCreate( zCat = azArg[i+1]; } } - if( rc==SQLITE_OK ){ rc = unicodeSetCategories(p, zCat); } @@ -410,7 +415,6 @@ static int fts5UnicodeCreate( rc = SQLITE_ERROR; } } - }else{ rc = SQLITE_NOMEM; } @@ -549,7 +553,7 @@ static int fts5UnicodeTokenize( typedef struct PorterTokenizer PorterTokenizer; struct PorterTokenizer { - fts5_tokenizer tokenizer; /* Parent tokenizer module */ + fts5_tokenizer_v2 tokenizer_v2; /* Parent tokenizer module */ Fts5Tokenizer *pTokenizer; /* Parent tokenizer instance */ char aBuf[FTS5_PORTER_MAX_TOKEN + 64]; }; @@ -561,7 +565,7 @@ static void fts5PorterDelete(Fts5Tokenizer *pTok){ if( pTok ){ PorterTokenizer *p = (PorterTokenizer*)pTok; if( p->pTokenizer ){ - p->tokenizer.xDelete(p->pTokenizer); + p->tokenizer_v2.xDelete(p->pTokenizer); } sqlite3_free(p); } @@ -580,6 +584,7 @@ static int fts5PorterCreate( PorterTokenizer *pRet; void *pUserdata = 0; const char *zBase = "unicode61"; + fts5_tokenizer_v2 *pV2 = 0; if( nArg>0 ){ zBase = azArg[0]; @@ -588,14 +593,15 @@ static int fts5PorterCreate( pRet = (PorterTokenizer*)sqlite3_malloc(sizeof(PorterTokenizer)); if( pRet ){ memset(pRet, 0, sizeof(PorterTokenizer)); - rc = pApi->xFindTokenizer(pApi, zBase, &pUserdata, &pRet->tokenizer); + rc = pApi->xFindTokenizer_v2(pApi, zBase, &pUserdata, &pV2); }else{ rc = SQLITE_NOMEM; } if( rc==SQLITE_OK ){ int nArg2 = (nArg>0 ? nArg-1 : 0); - const char **azArg2 = (nArg2 ? &azArg[1] : 0); - rc = pRet->tokenizer.xCreate(pUserdata, azArg2, nArg2, &pRet->pTokenizer); + const char **az2 = (nArg2 ? 
&azArg[1] : 0); + memcpy(&pRet->tokenizer_v2, pV2, sizeof(fts5_tokenizer_v2)); + rc = pRet->tokenizer_v2.xCreate(pUserdata, az2, nArg2, &pRet->pTokenizer); } if( rc!=SQLITE_OK ){ @@ -1246,6 +1252,7 @@ static int fts5PorterTokenize( void *pCtx, int flags, const char *pText, int nText, + const char *pLoc, int nLoc, int (*xToken)(void*, int, const char*, int nToken, int iStart, int iEnd) ){ PorterTokenizer *p = (PorterTokenizer*)pTokenizer; @@ -1253,8 +1260,8 @@ static int fts5PorterTokenize( sCtx.xToken = xToken; sCtx.pCtx = pCtx; sCtx.aBuf = p->aBuf; - return p->tokenizer.xTokenize( - p->pTokenizer, (void*)&sCtx, flags, pText, nText, fts5PorterCb + return p->tokenizer_v2.xTokenize( + p->pTokenizer, (void*)&sCtx, flags, pText, nText, pLoc, nLoc, fts5PorterCb ); } @@ -1264,6 +1271,7 @@ static int fts5PorterTokenize( typedef struct TrigramTokenizer TrigramTokenizer; struct TrigramTokenizer { int bFold; /* True to fold to lower-case */ + int iFoldParam; /* Parameter to pass to Fts5UnicodeFold() */ }; /* @@ -1283,28 +1291,46 @@ static int fts5TriCreate( Fts5Tokenizer **ppOut ){ int rc = SQLITE_OK; - TrigramTokenizer *pNew = (TrigramTokenizer*)sqlite3_malloc(sizeof(*pNew)); + TrigramTokenizer *pNew = 0; UNUSED_PARAM(pUnused); - if( pNew==0 ){ - rc = SQLITE_NOMEM; + if( nArg%2 ){ + rc = SQLITE_ERROR; }else{ int i; - pNew->bFold = 1; - for(i=0; rc==SQLITE_OK && ibFold = 1; + pNew->iFoldParam = 0; + + for(i=0; rc==SQLITE_OK && ibFold = (zArg[0]=='0'); + } + }else if( 0==sqlite3_stricmp(azArg[i], "remove_diacritics") ){ + if( (zArg[0]!='0' && zArg[0]!='1' && zArg[0]!='2') || zArg[1] ){ + rc = SQLITE_ERROR; + }else{ + pNew->iFoldParam = (zArg[0]!='0') ? 2 : 0; + } }else{ - pNew->bFold = (zArg[0]=='0'); + rc = SQLITE_ERROR; } - }else{ + } + + if( pNew->iFoldParam!=0 && pNew->bFold==0 ){ rc = SQLITE_ERROR; } - } - if( rc!=SQLITE_OK ){ - fts5TriDelete((Fts5Tokenizer*)pNew); - pNew = 0; + + if( rc!=SQLITE_OK ){ + fts5TriDelete((Fts5Tokenizer*)pNew); + pNew = 0; + } } } *ppOut = (Fts5Tokenizer*)pNew; @@ -1324,40 +1350,65 @@ static int fts5TriTokenize( TrigramTokenizer *p = (TrigramTokenizer*)pTok; int rc = SQLITE_OK; char aBuf[32]; + char *zOut = aBuf; + int ii; const unsigned char *zIn = (const unsigned char*)pText; - const unsigned char *zEof = &zIn[nText]; - u32 iCode; + const unsigned char *zEof = (zIn ? &zIn[nText] : 0); + u32 iCode = 0; + int aStart[3]; /* Input offset of each character in aBuf[] */ UNUSED_PARAM(unusedFlags); - while( 1 ){ - char *zOut = aBuf; - int iStart = zIn - (const unsigned char*)pText; - const unsigned char *zNext; - - READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - zNext = zIn; - if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + + /* Populate aBuf[] with the characters for the first trigram. */ + for(ii=0; ii<3; ii++){ + do { + aStart[ii] = zIn - (const unsigned char*)pText; + if( zIn>=zEof ) return SQLITE_OK; READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - }else{ - break; - } - if( zInbFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + WRITE_UTF8(zOut, iCode); + } + + /* At the start of each iteration of this loop: + ** + ** aBuf: Contains 3 characters. The 3 characters of the next trigram. + ** zOut: Points to the byte following the last character in aBuf. + ** aStart[3]: Contains the byte offset in the input text corresponding + ** to the start of each of the three characters in the buffer. 
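For a concrete picture of the sliding window maintained by the loop documented below, these are the callbacks it ends up making for the plain ASCII input "abcdef" - a minimal illustration assuming the default case_sensitive and remove_diacritics settings, under which case folding leaves this input unchanged:

xToken(pCtx, 0, "abc", 3, 0, 3);
xToken(pCtx, 0, "bcd", 3, 1, 4);
xToken(pCtx, 0, "cde", 3, 2, 5);
xToken(pCtx, 0, "def", 3, 3, 6);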
+ */ + assert( zIn<=zEof ); + while( 1 ){ + int iNext; /* Start of character following current tri */ + const char *z1; + + /* Read characters from the input up until the first non-diacritic */ + do { + iNext = zIn - (const unsigned char*)pText; + if( zIn>=zEof ){ + iCode = 0; + break; + } READ_UTF8(zIn, zEof, iCode); - if( iCode==0 ) break; - if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, 0); - WRITE_UTF8(zOut, iCode); - }else{ - break; - } - rc = xToken(pCtx, 0, aBuf, zOut-aBuf, iStart, iStart + zOut-aBuf); - if( rc!=SQLITE_OK ) break; - zIn = zNext; + if( p->bFold ) iCode = sqlite3Fts5UnicodeFold(iCode, p->iFoldParam); + }while( iCode==0 ); + + /* Pass the current trigram back to fts5 */ + rc = xToken(pCtx, 0, aBuf, zOut-aBuf, aStart[0], iNext); + if( iCode==0 || rc!=SQLITE_OK ) break; + + /* Remove the first character from buffer aBuf[]. Append the character + ** with codepoint iCode. */ + z1 = aBuf; + FTS5_SKIP_UTF8(z1); + memmove(aBuf, z1, zOut - z1); + zOut -= (z1 - aBuf); + WRITE_UTF8(zOut, iCode); + + /* Update the aStart[] array */ + aStart[0] = aStart[1]; + aStart[1] = aStart[2]; + aStart[2] = iNext; } return rc; @@ -1380,11 +1431,23 @@ int sqlite3Fts5TokenizerPattern( ){ if( xCreate==fts5TriCreate ){ TrigramTokenizer *p = (TrigramTokenizer*)pTok; - return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + if( p->iFoldParam==0 ){ + return p->bFold ? FTS5_PATTERN_LIKE : FTS5_PATTERN_GLOB; + } } return FTS5_PATTERN_NONE; } +/* +** Return true if the tokenizer described by p->azArg[] is the trigram +** tokenizer. This tokenizer needs to be loaded before xBestIndex is +** called for the first time in order to correctly handle LIKE/GLOB. +*/ +int sqlite3Fts5TokenizerPreload(Fts5TokenizerConfig *p){ + return (p->nArg>=1 && 0==sqlite3_stricmp(p->azArg[0], "trigram")); +} + + /* ** Register all built-in tokenizers with FTS5. */ @@ -1395,7 +1458,6 @@ int sqlite3Fts5TokenizerInit(fts5_api *pApi){ } aBuiltin[] = { { "unicode61", {fts5UnicodeCreate, fts5UnicodeDelete, fts5UnicodeTokenize}}, { "ascii", {fts5AsciiCreate, fts5AsciiDelete, fts5AsciiTokenize }}, - { "porter", {fts5PorterCreate, fts5PorterDelete, fts5PorterTokenize }}, { "trigram", {fts5TriCreate, fts5TriDelete, fts5TriTokenize}}, }; @@ -1410,6 +1472,19 @@ int sqlite3Fts5TokenizerInit(fts5_api *pApi){ 0 ); } - + if( rc==SQLITE_OK ){ + fts5_tokenizer_v2 sPorter = { + 2, + fts5PorterCreate, + fts5PorterDelete, + fts5PorterTokenize + }; + rc = pApi->xCreateTokenizer_v2(pApi, + "porter", + (void*)pApi, + &sPorter, + 0 + ); + } return rc; } diff --git a/ext/fts5/fts5_unicode2.c b/ext/fts5/fts5_unicode2.c index 3e97264fa8..2133d5d5b8 100644 --- a/ext/fts5/fts5_unicode2.c +++ b/ext/fts5/fts5_unicode2.c @@ -364,6 +364,9 @@ int sqlite3Fts5UnicodeCatParse(const char *zCat, u8 *aArray){ default: return 1; } break; + + default: + return 1; } return 0; } @@ -775,4 +778,3 @@ void sqlite3Fts5UnicodeAscii(u8 *aArray, u8 *aAscii){ } aAscii[0] = 0; /* 0x00 is never a token character */ } - diff --git a/ext/fts5/fts5_vocab.c b/ext/fts5/fts5_vocab.c index 18774c4e4a..3a6a968f7c 100644 --- a/ext/fts5/fts5_vocab.c +++ b/ext/fts5/fts5_vocab.c @@ -64,6 +64,7 @@ struct Fts5VocabCursor { int nLeTerm; /* Size of zLeTerm in bytes */ char *zLeTerm; /* (term <= $zLeTerm) paramater, or NULL */ + int colUsed; /* Copy of sqlite3_index_info.colUsed */ /* These are used by 'col' tables only */ int iCol; @@ -90,9 +91,11 @@ struct Fts5VocabCursor { /* ** Bits for the mask used as the idxNum value by xBestIndex/xFilter. 
*/ -#define FTS5_VOCAB_TERM_EQ 0x01 -#define FTS5_VOCAB_TERM_GE 0x02 -#define FTS5_VOCAB_TERM_LE 0x04 +#define FTS5_VOCAB_TERM_EQ 0x0100 +#define FTS5_VOCAB_TERM_GE 0x0200 +#define FTS5_VOCAB_TERM_LE 0x0400 + +#define FTS5_VOCAB_COLUSED_MASK 0xFF /* @@ -190,12 +193,12 @@ static int fts5VocabInitVtab( *pzErr = sqlite3_mprintf("wrong number of vtable arguments"); rc = SQLITE_ERROR; }else{ - int nByte; /* Bytes of space to allocate */ + i64 nByte; /* Bytes of space to allocate */ const char *zDb = bDb ? argv[3] : argv[1]; const char *zTab = bDb ? argv[4] : argv[3]; const char *zType = bDb ? argv[5] : argv[4]; - int nDb = (int)strlen(zDb)+1; - int nTab = (int)strlen(zTab)+1; + i64 nDb = strlen(zDb)+1; + i64 nTab = strlen(zTab)+1; int eType = 0; rc = fts5VocabTableType(zType, pzErr, &eType); @@ -269,11 +272,13 @@ static int fts5VocabBestIndexMethod( int iTermEq = -1; int iTermGe = -1; int iTermLe = -1; - int idxNum = 0; + int idxNum = (int)pInfo->colUsed; int nArg = 0; UNUSED_PARAM(pUnused); + assert( (pInfo->colUsed & FTS5_VOCAB_COLUSED_MASK)==pInfo->colUsed ); + for(i=0; inConstraint; i++){ struct sqlite3_index_constraint *p = &pInfo->aConstraint[i]; if( p->usable==0 ) continue; @@ -365,7 +370,7 @@ static int fts5VocabOpenMethod( if( rc==SQLITE_OK ){ pVTab->zErrMsg = sqlite3_mprintf( "no such fts5 table: %s.%s", pTab->zFts5Db, pTab->zFts5Tbl - ); + ); rc = SQLITE_ERROR; } }else{ @@ -391,7 +396,12 @@ static int fts5VocabOpenMethod( return rc; } +/* +** Restore cursor pCsr to the state it was in immediately after being +** created by the xOpen() method. +*/ static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){ + int nCol = pCsr->pFts5->pConfig->nCol; pCsr->rowid = 0; sqlite3Fts5IterClose(pCsr->pIter); sqlite3Fts5StructureRelease(pCsr->pStruct); @@ -401,6 +411,12 @@ static void fts5VocabResetCursor(Fts5VocabCursor *pCsr){ pCsr->nLeTerm = -1; pCsr->zLeTerm = 0; pCsr->bEof = 0; + pCsr->iCol = 0; + pCsr->iInstPos = 0; + pCsr->iInstOff = 0; + pCsr->colUsed = 0; + memset(pCsr->aCnt, 0, sizeof(i64)*nCol); + memset(pCsr->aDoc, 0, sizeof(i64)*nCol); } /* @@ -525,9 +541,19 @@ static int fts5VocabNextMethod(sqlite3_vtab_cursor *pCursor){ switch( pTab->eType ){ case FTS5_VOCAB_ROW: - if( eDetail==FTS5_DETAIL_FULL ){ - while( 0==sqlite3Fts5PoslistNext64(pPos, nPos, &iOff, &iPos) ){ - pCsr->aCnt[0]++; + /* Do not bother counting the number of instances if the "cnt" + ** column is not being read (according to colUsed). 
*/ + if( eDetail==FTS5_DETAIL_FULL && (pCsr->colUsed & 0x04) ){ + while( iPosaCnt[] */ + pCsr->aCnt[0]++; + } } } pCsr->aDoc[0]++; @@ -625,11 +651,12 @@ static int fts5VocabFilterMethod( if( idxNum & FTS5_VOCAB_TERM_EQ ) pEq = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_GE ) pGe = apVal[iVal++]; if( idxNum & FTS5_VOCAB_TERM_LE ) pLe = apVal[iVal++]; + pCsr->colUsed = (idxNum & FTS5_VOCAB_COLUSED_MASK); if( pEq ){ zTerm = (const char *)sqlite3_value_text(pEq); nTerm = sqlite3_value_bytes(pEq); - f = 0; + f = FTS5INDEX_QUERY_NOTOKENDATA; }else{ if( pGe ){ zTerm = (const char *)sqlite3_value_text(pGe); @@ -783,7 +810,8 @@ int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){ /* xSavepoint */ 0, /* xRelease */ 0, /* xRollbackTo */ 0, - /* xShadowName */ 0 + /* xShadowName */ 0, + /* xIntegrity */ 0 }; void *p = (void*)pGlobal; diff --git a/ext/fts5/test/fts5_common.tcl b/ext/fts5/test/fts5_common.tcl index 0f371dcfd9..8ea87dbdd1 100644 --- a/ext/fts5/test/fts5_common.tcl +++ b/ext/fts5/test/fts5_common.tcl @@ -51,6 +51,10 @@ proc fts5_test_poslist2 {cmd} { sort_poslist $res } +proc fts5_test_insttoken {cmd iInst iToken} { + $cmd xInstToken $iInst $iToken +} + proc fts5_test_collist {cmd} { set res [list] @@ -61,6 +65,12 @@ proc fts5_test_collist {cmd} { set res } +proc fts5_collist {cmd iPhrase} { + set res [list] + $cmd xPhraseColumnForeach $iPhrase c { lappend res $c } + set res +} + proc fts5_test_columnsize {cmd} { set res [list] for {set i 0} {$i < [$cmd xColumnCount]} {incr i} { @@ -69,6 +79,13 @@ proc fts5_test_columnsize {cmd} { set res } +proc fts5_columntext {cmd iCol} { + $cmd xColumnText $iCol +} +proc fts5_columnlocale {cmd iCol} { + $cmd xColumnLocale $iCol +} + proc fts5_test_columntext {cmd} { set res [list] for {set i 0} {$i < [$cmd xColumnCount]} {incr i} { @@ -77,6 +94,14 @@ proc fts5_test_columntext {cmd} { set res } +proc fts5_test_columnlocale {cmd} { + set res [list] + for {set i 0} {$i < [$cmd xColumnCount]} {incr i} { + lappend res [$cmd xColumnLocale $i] + } + set res +} + proc fts5_test_columntotalsize {cmd} { set res [list] for {set i 0} {$i < [$cmd xColumnCount]} {incr i} { @@ -104,6 +129,10 @@ proc fts5_test_rowcount {cmd} { $cmd xRowCount } +proc fts5_test_rowid {cmd} { + $cmd xRowid +} + proc test_queryphrase_cb {cnt cmd} { upvar $cnt L for {set i 0} {$i < [$cmd xInstCount]} {incr i} { @@ -125,6 +154,13 @@ proc fts5_test_queryphrase {cmd} { set res } +proc fts5_queryphrase {cmd iPhrase} { + set cnt [list] + for {set j 0} {$j < [$cmd xColumnCount]} {incr j} { lappend cnt 0 } + $cmd xQueryPhrase $iPhrase [list test_queryphrase_cb cnt] + set cnt +} + proc fts5_test_phrasecount {cmd} { $cmd xPhraseCount } @@ -144,16 +180,23 @@ proc fts5_aux_test_functions {db} { foreach f { fts5_test_columnsize fts5_test_columntext + fts5_test_columnlocale fts5_test_columntotalsize fts5_test_poslist fts5_test_poslist2 fts5_test_collist + fts5_test_insttoken fts5_test_tokenize fts5_test_rowcount + fts5_test_rowid fts5_test_all fts5_test_queryphrase fts5_test_phrasecount + fts5_columntext + fts5_columnlocale + fts5_queryphrase + fts5_collist } { sqlite3_fts5_create_function $db $f $f } @@ -438,6 +481,20 @@ proc detail_is_none {} { detail_check ; expr {$::detail == "none"} } proc detail_is_col {} { detail_check ; expr {$::detail == "col" } } proc detail_is_full {} { detail_check ; expr {$::detail == "full"} } +proc foreach_tokenizer_mode {prefix script} { + set saved $::testprefix + foreach {d mapping} { + "" {} + "-origintext" {, tokenize="origintext unicode61", tokendata=1} + } 
{ + set s [string map [list %TOKENIZER% $mapping] $script] + set ::testprefix "$prefix$d" + reset_db + sqlite3_fts5_register_origintext db + uplevel $s + } + set ::testprefix $saved +} #------------------------------------------------------------------------- # Convert a poslist of the type returned by fts5_test_poslist() to a @@ -594,6 +651,10 @@ proc nearset_rc {aCol args} { list } +proc dump {tname} { + execsql_pp "SELECT * FROM ${tname}_idx" + execsql_pp "SELECT id, quote(block), fts5_decode(id,block) FROM ${tname}_data" +} #------------------------------------------------------------------------- # Code for a simple Tcl tokenizer that supports synonyms at query time. diff --git a/ext/fts5/test/fts5aa.test b/ext/fts5/test/fts5aa.test index 59ce4f6a1f..184cb77b84 100644 --- a/ext/fts5/test/fts5aa.test +++ b/ext/fts5/test/fts5aa.test @@ -22,6 +22,7 @@ ifcapable !fts5 { } foreach_detail_mode $::testprefix { +foreach_tokenizer_mode $::testprefix { do_execsql_test 1.0 { CREATE VIRTUAL TABLE t1 USING fts5(a, b, c); @@ -44,7 +45,7 @@ do_execsql_test 1.1 { # do_execsql_test 2.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%); } do_execsql_test 2.1 { INSERT INTO t1 VALUES('a b c', 'd e f'); @@ -65,14 +66,17 @@ foreach w {a b c d e f} { do_execsql_test 2.4 { INSERT INTO t1(t1) VALUES('integrity-check'); -} + PRAGMA integrity_check; + PRAGMA integrity_check(t1); +} {ok ok} #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 3.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); } foreach {i x y} { 1 {g f d b f} {h h e i a} @@ -88,14 +92,16 @@ foreach {i x y} { } { do_execsql_test 3.$i.1 { INSERT INTO t1 VALUES($x, $y) } do_execsql_test 3.$i.2 { INSERT INTO t1(t1) VALUES('integrity-check') } + do_execsql_test 3.$i.3 { PRAGMA integrity_check(t1) } ok if {[set_test_counter errors]} break } #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 4.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1(t1, rank) VALUES('pgsz', 32); } foreach {i x y} { @@ -118,8 +124,9 @@ foreach {i x y} { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 5.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1(t1, rank) VALUES('pgsz', 32); } foreach {i x y} { @@ -135,15 +142,16 @@ foreach {i x y} { 10 {ddd abcde dddd dd c} {dddd c c d abcde} } { do_execsql_test 5.$i.1 { INSERT INTO t1 VALUES($x, $y) } - do_execsql_test 5.$i.2 { INSERT INTO t1(t1) VALUES('integrity-check') } + do_execsql_test 5.$i.2 { PRAGMA integrity_check(t1) } ok if {[set_test_counter errors]} break } #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 6.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1(t1, rank) VALUES('pgsz', 32); } @@ -178,6 +186,7 @@ do_execsql_test 6.6 { #------------------------------------------------------------------------- # 
reset_db +sqlite3_fts5_register_origintext db expr srand(0) do_execsql_test 7.0 { CREATE VIRTUAL TABLE t1 USING fts5(x,y,z); @@ -219,6 +228,7 @@ for {set i 1} {$i <= 10} {incr i} { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 8.0 { CREATE VIRTUAL TABLE t1 USING fts5(x, prefix="1,2,3"); INSERT INTO t1(t1, rank) VALUES('pgsz', 32); @@ -233,6 +243,7 @@ do_execsql_test 8.1 { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db expr srand(0) @@ -277,8 +288,9 @@ for {set i 1} {$i <= 10} {incr i} { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 10.0 { - CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); } set d10 { 1 {g f d b f} {h h e i a} @@ -311,19 +323,19 @@ do_execsql_test 10.4.2 { INSERT INTO t1(t1) VALUES('integrity-check') } #------------------------------------------------------------------------- # do_catchsql_test 11.1 { - CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rank, detail=%DETAIL%); + CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rank, detail=%DETAIL% %TOKENIZER%); } {1 {reserved fts5 column name: rank}} do_catchsql_test 11.2 { - CREATE VIRTUAL TABLE rank USING fts5(a, b, c, detail=%DETAIL%); + CREATE VIRTUAL TABLE rank USING fts5(a, b, c, detail=%DETAIL% %TOKENIZER%); } {1 {reserved fts5 table name: rank}} do_catchsql_test 11.3 { - CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rowid, detail=%DETAIL%); + CREATE VIRTUAL TABLE t2 USING fts5(a, b, c, rowid, detail=%DETAIL% %TOKENIZER%); } {1 {reserved fts5 column name: rowid}} #------------------------------------------------------------------------- # do_execsql_test 12.1 { - CREATE VIRTUAL TABLE t2 USING fts5(x,y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t2 USING fts5(x,y, detail=%DETAIL% %TOKENIZER%); } {} do_catchsql_test 12.2 { @@ -338,8 +350,9 @@ do_test 12.3 { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 13.1 { - CREATE VIRTUAL TABLE t1 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1(rowid, x) VALUES(1, 'o n e'), (2, 't w o'); } {} @@ -362,8 +375,9 @@ do_execsql_test 13.6 { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 14.1 { - CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1(t1, rank) VALUES('pgsz', 32); WITH d(x,y) AS ( SELECT NULL, 'xyz xyz xyz xyz xyz xyz' @@ -414,7 +428,7 @@ do_execsql_test 15.1 { } do_catchsql_test 15.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {1 {fts5: checksum mismatch for table "t1"}} #------------------------------------------------------------------------- # @@ -426,7 +440,7 @@ do_execsql_test 16.1 { proc funk {} { db eval { UPDATE n1_config SET v=50 WHERE k='version' } set fd [db incrblob main n1_data block 10] - fconfigure $fd -encoding binary -translation binary + fconfigure $fd -translation binary # puts -nonewline $fd "\x44\x45" close $fd } @@ -439,15 +453,16 @@ db func funk funk # statement no longer fails. 
# do_catchsql_test 16.2 { - SELECT funk(), bm25(n1), funk() FROM n1 WHERE n1 MATCH 'a+b+c+d' + SELECT funk(), format('%g',bm25(n1)), funk() FROM n1 WHERE n1 MATCH 'a+b+c+d' } {0 {{} -1e-06 {}}} # {1 {SQL logic error}} #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 17.1 { - CREATE VIRTUAL TABLE b2 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE b2 USING fts5(x, detail=%DETAIL% %TOKENIZER%); INSERT INTO b2 VALUES('a'); INSERT INTO b2 VALUES('b'); INSERT INTO b2 VALUES('c'); @@ -463,8 +478,9 @@ do_test 17.2 { if {[string match n* %DETAIL%]==0} { reset_db + sqlite3_fts5_register_origintext db do_execsql_test 17.3 { - CREATE VIRTUAL TABLE c2 USING fts5(x, y, detail=%DETAIL%); + CREATE VIRTUAL TABLE c2 USING fts5(x, y, detail=%DETAIL% %TOKENIZER%); INSERT INTO c2 VALUES('x x x', 'x x x'); SELECT rowid FROM c2 WHERE c2 MATCH 'y:x'; } {1} @@ -473,8 +489,9 @@ if {[string match n* %DETAIL%]==0} { #------------------------------------------------------------------------- # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 17.1 { - CREATE VIRTUAL TABLE uio USING fts5(ttt, detail=%DETAIL%); + CREATE VIRTUAL TABLE uio USING fts5(ttt, detail=%DETAIL% %TOKENIZER%); INSERT INTO uio VALUES(NULL); INSERT INTO uio SELECT NULL FROM uio; INSERT INTO uio SELECT NULL FROM uio; @@ -521,8 +538,8 @@ do_execsql_test 17.9 { #-------------------------------------------------------------------- # do_execsql_test 18.1 { - CREATE VIRTUAL TABLE t1 USING fts5(a, b, detail=%DETAIL%); - CREATE VIRTUAL TABLE t2 USING fts5(c, d, detail=%DETAIL%); + CREATE VIRTUAL TABLE t1 USING fts5(a, b, detail=%DETAIL% %TOKENIZER%); + CREATE VIRTUAL TABLE t2 USING fts5(c, d, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1 VALUES('abc*', NULL); INSERT INTO t2 VALUES(1, 'abcdefg'); } @@ -537,8 +554,9 @@ do_execsql_test 18.3 { # fts5 table in the temp schema. # reset_db +sqlite3_fts5_register_origintext db do_execsql_test 19.0 { - CREATE VIRTUAL TABLE temp.t1 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE temp.t1 USING fts5(x, detail=%DETAIL% %TOKENIZER%); INSERT INTO t1 VALUES('x y z'); INSERT INTO t1 VALUES('w x 1'); SELECT rowid FROM t1 WHERE t1 MATCH 'x'; @@ -548,8 +566,9 @@ do_execsql_test 19.0 { # Test that 6 and 7 byte varints can be read. 
# reset_db +sqlite3_fts5_register_origintext db do_execsql_test 20.0 { - CREATE VIRTUAL TABLE temp.tmp USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE temp.tmp USING fts5(x, detail=%DETAIL% %TOKENIZER%); } set ::ids [list \ 0 [expr 1<<36] [expr 2<<36] [expr 1<<43] [expr 2<<43] @@ -567,7 +586,7 @@ do_test 20.1 { # do_execsql_test 21.0 { CREATE TEMP TABLE t8(a, b); - CREATE VIRTUAL TABLE ft USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE ft USING fts5(x, detail=%DETAIL% %TOKENIZER%); } do_execsql_test 21.1 { @@ -578,7 +597,7 @@ do_execsql_test 21.1 { } do_execsql_test 22.0 { - CREATE VIRTUAL TABLE t9 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE t9 USING fts5(x, detail=%DETAIL% %TOKENIZER%); INSERT INTO t9(rowid, x) VALUES(2, 'bbb'); BEGIN; INSERT INTO t9(rowid, x) VALUES(1, 'aaa'); @@ -593,7 +612,7 @@ do_execsql_test 22.1 { #------------------------------------------------------------------------- do_execsql_test 23.0 { - CREATE VIRTUAL TABLE t10 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE t10 USING fts5(x, detail=%DETAIL% %TOKENIZER%); CREATE TABLE t11(x); } do_execsql_test 23.1 { @@ -605,7 +624,7 @@ do_execsql_test 23.2 { #------------------------------------------------------------------------- do_execsql_test 24.0 { - CREATE VIRTUAL TABLE t12 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE t12 USING fts5(x, detail=%DETAIL% %TOKENIZER%); INSERT INTO t12 VALUES('aaaa'); } do_execsql_test 24.1 { @@ -615,6 +634,9 @@ do_execsql_test 24.1 { INSERT INTO t12 VALUES('aaaa'); END; } +execsql_pp { + SELECT rowid, hex(block) FROM t12_data +} do_execsql_test 24.2 { INSERT INTO t12(t12) VALUES('integrity-check'); } @@ -624,7 +646,7 @@ do_execsql_test 24.3 { #------------------------------------------------------------------------- do_execsql_test 25.0 { - CREATE VIRTUAL TABLE t13 USING fts5(x, detail=%DETAIL%); + CREATE VIRTUAL TABLE t13 USING fts5(x, detail=%DETAIL% %TOKENIZER%); } do_execsql_test 25.1 { BEGIN; @@ -635,6 +657,7 @@ SELECT * FROM t13('BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB } +} } expand_all_sql db diff --git a/ext/fts5/test/fts5ab.test b/ext/fts5/test/fts5ab.test index 3979dd44be..a74c0f8884 100644 --- a/ext/fts5/test/fts5ab.test +++ b/ext/fts5/test/fts5ab.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5ab -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
 ifcapable !fts5 {
   finish_test
   return
@@ -180,7 +180,11 @@ if {[detail_is_full]} {
   } {1 2}
 }
 
-do_execsql_test 4.5 {
+do_execsql_test 4.5.1 {
+  SELECT rowid FROM s1 WHERE s1 MATCH 'a AND x'
+} {1 2}
+
+do_execsql_test 4.5.2 {
   SELECT rowid FROM s1 WHERE s1 MATCH 'a x'
 } {1 2}
 
@@ -290,6 +294,39 @@ do_execsql_test 7.0 {
   INSERT INTO x1 VALUES($doc);
 }
 
+#-------------------------------------------------------------------------
+# Forum post: https://sqlite.org/forum/forumpost/ea4d8c9acb
+#
+reset_db
+do_execsql_test 8.0 {
+  PRAGMA encoding = 'UTF-16le';
+  CREATE VIRTUAL TABLE vt0 USING fts5(c0);
+}
+set v [db one {SELECT x'2a12'}]
+do_execsql_test 8.1 {
+  INSERT INTO vt0 VALUES ($v);
+}
+do_execsql_test 8.2 {
+  SELECT quote(c0) FROM vt0
+} {X'2A12'}
+do_execsql_test 8.3 {
+  INSERT INTO vt0(vt0) VALUES('integrity-check');
+} {}
+reset_db
+do_execsql_test 8.4 {
+  PRAGMA encoding = 'UTF-16le';
+  CREATE VIRTUAL TABLE vt0 USING fts5(c0);
+}
+do_execsql_test 8.5 {
+  INSERT INTO vt0 VALUES (x'2a12');
+}
+do_execsql_test 8.6 {
+  SELECT quote(c0) FROM vt0
+} {X'2A12'}
+do_execsql_test 8.7 {
+  INSERT INTO vt0(vt0) VALUES('integrity-check');
+} {}
+
 } ;# foreach_detail_mode...
 
diff --git a/ext/fts5/test/fts5ac.test b/ext/fts5/test/fts5ac.test
index f3a914653f..4628e909c1 100644
--- a/ext/fts5/test/fts5ac.test
+++ b/ext/fts5/test/fts5ac.test
@@ -16,7 +16,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5ac
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5ad.test b/ext/fts5/test/fts5ad.test
index 524da6deae..27806a4c0c 100644
--- a/ext/fts5/test/fts5ad.test
+++ b/ext/fts5/test/fts5ad.test
@@ -18,7 +18,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5ad
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5ae.test b/ext/fts5/test/fts5ae.test
index d9f132ca97..205a59a69f 100644
--- a/ext/fts5/test/fts5ae.test
+++ b/ext/fts5/test/fts5ae.test
@@ -16,7 +16,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5ae
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5af.test b/ext/fts5/test/fts5af.test
index a3ff330ef3..9c95ef2daa 100644
--- a/ext/fts5/test/fts5af.test
+++ b/ext/fts5/test/fts5af.test
@@ -18,7 +18,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5af
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
ifcapable !fts5 { finish_test return @@ -193,4 +193,34 @@ do_execsql_test 5.6 { } ;# foreach_detail_mode +reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE t1 USING fts5(colA, colB); + INSERT INTO t1 VALUES('A B C', 'D E F'); +} + +do_execsql_test 6.1 { + SELECT colA, colB, snippet(t1,0,'[', ']','...',1) FROM t1 WHERE t1 MATCH 'B'; +} {{A B C} {D E F} ...[B]...} +breakpoint +do_execsql_test 6.2 { + SELECT colA, colB, snippet(t1, 1,'[',']','...',2) FROM t1 WHERE t1 MATCH 'B'; +} {{A B C} {D E F} {D E...}} +do_execsql_test 6.3 { + SELECT colA, colB, snippet(t1, 1,'[',']','...',1) FROM t1 WHERE t1 MATCH 'B'; +} {{A B C} {D E F} {D...}} + +do_execsql_test 6.1 { + SELECT colA, colB, snippet(t1,0,'[', ']','...',1) FROM t1 WHERE t1 MATCH 'A'; +} {{A B C} {D E F} [A]...} +breakpoint +do_execsql_test 6.2 { + SELECT colA, colB, snippet(t1, 1,'[',']','...',2) FROM t1 WHERE t1 MATCH 'A'; +} {{A B C} {D E F} {D E...}} +do_execsql_test 6.3 { + SELECT colA, colB, snippet(t1, 1,'[',']','...',1) FROM t1 WHERE t1 MATCH 'A'; +} {{A B C} {D E F} {D...}} + + + finish_test diff --git a/ext/fts5/test/fts5ag.test b/ext/fts5/test/fts5ag.test index 9ead957c9d..42cd913784 100644 --- a/ext/fts5/test/fts5ag.test +++ b/ext/fts5/test/fts5ag.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5ag -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5ah.test b/ext/fts5/test/fts5ah.test index 24613f5c41..bf9c9e9dbc 100644 --- a/ext/fts5/test/fts5ah.test +++ b/ext/fts5/test/fts5ah.test @@ -11,11 +11,12 @@ # This file implements regression tests for SQLite library. The # focus of this script is testing the FTS5 module. # +# TESTRUNNER: slow source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5ah -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -162,6 +163,17 @@ do_execsql_test 1.8.2 { SELECT count(*) FROM t1 WHERE t1 MATCH 'x' AND rowid < 'text'; } {10000} +do_execsql_test 1.8.3 { + SELECT count(*) FROM t1 WHERE t1 MATCH 'x' AND rowid<5000 AND rowid < 'text'; +} {4999} +do_execsql_test 1.8.4 { + SELECT count(*) FROM t1 WHERE t1 MATCH 'x' AND rowid>5000 AND rowid > 'text'; +} {0} + +do_catchsql_test 1.9 { + SELECT * FROM t1('*xy'); +} {1 {unknown special query: xy}} + } ;# foreach_detail_mode #db eval {SELECT rowid, fts5_decode(rowid, block) aS r FROM t1_data} {puts $r} diff --git a/ext/fts5/test/fts5ai.test b/ext/fts5/test/fts5ai.test index 20e1069398..a6576d3afc 100644 --- a/ext/fts5/test/fts5ai.test +++ b/ext/fts5/test/fts5ai.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5ai -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5aj.test b/ext/fts5/test/fts5aj.test index 50dae20162..e802306b38 100644 --- a/ext/fts5/test/fts5aj.test +++ b/ext/fts5/test/fts5aj.test @@ -19,7 +19,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5aj -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5ak.test b/ext/fts5/test/fts5ak.test
index 0a3cd6a783..253f14fc79 100644
--- a/ext/fts5/test/fts5ak.test
+++ b/ext/fts5/test/fts5ak.test
@@ -17,7 +17,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5ak
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
@@ -154,4 +154,30 @@
 do_execsql_test 3.2 {
 }
 
+# 2023-04-06 https://sqlite.org/forum/forumpost/cae4367d9b
+#
+# This is not a test of FTS5, but rather a test of what happens to
+# prepared statements that encounter SQLITE_SCHEMA while other prepared
+# statements are running. The original problem POC used FTS5, and so
+# it seems reasonable to put the test here.
+#
+# The vdbeaux24.test module in TH3 also tests this same behavior but
+# without requiring FTS5 or any other extension.
+#
+reset_db
+db null NULL
+do_execsql_test 4.0 {
+  CREATE TABLE t5(a PRIMARY KEY);
+  INSERT INTO t5 VALUES(0);
+  CREATE VIRTUAL TABLE t6 USING fts5(0);
+  DELETE FROM t6;
+  CREATE TABLE t7(x);
+  WITH cte(a) AS (
+    SELECT a FROM t5
+    WHERE ((0,0) IN (SELECT 0, LAG(0) OVER (PARTITION BY 0) FROM t6), 0)
+          < (a,0)
+  )
+  SELECT max(a) FROM cte;
+} NULL
+
 finish_test
diff --git a/ext/fts5/test/fts5al.test b/ext/fts5/test/fts5al.test
index 842d991a37..7187ad67c7 100644
--- a/ext/fts5/test/fts5al.test
+++ b/ext/fts5/test/fts5al.test
@@ -17,7 +17,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5al
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
@@ -293,6 +293,16 @@
 do_catchsql_test 4.4.4 {
   SELECT *, rank FROM t3 WHERE t3 MATCH 'a' AND rank MATCH NULL
 } {1 {parse error in rank function: }}
 
+# Check that the second and subsequent rank= constraints are ignored.
+#
+do_catchsql_test 4.3.3 {
+  SELECT *, rank FROM t3
+  WHERE t3 MATCH 'a' AND
+        rank MATCH 'nosuch()' AND
+        rank MATCH 'rowidmod(3)'
+  ORDER BY rank ASC
+} {1 {unable to use function MATCH in the requested context}}
+
 } ;# foreach_detail_mode
 
diff --git a/ext/fts5/test/fts5alter.test b/ext/fts5/test/fts5alter.test
index 67f948cbbe..bb5f78dc86 100644
--- a/ext/fts5/test/fts5alter.test
+++ b/ext/fts5/test/fts5alter.test
@@ -16,7 +16,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5alter
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5auto.test b/ext/fts5/test/fts5auto.test
index 79d432b812..b771af912e 100644
--- a/ext/fts5/test/fts5auto.test
+++ b/ext/fts5/test/fts5auto.test
@@ -16,7 +16,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5auto
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5aux.test b/ext/fts5/test/fts5aux.test
index 561067c4bc..960dbc5117 100644
--- a/ext/fts5/test/fts5aux.test
+++ b/ext/fts5/test/fts5aux.test
@@ -15,7 +15,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5aux
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
ifcapable !fts5 { finish_test return @@ -307,5 +307,98 @@ do_catchsql_test 10.1.4 { SELECT group_concat(firstcol(t1), '.') FROM t1 GROUP BY rowid } {1 {unable to use function firstcol in the requested context}} -finish_test +#------------------------------------------------------------------------- +# Test that xInstCount() works from within an xPhraseQuery() callback. +# +reset_db + +proc xCallback {cmd} { + incr ::hitcount [$cmd xInstCount] + return SQLITE_OK +} +proc fts5_hitcount {cmd} { + set ::hitcount 0 + $cmd xQueryPhrase 0 xCallback + return $::hitcount +} +sqlite3_fts5_create_function db fts5_hitcount fts5_hitcount + +do_execsql_test 11.1 { + CREATE VIRTUAL TABLE x1 USING fts5(z); + INSERT INTO x1 VALUES('one two three'); + INSERT INTO x1 VALUES('one two one three one'); + INSERT INTO x1 VALUES('one two three'); +} + +do_execsql_test 11.2 { + SELECT fts5_hitcount(x1) FROM x1('one') LIMIT 1; +} {5} + +#------------------------------------------------------------------------- +# Test that xColumnText returns SQLITE_RANGE when it should. +# +reset_db +fts5_aux_test_functions db +do_execsql_test 12.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, b, c); + INSERT INTO t1 VALUES('one', 'two', 'three'); + INSERT INTO t1 VALUES('one', 'one', 'one'); + INSERT INTO t1 VALUES('two', 'two', 'two'); + INSERT INTO t1 VALUES('three', 'three', 'three'); +} +do_catchsql_test 12.1.1 { + SELECT fts5_columntext(t1, -1) FROM t1('two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.1.2 { + SELECT fts5_columntext(t1, 3) FROM t1('two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.1.2 { + SELECT fts5_columntext(t1, 1) FROM t1('one AND two'); +} {0 two} + +do_catchsql_test 12.2.1 { + SELECT fts5_queryphrase(t1, -1) FROM t1('one AND two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.2.2 { + SELECT fts5_queryphrase(t1, 2) FROM t1('one AND two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.2.3 { + SELECT fts5_queryphrase(t1, 1) FROM t1('one AND two'); +} {0 {{1 2 1}}} + +do_catchsql_test 12.3.1 { + SELECT fts5_collist(t1, -1) FROM t1('one AND two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.3.2 { + SELECT fts5_collist(t1, 2) FROM t1('one AND two'); +} {1 SQLITE_RANGE} +do_catchsql_test 12.3.3 { + SELECT fts5_collist(t1, 1) FROM t1('one AND two'); +} {0 1} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 13.1 { + CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=ascii); + INSERT INTO t1 VALUES('a b c'), ('d e f'); + PRAGMA integrity_check; +} {ok} + +do_catchsql_test 13.2 { + SELECT highlight(t1, 0, '[', ']') FROM t1 +} {0 {{a b c} {d e f}}} + +do_execsql_test 13.3 { + PRAGMA writable_schema = 1; + UPDATE sqlite_schema SET sql = 'CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=blah)' + WHERE name = 't1'; +} + +db close +sqlite3 db test.db +do_catchsql_test 13.4 { + SELECT highlight(t1, 0, '[', ']') FROM t1 +} {1 {SQL logic error}} + +finish_test diff --git a/ext/fts5/test/fts5aux2.test b/ext/fts5/test/fts5aux2.test new file mode 100644 index 0000000000..2352970ec7 --- /dev/null +++ b/ext/fts5/test/fts5aux2.test @@ -0,0 +1,71 @@ +# 2024 June 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focusing on the auxiliary function APIs. 
+# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5aux + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE x1 USING fts5(a, b); + INSERT INTO x1 VALUES('a b', 'c d'); + INSERT INTO x1 VALUES('d e', 'a b'); + INSERT INTO x1 VALUES('a b', 'e f'); + INSERT INTO x1 VALUES('d e', 'c d'); +} + +fts5_aux_test_functions db +do_execsql_test 1.1 { + SELECT fts5_test_all(x1) FROM x1 WHERE rowid=2 +} [list [list {*}{ + columnsize {2 2} + columntext {{d e} {a b}} + columntotalsize {8 8} + poslist {} + tokenize {{d e} {a b}} + rowcount 4 +}]] + +do_execsql_test 1.2 { + SELECT fts5_test_columntext(x1) FROM x1 +} { + {{a b} {c d}} + {{d e} {a b}} + {{a b} {e f}} + {{d e} {c d}} +} + +do_execsql_test 1.3 { + SELECT fts5_test_rowid(x1) FROM x1 +} { + 1 2 3 4 +} +do_execsql_test 1.4 { + SELECT fts5_test_phrasecount(x1) FROM x1 +} { + 0 0 0 0 +} +do_catchsql_test 1.5 { + SELECT fts5_queryphrase(x1, 0) FROM x1 +} {1 SQLITE_RANGE} +do_execsql_test 1.6 { + SELECT fts5_test_rowcount(x1) FROM x1 +} {4 4 4 4} + + +finish_test diff --git a/ext/fts5/test/fts5auxdata.test b/ext/fts5/test/fts5auxdata.test index a2a41704c5..7f99fed316 100644 --- a/ext/fts5/test/fts5auxdata.test +++ b/ext/fts5/test/fts5auxdata.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5auxdata -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5bigid.test b/ext/fts5/test/fts5bigid.test new file mode 100644 index 0000000000..ae20ec641e --- /dev/null +++ b/ext/fts5/test/fts5bigid.test @@ -0,0 +1,62 @@ +# 2023 May 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5bigid + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 {
+  finish_test
+  return
+}
+
+set nRow 20000
+
+proc do_ascdesc_test {tn query} {
+  set ::lAsc [db eval { SELECT rowid FROM x1($query) }]
+  set ::lDesc [db eval { SELECT rowid FROM x1($query) ORDER BY rowid DESC }]
+  do_test $tn.1 { lsort -integer $::lAsc } $::lAsc
+  do_test $tn.2 { lsort -integer -decr $::lDesc } $::lDesc
+  do_test $tn.3 { lsort -integer $::lDesc } $::lAsc
+}
+
+do_execsql_test 1.0 {
+  CREATE VIRTUAL TABLE x1 USING fts5(a);
+}
+
+do_test 1.1 {
+  for {set ii 0} {$ii < $nRow} {incr ii} {
+    db eval {
+      REPLACE INTO x1(rowid, a) VALUES(random(), 'movement at the station');
+    }
+  }
+} {}
+
+do_ascdesc_test 1.2 "the"
+
+do_execsql_test 1.3 {
+  DELETE FROM x1
+}
+
+do_test 1.4 {
+  for {set ii 0} {$ii < $nRow} {incr ii} {
+    db eval {
+      INSERT INTO x1(rowid, a) VALUES(
+          $ii + 0x6FFFFFFFFFFFFFFF, 'movement at the station'
+      );
+    }
+  }
+} {}
+
+do_ascdesc_test 1.5 "movement"
+
+finish_test
diff --git a/ext/fts5/test/fts5bigpl.test b/ext/fts5/test/fts5bigpl.test
index 2c9df11b1f..9e3d86c0e6 100644
--- a/ext/fts5/test/fts5bigpl.test
+++ b/ext/fts5/test/fts5bigpl.test
@@ -17,7 +17,7 @@
 source [file join [file dirname [info script]] fts5_common.tcl]
 set testprefix fts5bigpl
 
-# If SQLITE_ENABLE_FTS5 is defined, omit this file.
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
 ifcapable !fts5 {
   finish_test
   return
diff --git a/ext/fts5/test/fts5blob.test b/ext/fts5/test/fts5blob.test
new file mode 100644
index 0000000000..9348554104
--- /dev/null
+++ b/ext/fts5/test/fts5blob.test
@@ -0,0 +1,166 @@
+# 2024 July 30
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+#   May you do good and not evil.
+#   May you find forgiveness for yourself and forgive others.
+#   May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# This file verifies that:
+#
+#   * blob values may be written to locale=0 tables.
+#
+#   * blob values - other than fts5_locale() values - may not be written
+#     to locale=1 tables. This is an SQLITE_MISMATCH error
+#
+#   * blob values may be returned by queries on the external-content table
+#     of a locale=0 table.
++#
+#   * blob values may not be returned by queries on the external-content
+#     table of a locale=1 table, apart from fts5_locale() blobs. This is an
+#     SQLITE_MISMATCH error.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5blob
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+  finish_test
+  return
+}
+
+# Test that blobs may be stored in normal locale=0 tables.
+# +foreach {tn enc} { + 1 utf8 + 2 utf16 +} { + reset_db + fts5_aux_test_functions db + + execsql "PRAGMA encoding = $enc" + + execsql " + CREATE VIRTUAL TABLE t1 USING fts5(x, y); + " + do_execsql_test 1.$tn.0 { + CREATE VIRTUAL TABLE tt USING fts5vocab('t1', 'instance'); + INSERT INTO t1(rowid, x, y) VALUES(1, 555, X'0000000041424320444546'); + INSERT INTO t1(rowid, x, y) VALUES(2, 666, X'41424300444546'); + INSERT INTO t1(rowid, x, y) VALUES(3, 777, 'xyz'); + } + + do_execsql_test 1.$tn.1 { + SELECT rowid, quote(x), quote(y) FROM t1 + } { + 1 555 X'0000000041424320444546' + 2 666 X'41424300444546' + 3 777 'xyz' + } + + do_execsql_test 1.$tn.2 { + DELETE FROM t1 WHERE rowid=2; + DELETE FROM t1 WHERE rowid=1; + } + + do_execsql_test 1.$tn.3 { + PRAGMA integrity_check; + } {ok} +} + +#-------------------------------------------------------------------------- +# Test that a blob may be stored and retrieved in an unindexed column of +# a regular table with locale=1. +# +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x, y UNINDEXED, locale=1); + INSERT INTO t1(rowid, x, y) VALUES(12, 'twelve', X'0000000041424320444546'); +} + +do_execsql_test 2.1 { + select rowid, x, quote(y) FROM t1 +} { + 12 twelve X'0000000041424320444546' +} + +#-------------------------------------------------------------------------- +# Test that blobs may not be written to any type of table with locale=1 +# set. Except, they may be written to UNINDEXED columns. +# +reset_db +do_execsql_test 3.0 { + CREATE TABLE t1(a, b); + + CREATE VIRTUAL TABLE x1 USING fts5(a, b, locale=1); + CREATE VIRTUAL TABLE x2 USING fts5(a, b, locale=1, content=t2); + CREATE VIRTUAL TABLE x3 USING fts5(a, b, locale=1, content=); +} + +do_catchsql_test 3.1 { + INSERT INTO x1(rowid, a, b) VALUES(113, 'hello world', X'123456'); +} {0 {}} +do_catchsql_test 3.2 { + INSERT INTO x2(rowid, a, b) VALUES(113, 'hello world', X'123456'); +} {0 {}} +do_catchsql_test 3.3 { + INSERT INTO x3(rowid, a, b) VALUES(113, 'hello world', X'123456'); +} {0 {}} + + +#-------------------------------------------------------------------------- +# Test that fts5_locale() values may not be written to any type of table +# without locale=1 set. Even to an UNINDEXED column. 
+# +reset_db +do_execsql_test 3.0 { + CREATE TABLE t1(a, b); + + CREATE VIRTUAL TABLE x1 USING fts5(a, b); + CREATE VIRTUAL TABLE x2 USING fts5(a, b, content=t2); + CREATE VIRTUAL TABLE x3 USING fts5(a, b, content=); + + CREATE VIRTUAL TABLE x4 USING fts5(a, b, c UNINDEXED); +} + +do_catchsql_test 3.1 { + INSERT INTO x1(rowid, a, b) + VALUES(113, 'hello world', fts5_locale('en_AU', 'abc')); +} {1 {fts5_locale() requires locale=1}} +do_catchsql_test 3.2 { + INSERT INTO x2(rowid, a, b) + VALUES(113, 'hello world', fts5_locale('en_AU', 'abc')); +} {1 {fts5_locale() requires locale=1}} +do_catchsql_test 3.3 { + INSERT INTO x3(rowid, a, b) + VALUES(113, 'hello world', fts5_locale('en_AU', 'abc')); +} {1 {fts5_locale() requires locale=1}} +do_catchsql_test 3.4 { + INSERT INTO x4(rowid, a, b, c) + VALUES(113, 'hello world', 'yesno', fts5_locale('en_AU', 'abc')); +} {1 {fts5_locale() requires locale=1}} + + +#------------------------------------------------------------------------- +# +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); +} + +foreach {tn sql} { + 1 { INSERT INTO x1(rowid, x) VALUES(4.5, 'abcd') } + 2 { INSERT INTO x1(rowid, x) VALUES('xyz', 'abcd') } + 3 { INSERT INTO x1(rowid, x) VALUES(X'001122', 'abcd') } +} { + do_catchsql_test 4.1.$tn $sql {1 {datatype mismatch}} +} + + +finish_test + + diff --git a/ext/fts5/test/fts5cat.test b/ext/fts5/test/fts5cat.test index 483f64bfef..71e2abe3ae 100644 --- a/ext/fts5/test/fts5cat.test +++ b/ext/fts5/test/fts5cat.test @@ -55,5 +55,22 @@ do_execsql_test 1.5 { SELECT * FROM t4t } {สนามกีฬา 1 1} +#------------------------------------------------------------------------- +reset_db +do_execsql_test 2.0 " + CREATE VIRTUAL TABLE x1 USING fts5(c, + tokenize=\"unicode61 categories ' \t'\"); +" + +do_catchsql_test 2.1 " + CREATE VIRTUAL TABLE x2 USING fts5(c, + tokenize=\"unicode61 categories 'N*\t\tMYZ'\"); +" {1 {error in tokenizer constructor}} + +do_catchsql_test 2.2 " + CREATE VIRTUAL TABLE x2 USING fts5(c, + tokenize=\"unicode61 categories 'N*\t\tXYZ'\"); +" {1 {error in tokenizer constructor}} + finish_test diff --git a/ext/fts5/test/fts5circref.test b/ext/fts5/test/fts5circref.test index ea992195af..8732fa17dd 100644 --- a/ext/fts5/test/fts5circref.test +++ b/ext/fts5/test/fts5circref.test @@ -72,7 +72,7 @@ foreach {tn schema sql} { } { db_restore_and_reopen do_execsql_test 1.1.$tn.1 $schema - do_catchsql_test 1.1.$tn.2 $sql {1 {SQL logic error}} + do_catchsql_test 1.1.$tn.2 $sql {1 {database disk image is malformed}} db close } diff --git a/ext/fts5/test/fts5colset.test b/ext/fts5/test/fts5colset.test index 7243743b51..e5429572c5 100644 --- a/ext/fts5/test/fts5colset.test +++ b/ext/fts5/test/fts5colset.test @@ -79,7 +79,7 @@ foreach_detail_mode $::testprefix { do_catchsql_test 4.1 { SELECT * FROM t1 WHERE rowid MATCH 'a' - } {1 {unable to use function MATCH in the requested context}} + } {1 {no query solution}} } #------------------------------------------------------------------------- diff --git a/ext/fts5/test/fts5columnsize.test b/ext/fts5/test/fts5columnsize.test index 2b03d575aa..7af49184b8 100644 --- a/ext/fts5/test/fts5columnsize.test +++ b/ext/fts5/test/fts5columnsize.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5columnsize -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5config.test b/ext/fts5/test/fts5config.test index 35894c6bb0..28f3146ea3 100644 --- a/ext/fts5/test/fts5config.test +++ b/ext/fts5/test/fts5config.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5config -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5conflict.test b/ext/fts5/test/fts5conflict.test index 644db53a1e..b5bf0a1160 100644 --- a/ext/fts5/test/fts5conflict.test +++ b/ext/fts5/test/fts5conflict.test @@ -65,4 +65,44 @@ do_execsql_test 2.1 { INSERT INTO fts_idx(fts_idx) VALUES('integrity-check'); } +#------------------------------------------------------------------------- +# Tests for OR IGNORE conflict handling. +# +reset_db +foreach_detail_mode $::testprefix { + + do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5(xyz, detail=%DETAIL%); + + BEGIN; + INSERT INTO t1(rowid, xyz) VALUES(13, 'thirteen documents'); + INSERT INTO t1(rowid, xyz) VALUES(14, 'fourteen documents'); + INSERT INTO t1(rowid, xyz) VALUES(15, 'fifteen documents'); + COMMIT; + } + + set db_cksum [cksum] + foreach {tn sql} { + 1 { + INSERT OR IGNORE INTO t1(rowid, xyz) VALUES(14, 'new text'); + } + 2 { + UPDATE OR IGNORE t1 SET rowid=13 WHERE rowid=15; + } + 3 { + INSERT OR IGNORE INTO t1(rowid, xyz) + SELECT 13, 'some text' + UNION ALL + SELECT 14, 'some text' + UNION ALL + SELECT 15, 'some text' + } + } { + do_execsql_test 3.1.$tn.1 $sql + do_test 3.1.$tn.2 { cksum } $db_cksum + } + +} + + finish_test diff --git a/ext/fts5/test/fts5content.test b/ext/fts5/test/fts5content.test index 74a74e2ad0..05b5cc6113 100644 --- a/ext/fts5/test/fts5content.test +++ b/ext/fts5/test/fts5content.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5content -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -293,5 +293,76 @@ do_catchsql_test 7.2.5 { SELECT * FROM t1('abc') ORDER BY rank; } {1 {recursively defined fts5 content table}} +#--------------------------------------------------------------------------- +# Check that if the content table is a view, and that view contains an +# error, a reasonable error message is returned if the user tries to +# read from the view via the fts5 table. 
+# +reset_db +do_execsql_test 8.1 { + CREATE VIEW a1 AS + SELECT 1 AS r, text_value(1) AS t + UNION ALL + SELECT 2 AS r, text_value(2) AS t; + + CREATE VIRTUAL TABLE t1 USING fts5(t, content='a1', content_rowid='r'); +} + +foreach {tn sql} { + 1 "SELECT * FROM t1" + 2 "INSERT INTO t1(t1) VALUES('rebuild')" + 3 "SELECT * FROM t1 WHERE rowid=1" +} { + do_catchsql_test 8.2.$tn $sql {1 {no such function: text_value}} +} + +proc text_value {i} { + if {$i==1} { return "one" } + if {$i==2} { return "two" } + return "many" +} +db func text_value text_value + +do_execsql_test 8.3.1 { SELECT * FROM t1 } {one two} +do_execsql_test 8.3.2 { INSERT INTO t1(t1) VALUES('rebuild') } +do_execsql_test 8.3.3 { SELECT * FROM t1 WHERE rowid=1 } {one} +do_execsql_test 8.3.4 { SELECT rowid FROM t1('two') } {2} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 9.1 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b); + INSERT INTO t1 VALUES(1, 'one two three'); + INSERT INTO t1 VALUES(2, 'one two three'); + + CREATE VIRTUAL TABLE ft USING fts5(b, content=t1, content_rowid=a); + INSERT INTO ft(ft) VALUES('rebuild'); +} + +do_execsql_test 9.2 { + SELECT rowid, b FROM ft('two'); +} { + 1 {one two three} + 2 {one two three} +} + +do_execsql_test 9.3 { + DELETE FROM t1 WHERE a=2; +} + +do_catchsql_test 9.4 { + SELECT rowid FROM ft('two'); +} {0 {1 2}} + +do_catchsql_test 9.5 { + SELECT * FROM ft('two'); +} {1 {fts5: missing row 2 from content table 'main'.'t1'}} + +fts5_aux_test_functions db + +do_catchsql_test 9.6 { + SELECT rowid, fts5_columntext(ft, 0) FROM ft('two'); +} {1 SQLITE_CORRUPT_VTAB} + finish_test diff --git a/ext/fts5/test/fts5contentless.test b/ext/fts5/test/fts5contentless.test new file mode 100644 index 0000000000..991e9888fc --- /dev/null +++ b/ext/fts5/test/fts5contentless.test @@ -0,0 +1,290 @@ +# 2014 Dec 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests for the content= and content_rowid= options. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5contentless + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +# Check that it is not possible to specify "contentless_delete=1" for +# anything other than a contentless table. +# +set res(0) {0 {}} +set res(1) {1 {contentless_delete=1 requires a contentless table}} +foreach {tn sql bError} { + 1 "(a, b, contentless_delete=1)" 1 + 2 "(a, b, contentless_delete=1, content=abc)" 1 + 3 "(a, b, contentless_delete=1, content=)" 0 + 4 "(content=, contentless_delete=1, a)" 0 + 5 "(content='', contentless_delete=1, hello)" 0 +} { + execsql { BEGIN } + do_catchsql_test 1.$tn "CREATE VIRTUAL TABLE t1 USING fts5 $sql" $res($bError) + execsql { ROLLBACK } +} + +# Check that it is not possible to specify "contentless_delete=1" +# along with columnsize=1. 
+# +set res(0) {0 {}} +set res(1) {1 {contentless_delete=1 is incompatible with columnsize=0}} +foreach {tn sql bError} { + 2 "(a, b, content='', contentless_delete=1, columnsize=0)" 1 +} { + execsql { BEGIN } + do_catchsql_test 1.$tn "CREATE VIRTUAL TABLE t1 USING fts5 $sql" $res($bError) + execsql { ROLLBACK } +} + +# Check that if contentless_delete=1 is specified, then the "origin" +# column is added to the %_docsize table. +reset_db +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE x1 USING fts5(c, content=''); + CREATE VIRTUAL TABLE x2 USING fts5(c, content='', contentless_delete=1); +} +do_execsql_test 3.1 { + SELECT sql FROM sqlite_schema WHERE name IN ('x1_docsize', 'x2_docsize'); +} { + {CREATE TABLE 'x1_docsize'(id INTEGER PRIMARY KEY, sz BLOB)} + {CREATE TABLE 'x2_docsize'(id INTEGER PRIMARY KEY, sz BLOB, origin INTEGER)} +} + +do_execsql_test 3.2.1 { + SELECT hex(block) FROM x1_data WHERE id=10 +} {00000000000000} +do_execsql_test 3.2.2 { + SELECT hex(block) FROM x2_data WHERE id=10 +} {00000000FF000001000000} + +do_execsql_test 3.3 { + INSERT INTO x2 VALUES('first text'); + INSERT INTO x2 VALUES('second text'); +} +do_execsql_test 3.4 { + SELECT id, origin FROM x2_docsize +} {1 1 2 2} +do_execsql_test 3.5 { + SELECT level, segment, loc1, loc2 FROM fts5_structure( + (SELECT block FROM x2_data WHERE id=10) + ) +} { + 0 0 1 1 + 0 1 2 2 +} +do_execsql_test 3.6 { + INSERT INTO x2(x2) VALUES('optimize'); +} +do_execsql_test 3.7 { + SELECT level, segment, loc1, loc2 FROM fts5_structure( + (SELECT block FROM x2_data WHERE id=10) + ) +} { + 1 0 1 2 +} + +do_execsql_test 3.8 { + DELETE FROM x2 WHERE rowid=2; +} + +do_execsql_test 3.9 { + SELECT rowid FROM x2('text') +} {1} + +#-------------------------------------------------------------------------- +reset_db +proc document {n} { + set vocab [list A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + set ret [list] + for {set ii 0} {$ii < $n} {incr ii} { + lappend ret [lindex $vocab [expr int(rand()*[llength $vocab])]] + } + set ret +} + +set nRow 1000 + +do_execsql_test 4.0 { + CREATE TABLE t1(x); + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + INSERT INTO ft(ft, rank) VALUES('pgsz', 100); +} +do_test 4.1 { + for {set ii 0} {$ii < $nRow} {incr ii} { + set doc [document 6] + execsql { + INSERT INTO t1 VALUES($doc); + INSERT INTO ft VALUES($doc); + } + } +} {} + +foreach v {A B C D E F G H I J K L M N O P Q R S T U V W X Y Z} { + set L1 [execsql {SELECT rowid FROM t1 WHERE x LIKE '%'||$v||'%'}] + set L2 [execsql {SELECT rowid FROM ft($v)}] + do_test 4.2.$v { set L1 } $L2 +} + +do_test 4.3 { + for {set ii 1} {$ii < $nRow} {incr ii 2} { + execsql { + DELETE FROM ft WHERE rowid=$ii; + DELETE FROM t1 WHERE rowid=$ii; + } + } +} {} + +foreach v {A B C D E F G H I J K L M N O P Q R S T U V W X Y Z} { + set L1 [execsql {SELECT rowid FROM t1 WHERE x LIKE '%'||$v||'%'}] + set L2 [execsql {SELECT rowid FROM ft($v)}] + do_test 4.4.$v { set L1 } $L2 +} + +do_execsql_test 4.5 { + INSERT INTO ft(ft) VALUES('optimize'); +} {} + +foreach v {A B C D E F G H I J K L M N O P Q R S T U V W X Y Z} { + set L1 [execsql {SELECT rowid FROM t1 WHERE x LIKE '%'||$v||'%'}] + set L2 [execsql {SELECT rowid FROM ft($v)}] + do_test 4.6.$v { set L1 } $L2 +} + +#execsql_pp { SELECT fts5_decode(id, block) FROM ft_data } + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + INSERT INTO ft(rowid, x) VALUES(1, 'one 
two three'); + INSERT INTO ft(rowid, x) VALUES(2, 'one two four'); + INSERT INTO ft(rowid, x) VALUES(3, 'one two five'); + INSERT INTO ft(rowid, x) VALUES(4, 'one two seven'); + INSERT INTO ft(rowid, x) VALUES(5, 'one two eight'); +} + +do_execsql_test 5.1 { + DELETE FROM ft WHERE rowid=2 +} + +do_execsql_test 5.2 { + SELECT rowid FROM ft +} {1 3 4 5} + +do_catchsql_test 5.3 { + UPDATE ft SET x='four six' WHERE rowid=3 +} {0 {}} + +do_execsql_test 5.4 { + SELECT rowid FROM ft('one'); +} {1 4 5} + +do_execsql_test 5.5 { + REPLACE INTO ft(rowid, x) VALUES(3, 'four six'); + SELECT rowid FROM ft('one'); +} {1 4 5} + +do_execsql_test 5.6 { + REPLACE INTO ft(rowid, x) VALUES(6, 'one two eleven'); + SELECT rowid FROM ft('one'); +} {1 4 5 6} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + INSERT INTO ft(rowid, x) VALUES(1, 'one two three'); + INSERT INTO ft(rowid, x) VALUES(2, 'one two four'); +} + +do_test 6.1 { + db eval { SELECT rowid FROM ft('one two') } { + if {$rowid==1} { + db eval { INSERT INTO ft(rowid, x) VALUES(3, 'one two four') } + } + } +} {} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 7.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); +} + +set lRowid [list -450 0 1 2 42] + +do_test 7.1 { + execsql BEGIN + foreach r $lRowid { + execsql { INSERT INTO ft(rowid, x) VALUES($r, 'one one one'); } + } + execsql COMMIT +} {} + +do_test 7.2 { + execsql BEGIN + foreach r $lRowid { + execsql { REPLACE INTO ft(rowid, x) VALUES($r, 'two two two'); } + } + execsql COMMIT +} {} + +do_execsql_test 7.3 { SELECT rowid FROM ft('one'); } {} +do_execsql_test 7.4 { SELECT rowid FROM ft('two'); } $lRowid + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 8.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + INSERT INTO ft VALUES('hello world'); + INSERT INTO ft VALUES('one two three'); +} + +do_catchsql_test 8.1 { + INSERT INTO ft(ft, rowid, x) VALUES('delete', 1, 'hello world'); +} {1 {'delete' may not be used with a contentless_delete=1 table}} + +do_execsql_test 8.2 { + BEGIN; + INSERT INTO ft(rowid, x) VALUES(3, 'four four four'); + DELETE FROM ft WHERE rowid=3; + COMMIT; + SELECT rowid FROM ft('four'); +} {} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 9.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=0); + INSERT INTO ft VALUES('hello world'); + INSERT INTO ft VALUES('one two three'); +} + +do_catchsql_test 9.1 { + INSERT INTO ft(ft, rowid, x) VALUES('delete', 1, 'hello world'); +} {0 {}} + +do_catchsql_test 9.2 { + CREATE VIRTUAL TABLE ft2 USING fts5(x, content='', contentless_delete=2); +} {1 {malformed contentless_delete=... directive}} + +do_catchsql_test 9.3 { + CREATE VIRTUAL TABLE ft2 USING fts5(x, content='', contentless_delete=11); +} {1 {malformed contentless_delete=... directive}} + +finish_test diff --git a/ext/fts5/test/fts5contentless2.test b/ext/fts5/test/fts5contentless2.test new file mode 100644 index 0000000000..248534bce4 --- /dev/null +++ b/ext/fts5/test/fts5contentless2.test @@ -0,0 +1,207 @@ +# 2023 July 19 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests for the content= and content_rowid= options. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5contentless2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +proc vocab {} { + list aaa bbb ccc ddd eee fff ggg hhh iii jjj kkk lll mmm nnn ooo ppp +} + +proc document {nToken} { + set doc [list] + set vocab [vocab] + for {set ii 0} {$ii < $nToken} {incr ii} { + lappend doc [lindex $vocab [expr int(rand()*[llength $vocab])]] + } + set doc +} +db func document document + +proc contains {doc token} { + expr {[lsearch $doc $token]>=0} +} +db func contains contains + +proc do_compare_tables_test {tn} { + uplevel [list do_test $tn { + foreach v [vocab] { + set l1 [execsql { SELECT rowid FROM t1 WHERE contains(doc, $v) }] + set l2 [execsql { SELECT rowid FROM t2($v) }] + if {$l1!=$l2} { error "1: query mismatch ($l1) ($l2)" } + + set w "[string range $v 0 1]*" + set l1 [execsql { SELECT rowid FROM t1 WHERE contains(doc, $w) }] + set l2 [execsql { SELECT rowid FROM t2($w) }] + if {$l1!=$l2} { error "2: query mismatch ($l1) ($l2)" } + + set w "[string range $v 0 0]*" + set l1 [execsql { SELECT rowid FROM t1 WHERE contains(doc, $w) }] + set l2 [execsql { SELECT rowid FROM t2($w) }] + if {$l1!=$l2} { error "2: query mismatch ($l1) ($l2)" } + + set l1 [execsql { + SELECT rowid FROM t1 WHERE contains(doc, $v) ORDER BY rowid DESC + }] + set l2 [execsql { SELECT rowid FROM t2($v) ORDER BY rowid DESC }] + if {$l1!=$l2} { error "1: query mismatch ($l1) ($l2)" } + } + set {} {} + } {}] +} + +proc lshuffle {in} { + set L [list] + set ret [list] + foreach elem $in { lappend L [list [expr rand()] $elem] } + foreach pair [lsort -index 0 $L] { lappend ret [lindex $pair 1] } + set ret +} + +expr srand(0) + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t2 USING fts5( + doc, prefix=2, content=, contentless_delete=1 + ); + + CREATE TABLE t1(doc); + CREATE TRIGGER tr1 AFTER DELETE ON t1 BEGIN + DELETE FROM t2 WHERE rowid = old.rowid; + END; +} + +set SMALLEST64 -9223372036854775808 +set LARGEST64 9223372036854775807 + +foreach {tn r1 r2} { + 1 0 50 + 2 $SMALLEST64 $SMALLEST64+50 + 3 $LARGEST64-50 $LARGEST64 + 4 -50 -1 +} { + set r1 [expr $r1] + set r2 [expr $r2] + + do_test 1.1.$tn { + execsql BEGIN + for {set ii $r1} {$ii <= $r2} {incr ii} { + execsql { INSERT INTO t1(rowid, doc) VALUES ($ii, document(8)); } + } + execsql COMMIT + } {} +} +do_test 1.2 { + db eval { SELECT rowid, doc FROM t1 } { + execsql { INSERT INTO t2(rowid, doc) VALUES($rowid, $doc) } + } +} {} + +foreach {tn rowid} { + 1 $SMALLEST64 + 2 0 + 3 -5 + 4 -30 + 5 $LARGEST64 + 6 $LARGEST64-1 +} { + set rowid [expr $rowid] + do_execsql_test 1.3.$tn.1 { + DELETE FROM t1 WHERE rowid=$rowid + } + do_compare_tables_test 1.3.$tn.2 +} + +set iTest 1 +foreach r [lshuffle [execsql {SELECT rowid FROM t1}]] { + if {($iTest % 50)==0} { + execsql { INSERT INTO t2(t2) VALUES('optimize') } + } + if {($iTest % 5)==0} { + execsql { INSERT INTO t2(t2, rank) VALUES('merge', 5) } + } + do_execsql_test 1.4.$iTest.1($r) { + DELETE FROM t1 WHERE rowid=$r + } + do_compare_tables_test 1.4.$iTest.2 + incr iTest +} + +do_execsql_test 1.5 { + SELECT * FROM t1 +} {} + +#------------------------------------------------------------------------- +reset_db +db func 
document document + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t2 USING fts5(doc, content=, contentless_delete=1); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO t2(rowid, doc) SELECT i, i || ' ' || i FROM s; +} + +do_execsql_test 2.1 { + BEGIN; + DELETE FROM t2 WHERE rowid=32; + DELETE FROM t2 WHERE rowid=64; + DELETE FROM t2 WHERE rowid=96; + DELETE FROM t2 WHERE rowid=128; + DELETE FROM t2 WHERE rowid=160; + DELETE FROM t2 WHERE rowid=192; + COMMIT; +} + +do_execsql_test 2.2 { + SELECT * FROM t2('128'); +} {} + +#------------------------------------------------------------------------- + +foreach {tn step} { + 1 3 + 2 7 + 3 15 +} { + set step [expr $step] + + reset_db + db func document document + do_execsql_test 3.$tn.0 { + CREATE VIRTUAL TABLE t2 USING fts5(doc, content=, contentless_delete=1); + INSERT INTO t2(t2, rank) VALUES('pgsz', 100); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO t2(rowid, doc) SELECT i, i || ' ' || i FROM s; + } + do_execsql_test 3.$tn.1 { + DELETE FROM t2 WHERE (rowid % $step)==0 + } + do_execsql_test 3.$tn.2 { + SELECT * FROM t2( $step * 5 ) + } {} +} + + + +finish_test diff --git a/ext/fts5/test/fts5contentless3.test b/ext/fts5/test/fts5contentless3.test new file mode 100644 index 0000000000..693840da82 --- /dev/null +++ b/ext/fts5/test/fts5contentless3.test @@ -0,0 +1,195 @@ +# 2023 July 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests for the content= and content_rowid= options. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5contentless3 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content=, contentless_delete=1); + BEGIN; + INSERT INTO ft VALUES('one one one'); + INSERT INTO ft VALUES('two two two'); + INSERT INTO ft VALUES('three three three'); + INSERT INTO ft VALUES('four four four'); + INSERT INTO ft VALUES('five five five'); + INSERT INTO ft VALUES('six six six'); + INSERT INTO ft VALUES('seven seven seven'); + INSERT INTO ft VALUES('eight eight eight'); + INSERT INTO ft VALUES('nine nine nine'); + COMMIT; + + DELETE FROM ft WHERE rowid=3; +} + +proc myhex {hex} { binary decode hex $hex } +db func myhex myhex + +do_execsql_test 1.1 { + UPDATE ft_data SET block = + myhex('04000000 00000001' || + '01020304 01020304 01020304 01020304' || + '01020304 01020304 01020304 01020304' + ) + WHERE id = (SELECT max(id) FROM ft_data); +} + +do_execsql_test 1.2 { + DELETE FROM ft WHERE rowid=1 +} + +do_execsql_test 1.3 { + SELECT rowid FROM ft('two'); +} {2} + +do_execsql_test 1.3 { + UPDATE ft_data SET block = + myhex('08000000 00000001' || + '0000000001020304 0000000001020304 0000000001020304 0000000001020304' || + '0000000001020304 0000000001020304 0000000001020304 0000000001020304' + ) + WHERE id = (SELECT max(id) FROM ft_data); +} + +do_execsql_test 1.4 { + SELECT rowid FROM ft('two'); +} {2} + +do_execsql_test 1.5 { + DELETE FROM ft WHERE rowid=4 +} + +do_execsql_test 1.6 { + UPDATE ft_data SET block = myhex('04000000 00000000') + WHERE id = (SELECT max(id) FROM ft_data); +} +do_execsql_test 1.7 { + SELECT rowid FROM ft('two'); +} {2} + +do_execsql_test 1.8 { + UPDATE ft_data SET block = myhex('04000000 00000000') + WHERE id = (SELECT max(id) FROM ft_data); +} +do_execsql_test 1.9 { + DELETE FROM ft WHERE rowid=8 +} {} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content=, contentless_delete=1); + INSERT INTO ft VALUES('one one one'); + INSERT INTO ft VALUES('two two two'); + INSERT INTO ft VALUES('three three three'); + INSERT INTO ft VALUES('four four four'); + INSERT INTO ft VALUES('five five five'); + INSERT INTO ft VALUES('six six six'); + INSERT INTO ft VALUES('seven seven seven'); + INSERT INTO ft VALUES('eight eight eight'); + INSERT INTO ft VALUES('nine nine nine'); +} + +do_execsql_test 2.1 { + INSERT INTO ft(ft) VALUES('optimize'); +} +do_execsql_test 2.2 { + SELECT count(*) FROM ft_data +} {3} +do_execsql_test 2.3 { + DELETE FROM ft WHERE rowid=5 +} +do_execsql_test 2.4 { + SELECT count(*) FROM ft_data +} {4} + +# Check that an 'optimize' works (rewrites the index) if there is a single +# segment with one or more tombstone hash pages. +do_execsql_test 2.5 { + INSERT INTO ft(ft) VALUES('optimize'); +} +do_execsql_test 2.6 { + SELECT count(*) FROM ft_data +} {3} + +# Check that an 'optimize' is a no-op if there is a single segment +# and no tombstone hash pages. 
+do_execsql_test 2.7 { + INSERT INTO ft(ft) VALUES('optimize'); + SELECT rowid FROM ft_data; +} [db eval {SELECT rowid FROM ft_data}] + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content=, contentless_delete=1); + INSERT INTO ft(ft, rank) VALUES('pgsz', 64); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO ft(rowid, x) SELECT i, i||' '||i||' '||i||' '||i FROM s; + INSERT INTO ft(ft) VALUES('optimize'); +} + +do_execsql_test 3.1 { + SELECT count(*) FROM ft_data +} {200} + +do_execsql_test 3.2 { + DELETE FROM ft WHERE (rowid % 50)==0; + SELECT count(*) FROM ft_data; +} {203} + +do_execsql_test 3.3 { + INSERT INTO ft(ft, rank) VALUES('merge', 500); + SELECT rowid FROM ft_data; +} [db eval {SELECT rowid FROM ft_data}] + +do_execsql_test 3.4 { + INSERT INTO ft(ft, rank) VALUES('merge', -1000); + SELECT count(*) FROM ft_data; +} {197} + +do_execsql_test 3.5 { + DELETE FROM ft WHERE (rowid % 50)==1; + SELECT count(*) FROM ft_data; +} {200} + +do_execsql_test 3.6 { + SELECT level, segment, npgtombstone FROM fts5_structure( + (SELECT block FROM ft_data WHERE id=10) + ) +} {1 0 3} + +do_test 3.6 { + while 1 { + set nChange [db total_changes] + execsql { INSERT INTO ft(ft, rank) VALUES('merge', -5) } + if {([db total_changes] - $nChange)<2} break + } +} {} + +do_execsql_test 3.7 { + SELECT level, segment, npgtombstone FROM fts5_structure( + (SELECT block FROM ft_data WHERE id=10) + ) +} {2 0 0} + + +finish_test diff --git a/ext/fts5/test/fts5contentless4.test b/ext/fts5/test/fts5contentless4.test new file mode 100644 index 0000000000..7fdf8c4b01 --- /dev/null +++ b/ext/fts5/test/fts5contentless4.test @@ -0,0 +1,247 @@ +# 2023 July 21 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests for the content= and content_rowid= options. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5contentless4 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +proc document {n} { + set vocab [list A B C D E F G H I J K L M N O P Q R S T U V W X Y Z] + set ret [list] + for {set ii 0} {$ii < $n} {incr ii} { + lappend ret [lindex $vocab [expr int(rand()*[llength $vocab])]] + } + set ret +} +db func document document + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + INSERT INTO ft(ft, rank) VALUES('pgsz', 240); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO ft SELECT document(12) FROM s; +} + +do_execsql_test 1.1 { + INSERT INTO ft(ft) VALUES('optimize'); +} + +do_execsql_test 1.2 { + SELECT level, segment, nentry, nentrytombstone FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {0 0 1000 0} + +do_execsql_test 1.3 { + DELETE FROM ft WHERE rowid < 50 +} + +do_execsql_test 1.4 { + SELECT level, segment, nentry, nentrytombstone FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {0 0 1000 49} + +do_execsql_test 1.5 { + DELETE FROM ft WHERE rowid < 1000 +} + +do_execsql_test 1.6 { + SELECT level, segment, nentry, nentrytombstone FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {1 0 1 0} + +#-------------------------------------------------------------------------- +reset_db +db func document document + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); +} + +do_test 2.1 { + for {set ii 0} {$ii < 5000} {incr ii} { + execsql { INSERT INTO ft VALUES( document(12) ) } + } +} {} + +do_execsql_test 2.2 { + SELECT sum(nentry) - sum(nentrytombstone) FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {5000} + +for {set ii 5000} {$ii >= 0} {incr ii -100} { + do_execsql_test 2.3.$ii { + DELETE FROM ft WHERE rowid > $ii + } + do_execsql_test 2.3.$ii.2 { + SELECT + CAST((total(nentry) - total(nentrytombstone)) AS integer) + FROM + fts5_structure( (SELECT block FROM ft_data WHERE id=10) ) + } $ii +} + +execsql_pp { + SELECT * FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} + +do_test 2.4 { + for {set ii 0} {$ii < 5000} {incr ii} { + execsql { INSERT INTO ft VALUES( document(12) ) } + } +} {} + +for {set ii 1} {$ii <= 5000} {incr ii 10} { + do_execsql_test 2.3.$ii { + DELETE FROM ft WHERE rowid = $ii; + INSERT INTO ft VALUES( document(12) ); + INSERT INTO ft(ft, rank) VALUES('merge', -10); + } + + do_execsql_test 2.3.$ii.2 { + SELECT + CAST((total(nentry) - total(nentrytombstone)) AS integer) + FROM + fts5_structure( (SELECT block FROM ft_data WHERE id=10) ) + } 5000 +} + +#------------------------------------------------------------------------- +reset_db +db func document document +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, content='', contentless_delete=1); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100 + ) + INSERT INTO ft SELECT document(12) FROM s; +} + +do_catchsql_test 3.1 { + INSERT INTO ft(ft, rank) VALUES('deletemerge', 'text'); +} {1 {SQL logic error}} +do_catchsql_test 3.2 { + INSERT INTO ft(ft, rank) VALUES('deletemerge', 50); +} {0 {}} +do_execsql_test 3.3 { + SELECT * FROM ft_config WHERE k='deletemerge' +} {deletemerge 50} +do_catchsql_test 3.4 { + INSERT INTO ft(ft, rank) VALUES('deletemerge', 101); +} {0 {}} +do_execsql_test 3.5 { + SELECT * FROM ft_config WHERE k='deletemerge' +} {deletemerge 101} + +do_execsql_test 3.6 { + DELETE FROM ft WHERE rowid<95 +} + +do_execsql_test 3.7 { + SELECT nentrytombstone, nentry FROM 
fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {94 100} + +do_execsql_test 3.8 { + DELETE FROM ft WHERE rowid=95 +} + +do_execsql_test 3.9 { + SELECT nentrytombstone, nentry FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {95 100} + +do_execsql_test 3.10 { + DELETE FROM ft; + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100 + ) + INSERT INTO ft SELECT document(12) FROM s; + INSERT INTO ft(ft, rank) VALUES('deletemerge', 50); +} + +do_execsql_test 3.11 { + DELETE FROM ft WHERE rowid<95 +} + +do_execsql_test 3.12 { + SELECT nentrytombstone, nentry FROM fts5_structure(( + SELECT block FROM ft_data WHERE id=10 + )) +} {0 6} + +#------------------------------------------------------------------------- +reset_db +db func document document +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x, content='', contentless_delete=1); + INSERT INTO x1(x1, rank) VALUES('usermerge', 16); + INSERT INTO x1(x1, rank) VALUES('deletemerge', 40); + INSERT INTO x1 VALUES('one'); + INSERT INTO x1 VALUES('two'); + INSERT INTO x1 VALUES('three'); + INSERT INTO x1 VALUES('four'); + INSERT INTO x1 VALUES('five'); + INSERT INTO x1 VALUES('six'); + INSERT INTO x1 VALUES('seven'); + INSERT INTO x1 VALUES('eight'); + INSERT INTO x1 VALUES('nine'); + INSERT INTO x1 VALUES('ten'); +} + +do_execsql_test 4.1 { + SELECT level, segment FROM fts5_structure(( + SELECT block FROM x1_data WHERE id=10 + )) +} { + 0 0 0 1 0 2 0 3 0 4 0 5 0 6 0 7 0 8 0 9 +} + +for {set ii 1} {$ii < 4} {incr ii} { + do_execsql_test 4.2.$ii { + DELETE FROM x1 WHERE rowid = $ii; + INSERT INTO x1(x1, rank) VALUES('merge', 5); + SELECT level, segment FROM fts5_structure(( + SELECT block FROM x1_data WHERE id=10 + )) + } { + 0 0 0 1 0 2 0 3 0 4 0 5 0 6 0 7 0 8 0 9 + } +} + +do_execsql_test 4.3 { + DELETE FROM x1 WHERE rowid = $ii; + INSERT INTO x1(x1, rank) VALUES('merge', 5); + SELECT level, segment, nentry FROM fts5_structure(( + SELECT block FROM x1_data WHERE id=10 + )) +} { + 1 0 6 +} + +finish_test diff --git a/ext/fts5/test/fts5contentless5.test b/ext/fts5/test/fts5contentless5.test new file mode 100644 index 0000000000..86d0753286 --- /dev/null +++ b/ext/fts5/test/fts5contentless5.test @@ -0,0 +1,111 @@ +# 2023 August 7 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests for the content= and content_rowid= options. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5contentless5 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} +unset -nocomplain res + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, b, c, content='', contentless_delete=1); + INSERT INTO t1 VALUES('A', 'B', 'C'); + INSERT INTO t1 VALUES('D', 'E', 'F'); + INSERT INTO t1 VALUES('G', 'H', 'I'); +} + +do_execsql_test 1.01 { + CREATE TABLE t2(x, y); + INSERT INTO t2 VALUES('x', 'y'); +} + +# explain_i "UPDATE t1 SET a='a' WHERE t1.rowid=1" +#breakpoint +#explain_i "UPDATE t1 SET a='a' FROM t2 WHERE t1.rowid=1 AND b IS NULL" + +#breakpoint +#explain_i "UPDATE t1 SET a='a' WHERE b IS NULL AND rowid=?" 
+ +foreach {tn up err} { + 1 "UPDATE t1 SET a='a', b='b', c='c' WHERE rowid=1" 0 + 2 "UPDATE t1 SET a='a', b='b' WHERE rowid=1" 1 + 3 "UPDATE t1 SET b='b', c='c' WHERE rowid=1" 1 + 4 "UPDATE t1 SET a='a', c='c' WHERE rowid=1" 1 + 5 "UPDATE t1 SET a='a', c='c' WHERE t1.rowid=1 AND b IS NULL" 1 + 6 "UPDATE t1 SET a='a' FROM t2 WHERE t1.rowid=1" 1 + 7 "UPDATE t1 SET a='a', b='b', c='c' FROM t2 WHERE t1.rowid=1" 0 +} { + + set res(0) {0 {}} + set res(1) {1 {cannot UPDATE a subset of columns on fts5 contentless-delete table: t1}} + do_catchsql_test 1.$tn $up $res($err) +} + +#------------------------------------------------------------------------- +reset_db + +proc random {n} { expr {abs(int(rand()*$n))} } +proc select_one {list} { + set n [llength $list] + lindex $list [random $n] +} +proc vocab {} { + list abc def ghi jkl mno pqr stu vwx yza +} +proc term {} { + select_one [vocab] +} +proc document {} { + set nTerm [expr [random 3] + 7] + set doc "" + for {set ii 0} {$ii < $nTerm} {incr ii} { + lappend doc [term] + } + set doc +} +db func document document + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft USING fts5(a, contentless_delete=1, content=''); + INSERT INTO ft(ft, rank) VALUES('pgsz', 64); +} + +do_test 2.1 { + for {set ii 1} {$ii < 12} {incr ii} { + db transaction { + for {set jj 0} {$jj < 10} {incr jj} { + set doc [document] + execsql { INSERT INTO ft VALUES($doc); } + } + } + } +} {} + +do_test 2.2 { + foreach r [db eval {SELECT rowid FROM ft}] { + execsql { DELETE FROM ft WHERE rowid=$r } + } +} {} + +set doc [document] +do_execsql_test 2.3 { + INSERT INTO ft VALUES($doc) +} + + +finish_test diff --git a/ext/fts5/test/fts5corrupt.test b/ext/fts5/test/fts5corrupt.test index 5f13513ec7..8788bc2ed6 100644 --- a/ext/fts5/test/fts5corrupt.test +++ b/ext/fts5/test/fts5corrupt.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5corrupt -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -47,7 +47,10 @@ do_test 1.3 { DELETE FROM t1_data WHERE rowid = fts5_rowid('segment', $segid, 4); } catchsql { INSERT INTO t1(t1) VALUES('integrity-check') } -} {1 {database disk image is malformed}} +} {1 {fts5: corruption found reading blob 137438953476 from table "t1"}} +do_execsql_test 1.3b { + PRAGMA integrity_check(t1); +} {{fts5: corruption found reading blob 137438953476 from table "t1"}} do_test 1.4 { db_restore_and_reopen @@ -57,7 +60,7 @@ do_test 1.4 { rowid = fts5_rowid('segment', $segid, 4); } catchsql { INSERT INTO t1(t1) VALUES('integrity-check') } -} {1 {database disk image is malformed}} +} {1 {fts5: corruption found reading blob 137438953476 from table "t1"}} db_restore_and_reopen #db eval {SELECT rowid, fts5_decode(rowid, block) aS r FROM t1_data} {puts $r} @@ -95,6 +98,27 @@ sqlite3_db_config db DEFENSIVE 0 do_catchsql_test 3.1 { DELETE FROM t3_content WHERE rowid = 3; SELECT * FROM t3 WHERE t3 MATCH 'o'; +} {1 {fts5: missing row 3 from content table 'main'.'t3_content'}} + +#-------------------------------------------------------------------- +# +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t2 USING fts5(x); + INSERT INTO t2 VALUES('one two three'); + INSERT INTO t2 VALUES('four five six'); + INSERT INTO t2 VALUES('seven eight nine'); + INSERT INTO t2 VALUES('ten eleven twelve'); +} +do_execsql_test 4.1 { + SELECT hex(block) FROM t2_data WHERE id=1; +} {040C} +do_execsql_test 4.2 { + UPDATE t2_data SET block = X'0402' WHERE id=1 +} +breakpoint +do_catchsql_test 4.3 { + DELETE FROM t2 WHERE rowid=3 } {1 {database disk image is malformed}} finish_test diff --git a/ext/fts5/test/fts5corrupt2.test b/ext/fts5/test/fts5corrupt2.test index a815320b76..fd2a841c7e 100644 --- a/ext/fts5/test/fts5corrupt2.test +++ b/ext/fts5/test/fts5corrupt2.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5corrupt2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -100,6 +100,7 @@ set lrowid [db one {SELECT max(rowid) FROM t1_data WHERE (rowid & $mask)=0}] set nbyte [db one {SELECT length(block) FROM t1_data WHERE rowid=$lrowid}] set all [db eval {SELECT rowid FROM t1}] sqlite3_db_config db DEFENSIVE 0 +unset -nocomplain res for {set i [expr $nbyte-2]} {$i>=0} {incr i -1} { do_execsql_test 2.$i.1 { BEGIN; @@ -108,12 +109,12 @@ for {set i [expr $nbyte-2]} {$i>=0} {incr i -1} { do_catchsql_test 2.$i.2 { INSERT INTO t1(t1) VALUES('integrity-check'); - } {1 {database disk image is malformed}} + } {/1.*fts5: corruption.*/} do_test 2.$i.3 { set res [catchsql {SELECT rowid FROM t1 WHERE t1 MATCH 'x*'}] expr { - $res=="1 {database disk image is malformed}" + [string match {*fts5: corruption*} $res] || $res=="0 {$all}" } } 1 @@ -152,21 +153,24 @@ foreach {tn hdr} { execsql BEGIN set fd [db incrblob main x3_data block $rowid] - fconfigure $fd -encoding binary -translation binary + fconfigure $fd -translation binary set existing [read $fd [string length $hdr]] seek $fd 0 puts -nonewline $fd $hdr close $fd set res [catchsql {SELECT rowid FROM x3 WHERE x3 MATCH 'x AND a'}] - if {$res == "1 {database disk image is malformed}"} {incr nCorrupt} + if {[string match {*fts5: corruption*} $res]} {incr nCorrupt} set {} 1 } {1} if {($tn2 % 10)==0 && $existing != $hdr} { do_test 3.$tn.$tn2.2 { catchsql { INSERT INTO x3(x3) VALUES('integrity-check') } - } {1 {database disk image is malformed}} + } {/.*fts5: corruption.*/} + do_execsql_test 3.$tn.$tn2.3 { + PRAGMA integrity_check(x3); + } {/.*fts5: corruption.*/} } execsql ROLLBACK @@ -205,7 +209,7 @@ foreach {tn nCut} { set res [catchsql { SELECT rowid FROM x4 WHERE x4 MATCH 'a' ORDER BY 1 DESC }] - if {$res == "1 {database disk image is malformed}"} {incr nCorrupt} + if {[string match {*fts5: corruption*} $res]} {incr nCorrupt} set {} 1 } {1} @@ -235,7 +239,7 @@ foreach {tn hdr} { execsql BEGIN set fd [db incrblob main x5_data block $rowid] - fconfigure $fd -encoding binary -translation binary + fconfigure $fd -translation binary puts -nonewline $fd $hdr close $fd diff --git a/ext/fts5/test/fts5corrupt3.test b/ext/fts5/test/fts5corrupt3.test index adfaa6d85b..20be7c45cf 100644 --- a/ext/fts5/test/fts5corrupt3.test +++ b/ext/fts5/test/fts5corrupt3.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5corrupt3 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -102,7 +102,7 @@ proc do_3_test {tn} { list [ catch { db eval {SELECT rowid FROM t1 WHERE t1 MATCH 'x*'} } msg ] $msg - } {1 {database disk image is malformed}} + } {/.*fts5: corruption.*/} catch { db eval ROLLBACK } } } @@ -273,7 +273,7 @@ do_execsql_test 6.1.1 { } do_catchsql_test 6.1.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} #------- reset_db @@ -289,7 +289,7 @@ do_execsql_test 6.2.1 { } do_catchsql_test 6.2.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} #------- reset_db @@ -308,7 +308,7 @@ do_execsql_test 6.3.1 { } do_catchsql_test 6.3.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} do_execsql_test 6.3.3 { ROLLBACK; BEGIN; @@ -319,7 +319,7 @@ do_execsql_test 6.3.3 { } do_catchsql_test 6.3.3 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} do_execsql_test 6.3.4 { ROLLBACK; BEGIN; @@ -330,7 +330,7 @@ do_execsql_test 6.3.4 { } do_catchsql_test 6.3.5 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} do_execsql_test 6.3.6 { ROLLBACK; BEGIN; @@ -341,7 +341,7 @@ do_execsql_test 6.3.6 { } do_catchsql_test 6.3.5 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corruption.*/} #------------------------------------------------------------------------ @@ -374,7 +374,7 @@ do_test 7.1 { db eval BEGIN db eval {DELETE FROM t5_data WHERE rowid = $i} set r [catchsql { INSERT INTO t5(t5) VALUES('integrity-check')} ] - if {$r != "1 {database disk image is malformed}"} { error $r } + if {![string match {*fts5: corruption*} $r]} { error $r } db eval ROLLBACK } } {} @@ -399,7 +399,7 @@ do_test 9.1.1 { } {} do_catchsql_test 9.1.2 { SELECT * FROM t1('one AND two'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_test 9.2.1 { set blob "12345678" ;# cookie @@ -411,7 +411,7 @@ do_test 9.2.1 { } {} do_catchsql_test 9.2.2 { SELECT * FROM t1('one AND two'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -497,7 +497,7 @@ do_test 10.0 { } {} do_catchsql_test 10.1 { SELECT * FROM t1 WHERE t1 MATCH 'abandon'; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- # @@ -678,13 +678,13 @@ do_test 12.0 { | end c2.db }]} {} -do_catchsql_test 11.1 { +do_catchsql_test 12.1 { SELECT * FROM t1 WHERE t1 MATCH 'abandon'; -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} -do_catchsql_test 11.2 { +do_catchsql_test 12.2 { INSERT INTO t1(t1, rank) VALUES('merge', 500); -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- # @@ -870,7 +870,7 @@ do_test 14.0 { do_catchsql_test 14.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #--------------------------------------------------------------------------- # @@ -1040,7 +1040,7 @@ do_test 16.0 { do_catchsql_test 16.1 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} 
#-------------------------------------------------------------------------- reset_db @@ -1126,7 +1126,7 @@ do_test 17.0 { do_catchsql_test 17.1 { SELECT * FROM t1 WHERE t1 MATCH 'abandon'; -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -1435,7 +1435,7 @@ do_test 18.0 { do_catchsql_test 18.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -1546,7 +1546,7 @@ do_test 19.0 { do_catchsql_test 19.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -1630,7 +1630,7 @@ do_test 20.0 { do_catchsql_test 20.1 { SELECT * FROM t1 WHERE t1 MATCH 'abandon'; -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -1764,7 +1764,7 @@ do_test 21.0 { do_catchsql_test 21.1 { DELETE FROM t1 WHERE t1 MATCH 'ab*ndon'; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- # @@ -2100,7 +2100,7 @@ do_test 22.0 { do_catchsql_test 22.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -2211,7 +2211,7 @@ do_test 23.0 { do_catchsql_test 23.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -2429,7 +2429,7 @@ do_test 24.0 { do_catchsql_test 24.1 { UPDATE t1 SET b=quote(zeroblob(200)) WHERE a MATCH 'thread*'; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_catchsql_test 24.2 { INSERT INTO t1(t1) VALUES('integrity-check'); @@ -2518,7 +2518,7 @@ do_test 25.0 { do_catchsql_test 25.1 { INSERT INTO t1(t1) VALUES('rebuild'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_execsql_test 25.2 { PRAGMA page_size=512; @@ -3011,7 +3011,7 @@ do_test 27.0 { do_catchsql_test 27.1 { DELETE FROM t1 WHERE a MATCH 'fts*'; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -3700,7 +3700,7 @@ do_catchsql_test 32.1 { highlight(t1, 2, '[', ']') FROM t1('g + h') WHERE rank MATCH 'bm25(1.0, 1.0)' ORDER BY rank; -} {1 {vtable constructor failed: t1}} +} {/.*fts5: corrupt.*/} do_catchsql_test 32.2 { SELECT * FROM t3; @@ -4267,7 +4267,7 @@ do_test 35.0 { do_catchsql_test 35.1 { SELECT * FROM t1 WHERE t1 MATCH 'e*'; -} {1 {database disk image is malformed}} +} {1 {fts5: missing row 14 from content table 'main'.'t1_content'}} #------------------------------------------------------------------------- reset_db @@ -5351,7 +5351,7 @@ do_execsql_test 41.0 { do_catchsql_test 41.1 { INSERT INTO t1(t1) VALUES('optimize'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_catchsql_test 41.2 { INSERT INTO t1(t1) VALUES('integrity-check'); @@ -5573,7 +5573,7 @@ do_test 42.0 { do_catchsql_test 42.1 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {1 {fts5: checksum mismatch for table "t1"}} 
#------------------------------------------------------------------------- reset_db @@ -5813,7 +5813,7 @@ do_execsql_test 44.1 { do_catchsql_test 44.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_catchsql_test 44.3 { SELECT snippet(t1, -1, '.', '..', '', 2 ) FROM t1('g h') ORDER BY rank; @@ -6644,7 +6644,7 @@ do_test 48.0 { do_catchsql_test 48.1 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {1 {fts5: corruption on page 1, segment 1, table "t1"}} #-------------------------------------------------------------------------- reset_db @@ -6917,7 +6917,6 @@ REPLACE INTO t1_data VALUES(1,X'2eb1182424'); REPLACE INTO t1_data VALUES(10,X'000000000102080002010101020107'); INSERT INTO t1_data VALUES(137438953473,X'0000032b0230300102060102060102061f0203010203010203010832303136303630390102070102070102070101340102050102050102050101350102040102040102040207303030303030301c023d010204010204010662696e6172790306010202030601020203060102020306010202030601020203060102020306010202030601020203060102020306010202030601020203060102020108636f6d70696c657201020201020201020201066462737461740702030102030102030204656275670402020102020102020107656e61626c6507020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020201020202087874656e73696f6e1f02040102040102040104667473340a02030102030102030401350d020301020301020301036763630102030102030102030206656f706f6c7910020301020301020301056a736f6e3113020301020301020301046c6f61641f020301020301020301036d61781c02020102020102020205656d6f72791c020301020301020304047379733516020301020301020301066e6f6361736502060102020306010202030601020213060102020306010202030601020203060102020306010202030601020203060102020306010202030601020201046f6d69741f0202010202010202010572747265651902030102030102030402696d010601020203060102020306010202030601020203060102020306010202030601020203060102020306010202030601020203060102020306010202010a7468726561647361666522020201020201020201047674616207020401020401020401017801060101020106010102010601010201060101020106010102010601010201060101020106010102010601010201060101020106010102010601010201060101020106010102010601010201060101020106010102010601010201060101020106010102ad060101020106010102010601010201060101020106010101010601010201060101020106010102010601010201060101020106010102010601010201060101020106010102010601010201060101020415130c0c124413110f47130efc0e11100f0e100f440f1040150f'); INSERT INTO t1_data 
VALUES(274877906945,X'00000e96023030011a042319320d3b123d812b5a31120110446e581b66814a05010a4537814274010e8102815c810f3d0104846d01081581204401103741043c59416b44010a404655265301103f73811a11114213010a821235820f020135030484320201360104816a020162020484550302390301710a04824a020166030483690201670704837d0201690404822602016a0504825c02026b620504817502016f0904810d0303e79c88060482760201700a04826302017204048155020373c2be050481130201770204846202027962050482710202c2ba010482140203e58496070483330204e8b2b879010483710101310110545c0c814b0e3a6501082c815d5b011a2a0e2f0d765c3d686014061d0d0112810733112c2e82141101048313010e5c6f632e813e42010c811882370548010e19158146822f1f01104d364a708146135a010a237b0a55210201610904841703027678090481270201620304810002026374060484660202657a0704827602016601048351090483540301660704814b02016b03025f0304c582caba0204816602016c01025f0302cebc0904843e02016e0804821802016f0a04817503016f070483100201720304822c020484380201740404842e0102460201770104812f0204836c02027a6f040483110202cebc02048267040161020484650205d5bd62cebc0604845b0204f2a580880204842a0206f38184a179670502750204f696a3aa0a04814601013201063330390110812281378114600d010c03716c5e822d010e81226b542a814d010a72740f83000108813a1e0b010c5681046f812c010c07814a777328011664244219531b1a2f811e4a010c4d81557c7f1b0201300704810702013307048230010484050202367807048175020239710804832502016204026b0204814d020363306108048262020265650504817602026667070483150201690704832f0301360a04814d02016c08024702016d0304843e0303cfb2630204814002016e0804837503016203048416030370c2be0304821b02016f0604834b0201700504816b030175070210020273660604841c020676c2bac2b2640604830a02017704027d02017808048141010482700201790504811d0202c2ba0502470206ca8d73ecbab9010483340204f09e9ab504048367010133010c3e04814f82250114812e814b2e0411811305010c811337811e6e010c82085e2b0e5d010c61812054811c01148122451b0781050c813d010c17823762643e011e080c1720814a10364306143b0d33260112810f0c2a810c816b13010a8163810e470201370404811102056176c2aa36050481530202646e0904846202026a730404827402016b0a020c02036f616b0404830a0201750504820d02017605025e0201770a04820702027a73050482460202c2ba0604824a020483330203cba434040483200203cebc790304847e01013401124181442c1d091f81580108601d8336011081320a2b8125820001123b0b81158116811f070110078112817a817308010e6682410d810e2601122d0d6413378147351e01105081021d3525812d01128246510a622204054101105c1b620e81302b05020130020483480104822702013102027e020132030483270201350304844802023770030207020261710604823f0201640802570204830a020265770304831f020168070483210201690204825b02016f020481280704835402057037e18b8d0904810802017304048439030172020481440303c2bd6b0a02630201760202490804815e0201770304816e050483550201780704816902017a070483280207c2b2f093aabc780a0482240301b303024e0301bc0604837a0202cb800204834a0202cebc04048201040484410203d3ad770a04814f0202d5a508048371010135011630817e0f81040d2c041552010c813d3b7e8115010c40692182693a01121f810d810d0a32814701101d1d1f642281742e01068229240110811231810a387f4c01100f50810f8165810d0114811f26443152593c104a010e641e1a3357820e020132030481540201340204815402023778040207020163060481020204815b020164020483010201670a0481540304f0948f870904811a0201690704835502016d0604832203027b01022803017a0a025102016e020484260404816002026f690502680301720104834e02017208024f02021d020475e69c8e0504814c0202767602025f0302de870a04837b020178090483200104835003026a72090484400201790504837204022302017a0104836b0202c2aa0a0481070303bcc2b3090484370203c7866403027501013601087c158303011212814305813e7b0e090118141a1c49713a211e0c74630f010a59826d8113011203328166037781561a01101d7f1d2a1f822533010e820e070f7b40160110811f40292c813226010a2d20824a32010a81418
158670201300304840a0201660404817d02016d0804826902016e080626817f0104820b02016f0404825a02017003023b020272750804840c0301760a04844d02017403025b020175080484200202766e060482360303c2ba6c030482220202c2aa0204810e0301b20204835703048421040484240301bc0802410202d2a1010482630204e1b18f3704048354010137010c08816337812f01101382211532424d39010881248123010e7724810267815f011081236029813e273301101b7b29812a5b813b01128150810324814b220b01060c8417010e165b6c81708117010a1782346f6c0201380804816803016b0604840102013908023f0201610204816c0201630302760201640304832604023e0204833902016502048203020266770804821a0201670a04830002026964080484500304f29e9eb70802250201700204811b0201710904832d0201730304826b05048403020174010481000205776a62c2b2050482630201790202260206c2b2eaaeb464050485020301b3080482480303bac7af0a0484550203c695650804822a0202ceb90a0481170202dbae05023f010138011819814a2703390a61090c6912011a21181304812523811b5f164e050114110f35128123423f810c010c817573817c03010e7182590c812b0401142503597e6e0e2f3a3759011252813a811a2b75091a010882162a31010e17450a81048279010858658208020231670a048205020361c2b906023b02016301048236020164050482520201650904833d0201670904811b0201690604825002026a7a0604837c02016e0204832002017002026d0302c789010481020201720504835e0201740604810002017502020a0702630302676306025f0303e4a0a70102640203786a75010484440201790104841402047ac2be72070481340207c2ba3766673576090482790301bd07048142010481600202c7b309027a0401740604823b0202d2bf0304830f0204e989a6300a02600204f4bd91b60702120101390106518369010e19254641823711010c258267288121010e817c810d2b17250110810a578133812f4c0110415681067b288121010e0881208119347101140b8131543c8100343d1101088203813e01100d742e3230820f3802013006048107020134080481440202356501021a02013808048147020162020482230201630304833c030162050483390206656cf093b5bd070484140202677303048502020769e3ad9669c2b90304847402016b0804836402016c0404841f02016d010481250904825202016e06025402016f0704842a0201700a04834a0201720a0483530201750304822e020676f097b18374030482140201780604833d0202c2b9010481550301be070483720204826f0202ca80060481630202d5a80504833f0101610114551047810e130a78660c011a364611206c0b13080705733d5501240f08070c090b0c20813d1471042e4351131e011204412d814f0913104201263036110d060b1f811a301b0f4e1a29092f181c012808071e221a2a81075b320503065a0f140c1a0a26011c07231d0e6f3715063b760c6b091501121111303e3a71566d6d010e0867814d816a0c01181e18240d41724d221b3f384b0201300204830b060483080302c2b30204837f0201310a026e020134050481560301730902690201360406827c0f0201370204825d06020a0201390904815b0304f3bfb2a70a04822c0201610404810e020262660604841a03017608025b02016309088112825f020164090482310201650a0484480302396f01025d02026774010482090302df9b06048321020168070661813303016f03048248020169030483610504814001048401010483460301300704824203016a0204824402016a0504813303021d03017209048412030277380804824502016c0104814c02036ec2ba0804835702016f0204811d030176020238020270360a04840d02017107048201030469ca99690602350201720304812d0104845e02017304022c02017409022b020175050484140302caaf0402410201760404831f020177010484180704845203026f6d04024b02017a030482660202c2aa0204810a0301b30a04817b0302bd76020483780302be6a0302440202c58207025e0202c69901027a0302ad77010483200206c993f099b183070484270203caa1660204841a0204f29788ac0804831d0204f4ba9f950504843c010162011c0e33810216341c2413042130780501184d373e53131f2f052907423e010c830e3781390e011c1320461f81041b811b041e15243d011e241b10816c310b130c3133033b0741011a11816d3139100c13140b395848011c580e411a06304306810a3138330d011a441707092c70140c1643813920010c73653581374f010c826c81210f0402013803024f07048172020161020481650204810c020162030483470301370404813602016307048379020483
280201640304815501048176010481060201650104812a0104841f0201660504821f0201670302700201680804846403016b0a04831d0301700904845502016908025c02016a010483560904827602026b6306023402016c0404832602016d040484410204825702016e0504831603027831020482160302c2be0504827d02026f6a05048121020171030232020483220304845402023a0201720204845c0304846e03013607048224020174060484480201780704844303016f0604814f045807070a0707070707080709070709070808090a5c07080708080b07060a06080707070b0a0b0808070b0a0b0a55070b08080a0908080707060709070709070706080c060b07070c0a66070b08080609070607080c0909660b06070707080a0807070b0b0707080a0b07070d0607080c0908630707070b07070a070d060b0707090a07080b080a070809085f0707070c0706080706070809080f06080a5807070607060e070807080907070b070b060c0709090807690808070707070708070608070709070809070a0d0b07070809095b070707070707070c080d07070b06070707070c07080b0808811a0b08060706080a070a07080609070707080808071307070a0708070907060807090b06060707070b0707080708070707080c090a0a81080a0b07070b0f0b0706070707060b07070b07080808110b070707'); -INSERT INTO t1_data VALUES(274877906946,X'00000e880330627a020482240202c2aa0a04833f0301b30704844b0302b9650704824f0301ba0204845f0202c9820a0483640202d194060482300203e19cbd0904844b0203e691b4050483510205e78dadde9b0104821201016301142a6c033b8151085c094601140b813d49313f81110e1c011681163611221527257f5d38011c150f22811a0a3c12350631238117011c3e26420b402c1d81080c40150b2f01181c3143382640273d60132e070118663b1d162a1b0e2e8111393e0110821117310e52811c01141a2f49810181391f2b130112323c0305812a6f2e390201320204842702023334020481340201350202610201360304844603023362040484470203376a360a04826b02013808026203016f0704830d02013902025502016106088170827a0301320a04820403016c0404831502016204048327050484030201630a04814302016401048349020484760302430301640204845d02016504048249020367c2bd07020a0302cebc0902150201680a02500201690204846102016a06024c0301770504842b02016b0104830e0704811803023370040483580301710404845f02016c0504844f0204820d0204837f0302c2aa0104833702016d0104844c02016e0804834f03026c6a07025702046fecbd9a01023a020270330204830a0301740304837c020271350204811e0201720706833b310206736ef09289b70104832b020174070483290301320204827c02017608022802017806025b0302c2b30904835202027978080483040303c9b56f0904846a0202c2aa04048127050482120301b30504813a0301b901024204016a0704840c0202c5820704823c0202c999010482470202cebc0602400203db91670602730202dca7050482760203e1a3950304817e0203e786a702048273010164011a0612105b292b817c1211080d5a01147c1d420b35451c36811a011e0e168117081c0c2e051d474055192d011e02050a1c81180420250f815f300f21011c02316a37143321443a10042d54230112761428810e4750054101101805072b8215294e0116680f0f5381445a3e0b070901224e4a41210c361c281b101c43051325130f01185a1e19108106300f2e3f4538020130060481370202327305022002013405048168020335377a03021802013704027b02013804048260020161030882118101030263650904814d0201620604822502016303048419030135090483240201640602280301380404817e0201650404823c0304f097ac9f010482680201660a0267030566e2b6936f0104821c0201680704813302016b0604832002016c01026a0301610204843202016d0804845e03026d6d03023c020270730a04817f0304c2be797a0804832e020271710504835f02017203021b0201740204825a02017706027202017805048451010683572e03016e0a04814c0201790304811702017a080484450302c2b90204837b0202c2aa0604825b0303b273630104841c0301b9050485040301bc0a027b0303bd37680502670202c98b0204826b0203cfa1740504823a0203d199610202350203e3bf87040483570204f1baaba90504817301016501120b8104392d0d20180f011645213f292e4d0d082f8165011e0b400c07341b2329307f193338173a012055292409050c560a272a0f4403245718011a1c3a183f1c43264c3126060829012081208102043a044d0621650b180e150e011a066c030e513d7d265e1313
130c0118171953040457347b114d191901261b1c060c26090d6f0d332a1519096e03101d1d012207342f1f2c7e2517251d0f310d2a17081e02013005020a02013308048247020135030483660201380704841b030132030482180201610102600604825f0304c2bdc2ba02023a02016209021f0201630604813002056663cebc610604841e0201670504816a0104842703037177310604833f03027872080482350201680604825e02016a040483320104840d02016b0304813f02026c6408020602016d070481590104837d03016c08020d02016e060484630301780404815c02016f0104826b0804825e02017008027b0302c2aa0504847c0201710404836d0201730402510302cebc08048338020375c7bf05048344020177010482660304822303026479070481630204786ec2ba0204814f0202796a0a04834f02017a01048407030484660604810603026561020483180204c2b278390504813d0301b304024f04026536010483110302b9330604813e0301be0304840b040484560202c4a702022d0206c6a5f7ada9990402350202c79f090481180202caa60502140204cebcc2bd080483320202db900a0481250205f4b5aa9079040484360204f7b985bd0204835e01016601128101285c096981190e01121f813f0d431a8135530114698102813228492f190a011260161881328101812601188155780d813257050c0b04060114161681340772811b5e25011c4505810e13290b253a0c0c0a1a4b011a3714133e1235812b136b062c0b011a6b591356810c3c240906250b1001148127810d413e0e81090d020231680504822c0302c2bc0204830c0201360104840c0303656a740902110302d1950504824f0201370902130201390a0482530303e0a9ae0904844c0201610804810d0201620a04810c03023039030481330302356902048268020163060483470201650504822d02016706048200030483560302713509025002026a79010484410804825902016c0504822002016e07020d0304843a02016f0204842d0201710304837604048361030482430201720904840f020173020482520804810b02017406068425320201760202420201780804845d02037979650704814802017a080481110301780804812c0202c2bd070484600202cebc0508813082410102770401610202250202ddb40302310205f19e9a937a030482410101670118365558195a0a062d0581260a011881068143330844041f0a1851011a2025141e1081204f550e077521011a193b1f58351912265681220821011812070528472f4e2f407f204a01124d1e1f811b810d7b4d01180f1d3481034a35580a12811f011c2303340d1470150778070c812331011620411939703c032915143f01104281116a3d323c6a0201350604842902013703023f0201390a04816e0201610604826402016202023f0201630a04843302016409048258020165020482620404814702016602061b833f0201670404827d03026369020227020169040483490301670604847602016a0604845d0404840902016c04024601088150822b010482350204830a0301690304844902016d0202710704820002016f03048509020482380304836603026e74010483580201700504817a020171020481080204826a0201720304837b0202410302160302c2b3080484550201730904816e020174040248010484280304834e020275730404836e020176060484720104815c02017709026a020178060275040483790201790504821501026c030170080483770304cfb269710704815102017a0204813303016f090483160202c2aa050481400301ba0804810b0401630804830d0301be0604844d0202c8a30a02110203cba0640204816a0202cebf060482420204e487856e030481080204ec97bd6d080484080205f09eb3a0770502260208f687999931ed878703048424010168011a12460e090c036e151b812e065501161708411982151738471f35011a2d1c0678340c1f04425c21200c010c2a087f255d4a011a0d0b6c33814a212c3a0a401b1e011c501f2381010a0481201c0c6012280118150b5228520e0a036c1c8123011a15810a060408030a81563f381601185b1b06212a143f332a60160e011a221b1e62411d2048090e0b0f5502033072350804826a020131020482530201320304823b0201330104814502033677380204813102026174020483540304dbbf6f620404835d0201620504846a0104831803017103048323020164060483740201660a02410201690102130104821402016a0504823f02016c0404832e02016d09022602026e640a04822702016f070482000301750402670201710304813106020d0202726304048220020173060484530201750504831e020483400302c2bd0704843a020177020483470203786371030483740201790904810002017a0302300202c2b9080483280301bc010481700303bd3
3720304825e0205c99973c2bd040483160203d5a6330a04842b0204e7b3b3300904813c0205f099a68f72090209010169011c21101d4b2d0e0e066b4253074c140118070a0910447556030833541d01163733816837402b3909122501183c5b1139102e2d430c662334011e27050f21621230323503332b6a0332011e1e07031843202e6e3c2850094d410c01163955220b16812d24521212011681250b0a3505460481176f011a2c09266b162968051c0a1481170116022e1e820c352037263a070201310104825d0602110303696869020484070201320204826e0201610304832203016f040232020162050642843f03048336020484540301370804833c03026c6105027a0203636165090483120201640502770304833e03017107022f0301780702470201650a04811e020167080238020168040481160102230404826b030170040483000201690304836f0302766c0304811402016b0504812e02026c6108027702016d0308827d81530604837302026e790904842602016f06048208020170060481680302320201710204812902017307048255020274320104822a02017506026803016e0a04821303017207025f0302c2b90504834a020177020483130201780604836b0402210301320604847302017a010483130202c2aa0804823c0301b9030482600301ba0104845304016c0504837e0202c3b8080484600204cf9d6379020483660202d3860704812e0203e3a4be0402560203e58784010481210204f09e95ac0102580204f5aea5890a023301016a01123428131a1f6c81445601141e227c1a7b5f1918810301182318812e17455605460d811c011a28820221311a6e12093f050a0c0120082c0b0f1362074457460c3b070d5132011c2143052a20133d160a358117591f01103136813b136e6247011c100e4c28060d16815a320a3e11070124462c03582e262d45110804113326040808070807080809090b81050708060708090607060907070b070e07070807060706070b08070f0807070709080708080c070706060808090c07060708080708080909811307070708060709080707070607070a060b070706070707080a080607060c070707080809070608080908090a812406070707070a0906070b0b0908070b07070b0607070b0608070608090b080a080f080a0608080b070b08070a080b0a810408080708080607090707080807070b070c070a070f070b080607090707080d06070b810b070607070607070b08070707070b14070a0f08070b0d08070e080b060a0a070a0707080707070709080a0a0a0e810f0907070709080a0b0707060a0707060807060a08070b08070907060807090b090a0a81140a09070706100707090a060607060e07070807070d08070a07070806070608070a070708070707080a0808090909'); INSERT INTO t1_data 
VALUES(274877906947,X'00080e7f073c23110a1a18392f66090524183704276d6703306a320404824e030164080483520305c2bd7ac2bc0604815a0201360704833202016106021f020482400201630304822a0708817e8204030173040483500201640404824803016d0804824a03017709023002016606048367020268680a04815802016902088339811804027f0302656e0704834e0303d5a5370604816702036a3366090484470303c2ba660904826e02016b0904837c02016c07021403026c610604835802016d0204816802016e0104831202016f0104822f020270720602060201710704822202017206048174020273690204824602017409020c020175090482140201760a0482720301660404824403016a090484290201790404845703025d0203c2bc33040484620301bd0304824c020484540202c78607022403019a010482380202ca87070484390202d39d030485050203e184940404831b0203e6a881060483480203e8b18c0a04816d0203ee8d850104814801016b0110467257393c81272c011a053e815d3b190517064524521f011c3823590a8115372004313b1f3216011a5a20780b102d0804426916112c011a182f810781082d12137026161501221a180516811611051c131207811515173501180320112581062e05621c1407011c2d0e0617811522062208065a21520114582841621e6c203f1e2001161647411a272533815b1c2602013009048309020232630104835a0301720104817f0201330604836f0302ddb5080482560202347a07048102020135020483460104827b02043678ca800a04835f0201370404814b0104846002016103048246010482220301700204833f0201620404824d060481150201650304824f02016606088110834c0201670604821d0303c2be66010481790201680404843b030176050482270201690a04830e02016e0904844202016f040481010301630304822f020270640204822f03016e0704845802027177090482710206736ec2b2796a0104832e0306dab1d485377004048304020174050481700201750a0212020378627604048164030173080483190201790704833d0204823a02017a0506820e67030178070484530202c2b2060481500104823f020483030301b3020484310301bc04027e0402caaf0a026a0301be040482590204842e0202540202c79f0804824d0202cfa30804815a0204f29a92970204823301016c01140f63351a0a653b650d22011c09117a3e1538123537046a15043101141310082f49052f772b0c011c11121781583c2a5010133228241301287f3e0a2b1244080503060a100f413b4f0d070e2a01103e4e1f04814e7b1601183d0404052877111f230f811d01123a100f053e5c076910011a031732102381243d1b1727507301180e5d273e810803812e0f192a02013301048271070481330204821d020134080263020135060481280201610104830a0201640604826d020165060483050201670204841c0504841c0304841b0201680a04845602016a0104811c01024f030481080204813102016b0204837008024502016d0404836c04068207780301670704842302016f0404821203016d040484490301720404837e0201700104821d03048407030165050483050201720a04811602017307023502017407020503016f080484240302c2b90504821b020175090484090201770a088119822503026d6905048300020178030482680604812a0201790104830c0204833a0303d9a06806022002017a060482600203c2aa33030481560301b904020a0301bd0504820f0202d0b90904817c0202d3820202200202daa9080482030203de966e02024b0202df9d080484350204f098b0a20604845e01016d01220304456608322258060a031d4c38340f090112310c070e4238626e6601124a318109030513812f0118240d561e533742188113101b01160b24444b224d44814d4806011c05774e483410330d23541b28090401141f29062581131e221b6d011e81053a037a03320b0e4c24360d2310011a0e321d3c141825111d54637a1c0114093d3c2e58571a35293a0201350104840a03017701048330020136060217020138030483370201610a0482650201620504815e0201630704827701048201020164080483690201660804846703016904048113020167070483080201680504837d05022c0302cdb10a04815f0201690104833a0404824302026a360a04823b02016b0a04813502016d0504831a0204833803021a02026e360404825e02016f080484140201720304844b0404816603056ff09d899b0304823f020275390204816e0301780a04824202017604088308812703027902027770050482040201790104827e040482750204812902017a060483030304c2bdc2b30104836f0203c2aa62040484040301b903021c0302bd6b090484300301be0704814d0202c994020256030
49a65656b090484020202ca92090482060203d19a730504844a0203d49f690804836e0202dfa8020482710204f09180860704822901016e011a0c0b8104243647521f43231f36011a2e1b33432c3d0b414905054d17011010573a6c0a816c1801160e063582340a5239050b06011a4481063d1b67250f2044200839012044591d1857291214135814101a1b361d011225067e8147111a4a4301166b13362e17195f3812186f01141c465b032b290406373301182a152a2281300f8107054e3f02023274080481770305c2bacf8168020481450201340604832f0201350704842e02013605020e0201380404841d0201610404810d020483750201620304812b020484230301610804834503026c6a0304816d0201630102380305613577337405048359020165040482720201660904826202066736f094b0af0a0482250201680104811f02016b0304847202016c0404822403016f0904822c0302c2b301025002016d0504817b01023f02016e020483090802040303e7bda10804832d02036f6b740404811402017005048419020484220202716506026303026b760904830a020172080482430304706c73620504825f02017308048413020174080481070201760104827f0204836e020477e7b89a0104840e02017a030483700206c2aa35657065050482740301b30804842b04046cc2be78090481040301b903020d0301bc010484260904813f0203c7a5620302330203c99f36050481010301a30704815b0202ca8b090483250202cdbb0604820a0202cebc0102170401380304842b0207eca2a6f29c87950904824001016f01221d17052b58101241060e3a201f1021633a0114816919811c142443100801280426080e2620042a812c531a490e121707131710011273432e493347811a340112195f671f46721c325e0118380c052b812822478107600b0116021c21821b2019263433040126021b05351b2a286b05181f071b5628111a330a012014533e073d0c0e5469141d1e2734050901220318051b44412803632e0642370e0a3a2b020131070481770201320a04812f0202346e04022b020136030483590304f09a81b60404834702016105048210020162030205030167010268020163010481540604820202048300020264310902420201650804834b02016703048247030365c6b602048205030573f098b890030481450201690204832802016a0a04826703016c0104825e02016b0604815e03016c03048334030677c2aa74c2bc09023d02016c0304823903027777060484540303df866c0104815b02016d0204811f0303796f7704020302016e0204814d07024a02016f0902680201700604840a0104831a0204835a020172070484440201730a023703026b660604830e030278790304815b0201750904822402017704025b020178030482350307f4b2a3896a343407026b0201790902720302633409020402017a020484590302dea004025f0202c2b20204816b050481200202de900402160204ee85a5770204822c0101700114143a0d391a60812d4e09011a2f313104201c372c3a3411321b011a268140144226334145050d1c4d01164e081f20671f088107237901186b123c1f6d07261e2b732e210116511116342a3d32376e083001106882257a0a17141101163039192b0c05812d735f3b01262a3e0841030b17181411051e0a18530e272b6d01182f322b260e24581d5381050f02013104048353020132050482370301690204843e02013302020d0201340104841f0201350a048139020137050482770201380204833a02016105025a0504832f0201620304836d020163050484100304832003016c030481290201640304837e020482490304822b010482290201660a04827002046964dc960204833102016c0704814502016d010482000201700104817e02037176760904821f020473eb91a708048152020174090482770201750404831e01063a825d03017a0904826a020276730a0254020177080260030277630104815c020178070481220202020301720804841202017a0204834c0202c2aa040484010301ba080482580202c6a3020481320203cdbf690502790202ce90070483140301bc030481470205d1a371cebc060481590203d2976a0404830c0203dfba6e0604814b01017101163732393b8120422f054b0e010e030b211d815d1c01165641757c080d81311d090e0112816581542d2313054301224e07121706516606080e39102d231c4b39010a2d81402d5e011a132527428114080d6e1111721c011a814a1a341538251023100d1c4c011e22182622623712411e38162a182d3b01142b67611981470f1f1f250201310a04824e0202336207048217020238730204815a0303cf886d06025a02033962620404833803016f0a04814e020161060483140302726d050483450301790904810c02036376690504811c02016403024d02016501048328
0802550301650904827f0304ebb8b561070482340201670302670301660804810e020168040482340201690704844d0302616404020f02016a030481060301700704827802016b070481240104814b0302c2bd0504816102016c0604837f03017a0404837902036dc2b90804810002016e0904821602016f0304812c03016401024b02017103048233060483600303e5848e04023a020172040481050305f3978aa06c070481151708070b070a0d0707070607080c080909090706080707070707070806070707070a090b070708080909090981140708070708080b0a0b0b070b070907090707070707070807080c0c070609070b0807100706070e08080a81110f06070707070f07120a0c070707070b0707060607080709080b0b080709060708070808080a810f0707060707070b070707070a080b08070e08070b0b08070c080f070a09060807070a080909080a810b080b070706070b0b0708060b07070c07070707070a0a09090b0708070a07070b0a070c070a060b080907080807070d8123070707070a0706060f070707090b07070707070b07080907080a060f070608080706070c060707070c070a810f07070706070707070a070b0713070a070707090a070c070706080a07070807080808070b0909810607080808090707080709060a070a060707070707070b080707090707060b0807'); INSERT INTO t1_data VALUES(274877906948,X'00000e8a0330717304048359030134050481100203756371070482190201760704817b0301770804821d0201770204844d0201780204836c0404826103017504026a0202c2aa020482420301b305022c040267390602570301b9080481540301bc0102290206c99cf6b5aa80080481430202cebc02026e02048120010172011a1c2f15158108048125463f251d010811811539011412423105812181171549011847284a30234e5b33042632120118351e8113817d0f2b220d111901264f104a211004061d0a2a0b35121a0a2118341f011c81160a1b030d2a0610243e445f0c011c6f0c1e3b1768141e322717500b140110537f810169811625011a492847203e210f532c16480627020135020481780302caae0104811a020261330904846c0201620804812d0201630404814a0201650704837503017301048276020168050213020169010484540604842b0302796f0504833302016a0304831b02016b0604701302016d0604815302016e070483630204815202027071020481520201710104835b02037266700704843002027362040481490201740904817d020175020481040304f59e9c9407048218020176060484060204776dc2aa020483070201780504812503016601048159020279790704840502027a730904826c030178030482740202c2aa04023d010483540301bd06026b0203cab877010483290202cdbf060482410202cebc04025c0401690204827f04016c0904840e0202cf880104835d0203dfbe6c01025b0204f0aeb7b2030481680205f1a7b5bb390504826a010173011a22810d12415003071f81181839011a3220221511546d810012052b57011a0c4274300d154e81111f041e10011e293f4213051b2276560817312811170120092136122418370e4e782b3912080f3201262b3b340f222b0c09142a0822116a135c1c130c0114320c4e385a0d0415075f01163543340f06362381133c0c012224180981742048191d110e180e180d310f011a20632450281f043027114b034e0203316a61030482160201360104826e0201380a0484570201610104844d0504814a0201620904820f020164010481630304810403027761040258020165080481550302c9b70204827a0204677367690102660201680104831b020169020483760301720a04814502016a04020c02016b04023405023c030269630504840502016c08048463020170030482670202716d080482000201720304835c03016507024d020174030482520504821503016505048207020175040208030137060481710201760a04833d0201780404832302027a6f080481200202c2ba0204812c0301bd0404821e0205c3a66865730702540202c7890804816f0205cebcc7af730504815c0202d2930202540202db89020481160203e8b8a00304825d0205f0958db331070481620305989b8569780a04813d0204f69299a5020210010174011a7b3829100a4e511f1a281c17140114812626032c372634234c01140a520e815a810815200501123e4f3531042d57615b011a041f3e64070f1f1913274a20770114811d0f5d743e0634161c01162c2782130c1b810520280d01164a513110480b402b810d13011e522d08042c1146137012201e810512011a290903182c05301e5d811944290201390a04836d0301610104836e02016208027803036cc2bc07048261020263640a0481480301730704813602016408022b020165050484310301720
702260301780402500203666d73070484470201670104825b02016a040481590304836702016c0304835601025e02016d060484110301340204813702016f04021802017006068336280201710304813507025902017201026508022903016d0604812f0201740304827e03016a010482440201750404834b02017604021a010484150504836a02057773c2b2380602520201780204823e0302cebc080484040303d2956403048171020179020484240204813c0202c2b303048307020484410301be0204816f030484250203c798680104843d03019c030482570204e19ea86a020482350302a0950a0482280204e5a4bc780304810e0101750126090a35030a03220a1731630f31252f0c4b1e31011e39200e3715282a03103b56090f6b1501121d4916246e6d460d6501162609380406361e816d203f011a22166008124f58202e182025150114390f3a25713f0e3f715c011a5a11191123466025710c313312011e3c191326811c1444055f1f5109051201143b106f1181000d068155012043381381020d81080d0603171824260a0201300404844a02013207024203026b390204833803066eeebabb35660604842102013308020302023f02013503048243020436716b66040481440201380a04843d020261690704836002016203048209030484670201630a04841d020264790104822b0201650502340302c2bd060484300201670202620201680704810d020169010484430104843402026b67040481540306eea3ad77c2aa0a04836702016c020258040482270104830a02016d0404824102036e716a0604843e02016f0104832a04020c0204836803036530650a04817402017005025b0301630604843d020171070275020273720404827002017504048133010484120301370102270201760104814f03026203016d0904844802017701048375080481220301660202330301700a04827f020378c2b2030484710202796806023c0301730704813d02017a0404815f0104817202048407010483390202c2b204026802048254040266640204831e0301b3090482230302b963060482010401750a0483290306ba35f2999dac09020f0203ca926e090483350203cdb4780302350202cebc0a020f0204d7a7696d010483100206f097bc996d71040481480101760114185b2258291610821c0e01160272173107154f5b813722011a81020c200e1826250d39811f07011a7911152a2a45131504422c81070120050d3f5b23342e3e4139032a3813042d0116592d1c15630c0c0a814649011c1a362f5c4a35511f0804033e372b01102981262a352e8205010e0b4b6282388106011e26810a2d125f361a12170d1721311e0201300204832b0201320104811b050483790201350204832202016404024c0202657a090483710202666307022703016c07048362030277750604842f0201670304843d0104844c0203686f7a07027202016a0102430204847502046ce0a2b20704810302016d0204827b02026e330804826c02016f0104835502017102027606026c020172060483490202736505048371020174030484130302387602025d02017502048345020376346b0904825c0201770904814b0303c2bd720804812b020178040483600201790804816402017a08026e0202c2b90704811c0301bd0504821e0204cdbcc7a108021a0202cfb8080481490204f09f96a50204842101017701180207232d37812d0c045c4a0a011a06163b3408171c52213a26592201206d08581605811a171e0c0a1347104914011282181324082b73320f01122f6e811d2c3d410a44011e551414206a092f133f333d150a3e0f011e235b170e37060627471b13373b3e27011a0e1e816b270c10102d53381045011a060e1e254d044932651234691e011a158138300a04810c0a8121071802013003027707048214020131080481370201320a04835502013304048271020538ceb369650802310201390a0481440202616c0502570302c7a104026c0201620804811c02016301048168020241020164040481560104820b020165080484460102400202686c060232020269670604827302016a0108810c826e0704824e02026b6c0604816902016c0304831c02016d0304811903016407022b02016e01022e0604845a030237780702040301710804815102016f0104842903017a0704826902017003048445020482080202713002024e0201720804822003016202022302017303048111010482790204812d03067479c39f66700a0210020174020229020175090484430301690604820402017606020202017a01021b0104843a0202c2b90604842b0301bd030484570202c69b0504815d0202c8a30a02240202ceb8090484690301bc0404832f0202df85020481230101780116812b0a16810e4b045a3b2a01205b1305811134092f62072343100f0f05011e5734152612030b4c4134123009361601121781
653207780a6a0d01164a25210824138107738139011481341f088158060c8133010e5920193a4c2331011a0510358101231a1b3609702732011a2f07631610033436810256174c011a1342040a58110721378139101602033067750802720201320604834303017109048244020133090481700203366f79010483520201390404810902016301048411050483420201640a048432020265310704832b02026774040481000302d5b2010259020369756c0504832902016c07048365030233700904824102016d010482670404834c0504830c02016e0104831604048120030169090481750303eba6990104835f02016f0604834c030379c2ba0a0481560201700404815e06048256020174040482370201760a04820d0201770604811c02017a0404812e06020b0202c2b9030481660301bd0a04816c0301be030483620206c99b6d7777750304835b0206ceb0646b66610402490202d8bf030482250206e8bfbc626964080482510204f09c9a9e0404830f010179010e2081335661371c01220e4e2718124f0d0649812b0b0a063b040b011402741d1235810805211a011409161d732b8106325f6a01182e330325068107703728302b011e3723081c0d0a3f810c183e061b067f0106834a12011a044030185a1e810704220a0541011245602b0e421441817801144b03811a1a29614e224b02013003048139020132050485050304f09caba6010482230201350104822708048413020138020483430201610402230201620902440108812181070301300304815e020263710404831002016403048226010483660604823c020165010484510201670104816407048418030334c2b20a0481120201690404814c02016a010481500904810f02036b75610304836402016d01027902016f0304817b03056f6373cebc0a0483010201700104817d020171050482680104843b0302383203048128040807090707070b0608060707060c0b810e07080807070707060b080707070b0807090807070a070a07070808070b06090807070708080a0b81230907070b070b0707080907070706090807070807060b07060707070808070a080b0708090b0b09810a0707060908070607060609070b0a070706080a09070707070e0a0708090b0c0b09070a080a811a0706080c09070a07080b0708060806070b080c0e07090e09060706080b060a070b0607090707130b080708070b0908070a0c810d070b0706080707080b080a0a070807090708070707090709070706080709080a81170a0707070a070707070a0b0a07080d080707060a070707070b0707060f0b060707060a08070807080708810d0807070709070b070808070907080f0b070907090b0707070a0807070c0b080c0a810107070a0b07060c07080f070b09070b0906070b070b'); INSERT INTO t1_data 
VALUES(274877906949,X'00000e5c033079720404826c0404833002021b03026f6b020482100201740904842e010484150303c2b36f0204840e0201760704826b02017708048273020578f48ba5b50a0481400201790904845902017a050483280304c2bac2bd0404841e0203c2aa680904843a0301bc070481100301be0304820304016a050481630202ca8103027b0202cb860604840d0204ceb56e370204832e0302bc740202520202cf8d07027e0205d8ae39c2aa0104813c0204e887b3770404816a0204f1bfb0970204832f01017a0118101e282f07045961813a193e011e69162f0d2b051c060f084460063053011e06810c20330d0733815c220515220c011210290a7e07810c3a18011a1f2f064a19155212472781047c010a123e45825501166c6062182718167131092f0112331b812c0b6e81470b01184f3a230d45261e271c36111701140a8128456b291248391a0201300104840d0204812502013401048318020137070483070201380904823b03016704026402013907023403016e060481380201610204812f0404824504045243020162050481150104824003016a0804827a020163070484590204817903017a02048209020164020484200203653077030482610302737601048424020166070481730201670304841e0504840e03016b0504825302016901048235010484400404841002016a010483680404845f02046bc2b97407023302026c7a0704812d02027161070209030378cebc0a021f020172040483750404815f0201730304813c030131020482040201740704824d0302793901027502017604048423020177040620813103048313020179030484540204833c04022503017a0904826302017a050483530203c2b2660404840f0307ba6272f397bd92010483070301be020482760202c6b9050482410203c9a3650604812c0202cbae020482180202d38c0704812f0204f096adb7070483490204f6a69c8b030484390102c2aa010c815e81147969010c811c827f0417010e81077a4c03815f010a2e8100820c010a810d148140010c0a7481201a13010c0b831525323d010a8206358129010c21637a33812701083c8340390301300504820a0304326939770404841c03016107048334030165030484150302696c0704810103016c0704812a03027178090481620301730504830e0301750504846303017802021c0301790304836803017a0604834e040135030482650302cdbf010483330304ceb23169070484090303e4849a0504817d0201b201088219744601082210832c010629846001121c8137182211816232010a81668122690108817281080110816a433581102f3e010e7481001b3481190106814e1b010e3646823a810e070301310504840b030161010481720301660604822f0301680804821003016a0704844803016b0104845b03016c0504823403026f700802340301710804826004026d3106024a03017509068458070301770a04836c04016b01021f0301790a0482790302c2b20a02270401b3040482130303ceae6e010484330302df9e040482100201b3010881656e750106820b50010e81434a27048153010a812b068122010843810c0401048211010a50817d812e010c811a8163810801061e832c010c811f81418101030163030481210301640504831102048104030482440301680a048202040271300404841303066b6576cebc6a0104843603026f7a08026d0303756a7808023e030176040483610301770704814403027973020484360302c2b906020c0401bc010484490401bd0804835c0302d38c0304846f0304eaae96750a0484020201b9010c3b824018322d010e0468315a5c817901067d583401060b810e010865118169010c0e07826b813a01066f8265010c3b8239633852010e61161e7030821b01068241210301350804843d0301620302480301690402610403e7a1910704812103016d080269030673c2bac2b36c090483070301760504821d02048454030278350702620302c2ba010483320304ca897a750804813b0401b5070482350303cebc6d050483120302d199020483020402ad660604840c0302d2a1010482550302daa30202340201ba01026d01085119826a010c2f5e82008110010882028221010481090108821b812d01068128460208810b8264010c810971812d440301310304820e0301320504825e05048221030333756d010482720301340502700301640604813d03016607020c0301690204823a03016b0104812203016c0704842103016d0304835f03016e0a04813c03016f0304834d030173060482660401710604810f0301740104843c0303d48174040482690201bc010482690108813c83290114090f816045242e148111010a810616822701048456010e2c81167a638115010a3a3b83204001067a8367010c8114127a2265010a824f7f5c230301300604833e0302616f02048
3560301640104827d090481280301650502150301660804841a03016804048221030169030481120301720104827803017606023d04016e020481700302c2aa0904836c0401ba090484100201bd01088240224f010843813674020a81185068620106822a44010e14154a8101825e010c19814a1b826c010c81221f81651b010a815a4d812c01082d7281160301300804843c030231300304836703013509048353030365c2bc0304814d0302667504027a0301670604831503016c09025d03016d03048178030172020483620301730104816b0402c2b30404823203017401027204016a08048451030175090237040177090481690301770302190301780a04823d03027a350a0481260302c2aa060481620401b3090481090304f098a1a30a04825d0201be010a0d730b816f010c821d81236c2b010c6f8208358119010681236801087d4e831e0108823c8235010667794b010683165e010e05812e3d3c820d010c81148123817403043531c2ba0204824c03083875c2bceba7957a03020803016302020804036dcebc0304814e0301640304843704033379790504823203016b08023603016d070481090304825804013707027a03016f080484530301710a04842203017809025e0304c2aa6135010481460401b90a04836b0401bc0704814f0306c6ab66e3afb9080482660103c39f350304821f0201b0050481110301680602030201b8040481310604810403016c0a024803017108024b0201be06048323030482650301300504812d0304f48990ae060482740103c491680a0483700301760a0481290201a7010484090202b177020482380201b3030482280102c5800602090301360104826a02018202048173040483780301370104814102018b090482730201930504810f0102c680020483790404830c020183020228020185030484400704820b02028d7a0a04821c0201920404835e02019507048437010484070301760204836102019a020214080483100303d795680504820602019e030481050201a30404820a05048436030177070484290201a50704843b0201a8060482690304816f03027735020483630202ab79050484430201b6060481050201b904024003016d070481130201bd090484350201bf06048361030163020481580102c781040484560201830604833a020286650404842a0301750a04821a020189070483370303e0b9b30904836602018c0104816c04020602021d030171050484570201960a04842d03016b0204821903016c04021e03016e0204845002019a0a04844402019d0704835103013103021702039f77750704827e0201a1090483340201a301022b0404831d0304616363750502520302cfb2040483720201a50a0481530202ad760902310201af0104811d0908816281390301640404843c0201b3080483240201bb030483480201bd040484290304840a0203bf646a0404843f03017a040482450102c89d05020f0201a1090483550201a3060483550201ab0904813203017a04024a0201ad030482210201b10204824302025d0303656577030483600202b46e010482240201b6090484410204bce39f9d020484570102c9870604813202018b0804843603017a06027102018d080481130204840c02018f0104836309026202029164020484430201930504816d0201970304843402039c6c73070483700201a0070483470201a203048262030364d7960a0484010201a80304832a060484390202a96a080481170303c69532090481440201ac040214020481180202ae770604832c0201b0090482300201b40804845a0201b5070482110302716f010482160201ba0504837b0201bb02048149030165050483250201bc07088105833002067f824d0302d19f0402330201bd04024d03017a0904814f0102ca800a048321020181040481580201820604821e020383693707048417020287630704840002018a0904834c0104835c02018b06022f02018c0704814702018d0704810a0104831a020490e7b38308048423020191020482510201920304832f030135030483710201950304833207026802019902026b02019b0904832a02019d09024c02019f0802210201a4010484080202a5710a0481680203a777670804831e0301790902100201ae0402700104834e0201af040484650202b673080484190201b705048221010483520201b808048465030167010484400104cb81cab90104834802018602048317020689777568cf920804842602018b0404817b020191050484200202a0720304833e0201a3040625827f0201a4030485080102cdb1070881408205020483700201b30404840e0202b735080482640201b80504815a0201b90a0482370206bb31cebc6c730304842f030133070483010103ce80370304826d0201900202770201ae0504844e0201b1050484080201b20904815f0201b3040483430201b40702600201b5080482
3b0201b90304846e0302c9a1070483300201ba0404844d0104847e0201bb050482780201bc010882378223010e811105814d8134010a8142823f2d0106811c52010a6e1814816b02088168824b010a438137812d011019060c6b812f811c010a81314e811b03033366660404825503013502048263030238620904820703016303048120040f080b0907070b07070a0907070707080a07070b0a0a81060b0707070606070f0b070b07070908070b070f0b090807080b07070707070c0e0707090d07080908080a0a50070a0707080708070706070707080a094d07070707070707070707080706070707090844070f07080c0708070708070707080a4707060609060c0b07080a07090808080737070b09060706070707070707070707094807080b0607070707060708074107080709070706070707080607060706070808070a460a0d06090709060b060707060a07070c0907060b06060b070a090707080707070b0707070c060b08070b070a09070b07070b08080706070707070807080707090d070707060707070609070a090807070d0707070b09070707070706070a0908070a0807060b0a080707090707090b08090a08070707080707070e07060708070709080b06070b0a0707070a06070606070809060a07080b07070a070c07070808070e070807070c07090607070707060707080b0743090708'); @@ -6976,7 +6975,7 @@ COMMIT; do_catchsql_test 51.1 { SELECT max(rowid)==0 FROM t1('e*'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #-------------------------------------------------------------------------- reset_db @@ -8752,7 +8751,7 @@ do_test 60.0 { do_catchsql_test 60.2 { SELECT (matchinfo(t1,591)) FROM t1 WHERE t1 MATCH 'e*eŸ' -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- do_test 61.0 { @@ -8958,7 +8957,6 @@ do_catchsql_test 61.2 { SELECT * FROM t3 ORDER BY rowid; } {/*malformed database schema*/} -breakpoint #------------------------------------------------------------------------- do_test 62.0 { sqlite3 db {} @@ -9774,7 +9772,7 @@ do_test 66.0 { do_catchsql_test 66.1 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- # @@ -10108,7 +10106,7 @@ do_test 68.0 { do_catchsql_test 68.1 { PRAGMA reverse_unordered_selects=ON; INSERT INTO t1(t1) SELECT x FROM t2; -} {1 {database disk image is malformed}} +} {1 {fts5: corruption on page 1, segment 1, table "t1"}} #------------------------------------------------------------------------- reset_db @@ -10324,7 +10322,7 @@ do_test 69.0 { do_catchsql_test 69.2 { SELECT * FROM t1 WHERE a MATCH 'fx*' -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -10507,7 +10505,7 @@ do_test 71.0 { do_catchsql_test 71.2 { INSERT INTO t1(t1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -10634,9 +10632,9 @@ do_catchsql_test 72.1 { INSERT INTO ttt(ttt) VALUES('integrity-check'); } {1 {database disk image is malformed}} -do_catchsql_test 72.1 { +do_catchsql_test 72.2 { SELECT 1 FROM ttt('e* NOT ee*'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -10762,12 +10760,13 @@ do_test 73.0 { do_catchsql_test 73.1 { SELECT snippet(ttt,ttt, NOT 54 ), * FROM ttt('e* NOT ee*e* NOT ee* NOT ee*e* NOT e*') ; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db do_test 74.0 { sqlite3 db {} + 
sqlite3_fts5_register_matchinfo db db deserialize [decode_hexdb { | size 106496 pagesize 4096 filename x.db | page 1 offset 0 @@ -14587,14 +14586,19 @@ do_test 74.0 { | end x.db }]} {} -do_catchsql_test 74.1 { - SELECT rowid, quote(matchinfo(t1,'p�xyb... +| 2416: 17 01 05 05 34 74 61 62 6c 03 02 03 01 04 77 68 ....4tabl.....wh +| 2432: 65 72 03 02 06 09 1b 8c 80 80 80 80 0f 03 00 3c er.............< +| 2448: 00 00 00 16 05 34 66 74 73 34 03 02 02 01 04 6e .....4fts4.....n +| 2464: 75 6d 62 03 07 01 04 09 1b 8c 80 80 80 80 0e 03 umb............. +| 2480: 00 3c 00 00 00 16 04 33 74 68 65 03 06 01 01 04 .<.....3the..... +| 2496: 01 03 77 68 65 03 02 04 04 0a 1b 8c 80 80 80 80 ..whe........... +| 2512: 0d 03 00 3c 00 00 00 16 04 33 6e 75 6d 03 06 01 ...<.....3num... +| 2528: 01 05 01 03 74 61 62 03 02 03 04 0a 19 8c 80 80 ....tab......... +| 2544: 80 80 0c 03 00 38 00 00 00 14 03 32 77 68 03 02 .....8.....2wh.. +| 2560: 04 00 04 33 66 74 73 03 02 02 04 07 18 8c 80 80 ...3fts......... +| 2576: 80 80 0b 03 00 36 00 00 00 13 03 32 74 61 03 02 .....6.....2ta.. +| 2592: 03 02 01 68 03 06 01 01 04 04 07 1b 8c 80 80 80 ...h............ +| 2608: 80 0a 03 00 3c 00 00 00 16 03 32 6e 75 03 06 01 ....<.....2nu... +| 2624: 01 05 01 02 6f 66 03 06 01 01 06 04 09 19 8c 80 ....of.......... +| 2640: 80 80 80 09 03 00 38 00 00 00 14 03 32 66 74 03 ......8.....2ft. +| 2656: 02 02 01 02 69 73 02 06 01 01 03 04 07 18 8c 80 ....is.......... +| 2672: 80 80 22 08 03 00 36 00 00 00 13 02 31 74 03 08 ......6.....1t.. +| 2688: 03 01 01 04 01 01 77 03 02 04 04 09 1a 8c 80 80 ......w......... +| 2704: 80 80 07 03 00 3a 00 00 00 15 02 31 6e 03 08 01 .....:.....1n... +| 2720: 01 02 05 01 01 6f 03 06 01 01 06 04 09 18 8c 80 .....o.......... +| 2736: 80 80 80 06 03 00 36 00 00 00 13 04 02 31 66 03 ......6......1f. +| 2752: 02 02 01 01 69 03 06 01 01 03 05 06 1c 8c 80 80 ....i........... +| 2768: 80 80 05 03 00 3e 00 00 00 17 04 30 74 68 65 03 .....>.....0the. +| 2784: 06 01 01 04 01 05 77 68 65 72 65 03 02 04 0a 15 ......where..... +| 2800: 8c 80 80 80 80 04 03 00 30 00 00 00 11 01 01 06 ........0....... +| 2816: 06 30 74 61 62 6c 65 03 01 f3 07 1c 8c 80 80 80 .0table......... +| 2832: 80 03 03 00 3e 00 00 00 17 07 30 6e 75 6d 62 65 ....>.....0numbe +| 2848: 72 03 06 01 01 05 01 02 6f 66 03 06 04 0d 13 8c r.......of...... +| 2864: 80 80 80 80 02 03 00 2c 00 00 00 0f 01 01 03 02 .......,........ +| 2880: 30 6e 03 06 01 01 02 07 1b 8c 80 80 80 80 01 03 0n.............. +| 2896: 00 3c 00 00 00 16 08 30 66 74 73 34 61 75 78 03 .<.....0fts4aux. +| 2912: 02 02 01 02 69 73 03 06 04 0c 00 00 00 14 2a 00 ....is........*. +| 2928: 00 00 01 01 02 24 00 02 01 01 12 02 01 12 08 88 .....$.......... +| 2944: 80 80 80 80 12 03 00 16 00 00 00 05 02 1c 88 80 ................ +| 2960: 80 80 80 11 03 00 3e 00 00 00 17 05 34 72 6f 77 ......>.....4row +| 2976: 73 02 06 01 01 05 01 04 74 68 65 72 02 02 04 0b s.......ther.... +| 2992: 15 88 80 80 80 80 10 03 00 30 00 00 00 11 02 01 .........0...... +| 3008: 01 07 05 34 62 65 74 77 02 02 04 08 1b 88 80 80 ...4betw........ +| 3024: 80 80 0f 03 00 3c 00 00 00 16 04 04 33 72 6f 77 .....<......3row +| 3040: 02 06 01 01 05 01 03 74 68 65 02 08 05 0a 1b 88 .......the...... +| 3056: 80 80 80 80 0e 03 00 3c 00 00 00 16 01 01 02 04 .......<........ +| 3072: 33 61 72 65 02 02 b3 01 03 62 65 74 02 02 07 08 3are.....bet.... +| 3088: 1b 88 80 80 80 80 0d 03 00 3c 00 00 00 16 03 32 .........<.....2 +| 3104: 74 68 02 08 02 01 01 07 00 04 33 61 6e 64 02 06 th........3and.. 
+| 3120: 04 0a 1b 88 80 80 80 80 0c 03 00 3c 00 00 00 16 ...........<.... +| 3136: 03 32 69 6e 02 06 01 01 06 01 02 72 6f 02 06 01 .2in.......ro... +| 3152: 01 05 04 09 18 88 80 80 80 80 0b 03 00 36 00 0f .............6.. +| 3168: f0 13 02 03 32 61 72 02 02 03 01 02 62 65 02 02 ....2ar.....be.. +| 3184: 03 05 07 1b 88 80 80 80 80 0a 03 00 3c dd 00 00 ............<... +| 3200: 18 c2 31 74 02 08 02 01 01 07 00 03 32 61 6e 02 ..1t........2an. +| 3216: 06 01 01 04 09 19 88 80 80 80 80 09 03 00 38 00 ..............8. +| 3232: 00 00 14 02 31 6e 02 06 01 01 03 01 01 72 02 06 ....1n.......r.. +| 3248: 01 01 05 04 08 17 88 80 80 80 80 08 03 00 34 00 ..............4. +| 3264: 00 00 12 02 31 62 02 02 04 01 01 69 02 06 01 01 ....1b.....i.... +| 3280: 06 04 06 19 88 80 90 80 80 07 03 00 38 00 00 00 ............8... +| 3296: 14 04 02 31 32 02 02 05 01 01 61 02 08 03 01 01 ...12.....a..... +| 3312: 02 05 06 1b 88 80 80 80 80 06 03 00 3c 00 00 00 ............<... +| 3328: 16 06 30 74 68 65 72 65 02 02 02 00 02 31 31 02 ..0there.....11. +| 3344: 06 01 01 04 0a 15 88 80 80 80 80 05 03 00 30 00 ..............0. +| 3360: 00 00 11 01 01 05 04 30 74 68 65 02 06 01 01 07 .......0the..... +| 3376: 07 1c 88 80 80 80 80 04 03 00 3e 00 00 00 17 01 ..........>..... +| 3392: 01 06 02 30 6e 02 06 01 01 03 01 04 72 6f 77 73 ...0n.......rows +| 3408: 02 06 07 08 1b 88 80 80 80 80 03 03 00 3c 00 00 .............<.. +| 3424: 00 16 08 30 62 65 74 77 65 65 6e 02 02 04 01 02 ...0between..... +| 3440: 69 6e 02 06 04 0c 1a 88 80 80 80 80 02 03 00 3a in.............: +| 3456: 00 00 00 15 04 30 61 6e 64 02 06 01 01 02 02 02 .....0and....... +| 3472: 72 65 02 02 03 04 0a 17 88 80 80 80 80 01 03 00 re.............. +| 3488: 34 00 00 0c 52 02 30 31 02 06 01 01 04 01 01 32 4...R.01.......2 +| 3504: 02 02 05 04 08 08 84 80 80 80 80 12 03 00 16 00 ................ +| 3520: 00 00 05 04 1b 84 80 80 80 80 11 03 00 3c 00 00 .............<.. +| 3536: 00 16 05 34 74 61 62 6c 01 06 00 f1 05 02 03 65 ...4tabl.......e +| 3552: 72 6d 01 02 04 0b 1b 84 80 80 80 80 10 03 00 3c rm.............< +| 3568: 00 00 00 16 05 34 65 61 63 68 01 02 03 01 04 70 .....4each.....p +| 3584: 72 65 73 01 02 05 04 09 1a 84 80 80 80 80 0f 03 res............. +| 3600: 00 3a 00 00 00 15 04 33 74 65 72 01 02 04 02 02 .:.....3ter..... +| 3616: 68 65 01 06 01 01 03 04 08 1b 84 80 80 80 80 0e he.............. +| 3632: 03 00 3c 00 00 00 16 04 33 70 72 65 01 02 05 01 ..<.....3pre.... +| 3648: 03 74 61 62 01 06 01 01 05 04 08 1a 84 80 80 80 .tab............ +| 3664: 80 0d 03 00 3a 00 00 00 15 04 33 66 6f 72 01 02 ....:.....3for.. +| 3680: 02 02 02 74 73 01 06 01 01 04 04 08 1b 84 80 80 ...ts........... +| 3696: 80 80 0c 03 00 3c 00 00 00 16 03 32 74 68 01 06 .....<.....2th.. +| 3712: 01 01 03 00 04 33 65 61 63 01 02 03 04 09 18 74 .....3eac......t +| 3728: 80 80 80 80 0b 03 00 36 00 00 00 13 03 32 74 61 .......6.....2ta +| 3744: 01 06 01 01 05 02 01 65 01 02 04 04 09 19 84 80 .......e........ +| 3760: 80 80 80 0a 03 00 38 00 00 00 14 03 32 69 6e 01 ......8.....2in. +| 3776: 06 01 01 02 11 02 70 62 01 02 05 04 09 18 84 80 ......pb........ +| 3792: 80 80 80 09 03 00 36 00 00 00 13 03 32 66 6f 01 ......6.....2fo. +| 3808: 02 02 02 01 74 01 06 01 01 04 04 07 1b 84 80 80 ....t........... +| 3824: 80 80 08 03 00 3c 0d c0 00 16 12 31 74 01 0a 04 .....<.....1t... +| 3840: 01 01 03 04 00 03 32 65 61 01 02 03 04 0a 17 84 ......2ea....... +| 3856: 80 80 80 80 07 03 00 34 00 00 00 12 02 31 69 01 .......4.....1i. +| 3872: 06 01 01 02 01 01 70 01 02 05 04 08 18 84 80 80 ......p......... 
+| 3888: 80 80 06 03 00 36 00 00 00 13 02 31 65 01 02 03 .....6.....1e... +| 3904: 01 01 66 01 08 02 5b 01 04 04 06 1b 84 80 80 80 ..f...[......... +| 3920: 80 05 03 00 3c 00 00 00 16 05 30 74 65 72 6d 01 ....<.....0term. +| 3936: 02 04 02 02 68 65 01 06 01 01 03 04 09 14 84 80 ....he.......... +| 3952: 80 80 80 04 03 00 2e 00 00 00 10 06 30 74 61 62 ............0tab +| 3968: 6c 65 01 06 01 01 05 04 15 84 80 80 80 80 03 03 le.............. +| 3984: 00 30 00 00 00 11 01 f8 30 70 72 65 73 65 6e 74 .0......0present +| 4000: 01 02 05 05 1b 84 80 80 80 80 02 03 00 3c 00 00 .............<.. +| 4016: 00 16 04 30 66 74 73 01 06 01 01 04 01 02 69 6e ...0fts.......in +| 4032: 01 06 01 01 04 0a 1a 84 80 80 80 80 01 03 00 3a ...............: +| 4048: 00 00 00 15 05 30 65 61 63 68 01 02 03 01 03 66 .....0each.....f +| 4064: 6f 72 01 02 02 04 09 06 01 03 00 12 03 0b 0f 00 or.............. +| 4080: 00 08 8c 80 80 80 80 11 03 00 16 00 00 00 05 04 ................ +| page 3 offset 8192 +| 0: 0a 00 00 00 32 0e 4f 00 0f fa 0f f1 0f e9 0f e1 ....2.O......... +| 16: 0f d8 0f d1 0f c9 0f c1 0f b9 0f b1 0f a9 0f a0 ................ +| 32: 0f 98 0f 90 0f 87 0f 80 0f 78 0f 71 0f 68 0f 5f .........x.q.h._ +| 48: 0f 56 0f 4d 0f 41 0f 38 0f 2f 0f 26 0f 1d 0f 13 .V.M.A.8./.&.... +| 64: 0f 0a 0f 01 0e f7 0e ee 0e e6 0e dd 0e d6 0e cd ................ +| 80: 0e c3 0e ba 0e b0 0e a8 0e 9f 0e 96 0e 00 00 00 ................ +| 3648: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 08 ................ +| 3664: 04 01 10 01 03 34 74 20 07 04 02 4e 01 03 34 1e .....4t ...N..4. +| 3680: 09 04 01 12 01 03 33 74 68 1c 08 04 01 10 01 03 ......3th....... +| 3696: 33 6e 1a 08 04 01 10 01 03 32 77 18 08 04 01 10 3n.......2w..... +| 3712: 01 03 32 74 16 08 04 01 10 01 03 32 6e 14 07 04 ..2t.......2n... +| 3728: 01 0e 01 03 32 12 08 04 01 10 01 03 31 74 10 08 ....2.......1t.. +| 3744: 04 01 10 01 03 31 6e 0e 07 04 01 0e 01 03 31 0c .....1n.......1. +| 3760: 09 04 01 12 01 03 30 74 68 0a 08 04 01 10 01 03 ......0th....... +| 3776: 30 74 08 19 04 01 12 01 03 30 6e 75 06 08 04 01 0t.......0nu.... +| 3792: 10 01 03 30 6e 04 06 04 01 0c 01 03 02 08 04 01 ...0n........... +| 3808: 10 01 02 34 72 22 07 04 01 0e 01 02 34 20 08 04 ...4r.......4 .. +| 3824: 01 10 01 02 33 72 1e 09 04 01 12 01 02 33 61 72 ....3r.......3ar +| 3840: 1c 08 04 01 10 01 02 32 74 1a 08 04 01 10 01 02 .......2t....... +| 3856: 32 69 18 09 04 01 12 01 02 32 60 82 16 08 04 01 2i.......2`..... +| 3872: 10 01 02 31 74 14 08 04 01 10 01 02 31 6e 12 08 ...1t.......1n.. +| 3888: 04 01 10 01 02 31 62 10 08 04 01 10 01 02 31 32 .....1b.......12 +| 3904: 0e 0b 04 01 16 01 02 30 74 68 65 72 0c 08 04 01 .......0ther.... +| 3920: 10 01 02 30 74 0a 08 04 01 10 01 02 30 6e 08 08 ...0t.......0n.. +| 3936: 04 01 10 01 02 30 62 06 08 04 01 10 01 02 30 61 .....0b.......0a +| 3952: 04 06 04 01 0c 01 02 02 07 04 09 10 01 34 74 22 .............4t. +| 3968: 06 04 09 0e 01 34 20 08 04 09 12 01 33 74 65 1e .....4 .....3te. +| 3984: 07 04 09 10 01 33 70 1c 07 04 09 10 01 33 66 1a .....3p......3f. +| 4000: 08 04 09 12 01 32 74 68 18 07 04 09 10 01 32 74 .....2th......2t +| 4016: 16 07 04 09 10 01 32 69 14 07 04 09 10 01 32 66 ......2i......2f +| 4032: 12 07 04 09 10 01 31 74 10 07 04 09 10 01 31 69 ......1t......1i +| 4048: 0e 06 04 09 0e 01 31 0c 08 04 09 12 01 30 74 65 ......1......0te +| 4064: 0a 07 04 09 10 01 30 74 08 07 04 09 10 01 30 70 ......0t......0p +| 4080: 06 08 04 09 12 01 30 66 74 04 05 04 09 0c 01 02 ......0ft....... 
+| page 4 offset 12288 +| 0: 0d 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 00 00 00 05 03 03 00 10 ................ +| 4080: 03 05 05 02 03 00 10 04 06 05 01 03 00 10 04 04 ................ +| page 5 offset 16384 +| 0: 0a 00 00 00 02 0f eb 00 0f eb 0f f4 00 00 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 00 00 00 08 03 15 01 70 ...............p +| 4080: 67 83 7a 18 0b 03 1b 01 76 65 72 73 69 6f 6e 04 g.z.....version. +| page 6 offset 20480 +| 0: 0d 00 00 00 03 0f f2 00 0f fc 0f 00 00 00 00 00 ................ +| 4080: 00 00 03 03 02 01 03 03 02 02 01 02 02 01 02 09 ................ +| end crash-c4a4c5492615bd.db +}]} {} + + +do_catchsql_test 83.1 { + SELECT * FROM t1('R*R*R*R*R*R*R*R*') WHERE (a,b)<=(current_date,0 BETWEEN 'a'<>11 AND '') ORDER BY rowid DESC; +} {/.*fts5: corruption found/} + +#------------------------------------------------------------------------- +reset_db +do_test 84.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 53248 pagesize 4096 filename c1a.txt.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 0d .....@ ........ +| 32: 00 00 00 02 00 00 00 01 00 00 00 09 00 00 00 04 ................ +| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................ +| 96: 00 00 00 00 0d 0f c7 00 07 0d 92 00 0f 8d 0f 36 ...............6 +| 112: 0e cb 0e 6b 0e 0e 0d b6 0d 92 0d 92 00 00 00 00 ...k............ +| 3472: 00 00 22 08 06 17 11 11 01 31 74 61 62 6c 65 74 .........1tablet +| 3488: 32 74 32 0d 43 52 45 41 54 45 20 54 41 42 4c 45 2t2.CREATE TABLE +| 3504: 20 74 32 28 78 29 56 07 06 17 1f 1f 01 7d 74 61 t2(x)V.......ta +| 3520: 62 6c 65 74 31 5f 63 6f 6e 66 69 67 74 31 5f 63 blet1_configt1_c +| 3536: 6f 6e 66 69 67 07 43 52 45 41 54 45 20 54 41 42 onfig.CREATE TAB +| 3552: 4c 45 20 27 74 31 5f 63 6f 6e 66 69 67 27 28 6b LE 't1_config'(k +| 3568: 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 76 29 PRIMARY KEY, v) +| 3584: 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 5b 06 WITHOUT ROWID[. +| 3600: 07 17 21 21 01 81 01 74 61 62 6c 65 74 31 5f 64 ..!!...tablet1_d +| 3616: 6f 63 73 69 7a 65 74 31 5f 64 6f 63 73 69 7a 65 ocsizet1_docsize +| 3632: 06 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 .CREATE TABLE 't +| 3648: 31 5f 64 6f 63 73 69 7a 65 27 28 69 64 20 49 4e 1_docsize'(id IN +| 3664: 54 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 TEGER PRIMARY KE +| 3680: 59 2c 20 73 7a 20 42 4c 4f 42 29 5e 05 07 17 21 Y, sz BLOB)^...! +| 3696: 21 01 81 07 74 61 62 6c 65 74 31 5f 63 6f 6e 74 !...tablet1_cont +| 3712: 65 6e 74 74 31 5f 63 6f 6e 74 65 6e 74 05 43 52 entt1_content.CR +| 3728: 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 63 EATE TABLE 't1_c +| 3744: 6f 6e 74 65 6e 74 27 28 69 64 20 49 4e 54 45 47 ontent'(id INTEG +| 3760: 45 52 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 ER PRIMARY KEY, +| 3776: 63 30 2c 20 63 31 2c 20 63 32 29 69 04 07 17 19 c0, c1, c2)i.... 
+| 3792: 19 01 81 2d 74 61 62 6c 65 74 31 5f 69 64 78 74 ...-tablet1_idxt +| 3808: 31 5f 69 64 78 04 43 52 45 41 54 45 20 54 41 42 1_idx.CREATE TAB +| 3824: 4c 45 20 27 74 31 5f 69 64 78 27 28 73 65 67 69 LE 't1_idx'(segi +| 3840: 64 2c 20 74 65 72 6d 2c 20 70 67 6e 6f 2c 20 50 d, term, pgno, P +| 3856: 52 49 4d 41 52 59 20 4b 45 59 28 73 65 67 69 64 RIMARY KEY(segid +| 3872: 2c 20 74 65 72 6d 29 29 20 57 49 54 48 4f 55 54 , term)) WITHOUT +| 3888: 20 52 4f 57 49 44 55 03 07 17 1b 1b 01 81 01 74 ROWIDU........t +| 3904: 61 62 6c 65 74 31 5f 64 61 74 61 74 31 5f 64 61 ablet1_datat1_da +| 3920: 74 61 03 43 52 45 41 54 45 20 54 41 42 4c 45 20 ta.CREATE TABLE +| 3936: 27 74 31 5f 64 61 74 61 27 28 69 64 20 49 4e 54 't1_data'(id INT +| 3952: 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 59 EGER PRIMARY KEY +| 3968: 2c 20 62 6c 6f 63 6b 20 42 4c 4f 42 29 38 02 06 , block BLOB)8.. +| 3984: 17 11 11 08 5f 74 61 62 6c 65 74 31 74 31 43 52 ...._tablet1t1CR +| 4000: 45 41 54 45 20 56 49 52 54 55 41 4c 20 54 41 42 EATE VIRTUAL TAB +| 4016: 4c 45 20 74 31 20 55 53 49 4e 47 20 66 74 73 35 LE t1 USING fts5 +| 4032: 28 61 2c 62 2c 63 29 00 00 00 39 00 00 00 00 00 (a,b,c)...9..... +| page 3 offset 8192 +| 0: 05 00 00 00 02 0f f1 00 00 00 00 0c 0f fb 0f f1 ................ +| 4064: 00 00 0b 01 03 00 1c 81 3a 84 5e 81 3a 81 3a 0a ........:.^.:.:. +| 4080: 0a 00 00 00 0b 84 80 80 80 80 01 00 00 00 0a 0a ................ +| page 4 offset 12288 +| 0: 0a 00 00 00 01 0f fa 00 0f fa 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 7 offset 24576 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| page 10 offset 36864 +| 0: 0d 00 00 00 02 0f e2 00 0f e2 0f ef 00 00 00 00 ................ +| 4064: 00 00 0b 01 03 00 1c 81 3a 84 5e 81 3a 81 3a 0f ........:.^.:.:. +| 4080: 0a 03 00 24 00 00 00 00 01 01 02 00 01 01 01 09 ...$............ +| page 11 offset 40960 +| 0: 0d 00 00 00 01 00 22 00 00 22 00 00 00 00 00 00 ................ +| 32: 00 00 9f 56 84 80 80 80 80 01 04 00 bf 30 00 00 ...V.........0.. +| 48: 0f 58 02 30 30 19 02 05 01 02 05 01 02 05 16 02 .X.00........... +| 64: 05 01 02 05 01 02 05 61 02 05 01 02 05 01 02 05 .......a........ +| 80: 13 02 05 01 02 05 01 02 05 0d 02 03 01 02 03 01 ................ +| 96: 02 03 02 09 78 66 66 66 66 66 66 66 65 81 17 02 ....xfffffffe... +| 112: 05 01 02 05 01 02 05 01 01 31 04 02 04 01 02 04 .........1...... +| 128: 01 02 04 01 02 05 01 02 05 01 02 05 0d 02 06 01 ................ +| 144: 02 06 01 02 06 02 01 30 79 02 04 01 02 04 01 02 .......0y....... +| 160: 04 03 02 30 30 2b 02 05 01 02 05 01 02 05 58 02 ...00+........X. +| 176: 05 01 02 05 01 02 05 01 02 05 01 02 05 01 02 05 ................ +| 192: 16 02 05 01 02 05 01 02 05 05 06 30 30 30 30 30 ...........00000 +| 208: 30 81 0b 02 04 01 02 04 01 02 04 10 02 05 01 02 0............... +| 224: 05 01 02 05 03 02 32 34 76 02 05 01 02 05 01 02 ......24v....... +| 240: 05 02 01 38 07 02 04 01 02 04 01 02 04 01 01 32 ...8...........2 +| 256: 28 02 04 01 02 04 01 02 04 04 02 05 01 02 05 01 (............... +| 272: 02 05 02 01 30 1f 02 05 01 02 05 01 02 05 03 02 ....0........... +| 288: 30 30 10 02 05 01 02 05 01 02 05 6a 02 04 01 02 00.........j.... +| 304: 04 01 02 04 02 08 35 30 30 30 30 30 30 30 81 26 ......50000000.& +| 320: 02 05 01 02 05 01 02 05 01 01 33 07 02 06 01 02 ..........3..... +| 336: 06 01 02 06 81 2c 02 04 01 02 04 01 02 04 02 04 .....,.......... 
+| 352: 32 37 36 36 81 23 02 05 01 02 05 01 02 05 01 01 2766.#.......... +| 368: 34 13 02 05 01 02 05 01 02 05 02 03 30 39 36 1c 4...........096. +| 384: 02 05 01 02 05 01 02 05 07 02 05 01 02 05 01 02 ................ +| 400: 05 01 03 35 30 30 7f 02 05 01 02 05 01 02 05 04 ...500.......... +| 416: 02 30 30 81 0e 02 06 01 02 06 01 02 06 06 03 30 .00............0 +| 432: 30 30 81 11 02 04 01 02 04 01 02 04 01 05 36 35 00............65 +| 448: 35 33 36 81 1a 02 05 01 02 05 01 02 05 01 04 38 536............8 +| 464: 31 39 32 81 02 02 06 01 02 06 01 02 06 01 05 61 192............a +| 480: 6c 6c 6f 77 01 02 02 01 02 02 01 02 02 02 02 72 llow...........r +| 496: 67 81 08 02 04 01 02 04 01 02 04 02 05 74 6f 6d g............tom +| 512: 69 63 04 02 02 01 02 02 01 02 02 03 06 74 61 63 ic...........tac +| 528: 68 65 64 79 02 03 01 02 03 01 02 03 02 0d 75 74 hedy..........ut +| 544: 6f 63 68 65 63 6b 70 6f 69 6e 74 2b 02 04 01 02 ocheckpoint+.... +| 560: 04 01 02 04 05 06 76 61 63 75 75 6d 0d 02 03 01 ......vacuum.... +| 576: 02 03 01 02 03 01 06 62 69 6e 61 72 79 03 06 01 .......binary... +| 592: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 608: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 624: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 640: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 656: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 672: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 688: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 704: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 720: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 736: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 752: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 768: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 784: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 800: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 816: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 832: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 848: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 864: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 880: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 896: 01 02 02 02 07 79 74 65 63 6f 64 65 37 02 03 01 .....ytecode7... +| 912: 02 03 01 02 03 01 05 63 61 63 68 65 10 02 03 01 .......cache.... +| 928: 02 03 01 01 03 02 04 6c 61 6e 67 07 02 03 01 02 .......lang..... +| 944: 03 01 02 03 02 05 6f 6c 75 6d 6e 7c 02 03 01 02 ......olumn|.... +| 960: 03 01 02 03 03 06 6d 6d 65 6e 74 73 43 02 04 01 ......mmentsC... +| 976: 02 04 01 02 04 04 05 70 69 6c 65 72 07 02 02 01 .......piler.... +| 992: 02 02 01 02 02 05 04 6f 75 6e 64 7f 02 03 01 02 .......ound..... +| 1008: 03 01 02 03 03 03 75 6e 74 81 17 02 04 01 02 04 ......unt....... +| 1024: 01 02 04 02 05 75 72 73 6f 72 3a 02 03 01 02 03 .....ursor:..... +| 1040: 01 02 03 01 06 64 62 70 61 67 65 3d 02 03 01 02 .....dbpage=.... +| 1056: 03 01 02 03 03 04 73 74 61 74 40 02 03 01 02 03 ......stat@..... +| 1072: 01 02 03 02 04 65 62 75 67 0a 02 02 01 02 02 01 .....ebug....... +| 1088: 02 02 03 05 66 61 75 6c 74 0d 02 02 01 02 02 01 ....fault....... +| 1104: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ 
+| 1120: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1136: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1152: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1168: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1184: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1200: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 4f 02 ..............O. +| 1216: 03 01 02 03 01 02 03 03 03 70 74 68 81 05 02 04 .........pth.... +| 1232: 01 02 04 01 02 04 19 02 04 01 02 04 01 02 04 02 ................ +| 1248: 05 69 72 65 63 74 34 02 02 01 02 02 01 02 02 01 .irect4......... +| 1264: 06 65 6e 61 62 6c 65 37 02 02 01 02 02 01 02 02 .enable7........ +| 1280: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1296: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1312: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1328: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1344: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1360: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1376: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1392: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1408: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1424: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1440: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1456: 02 01 02 02 02 06 78 70 6c 61 69 6e 43 02 03 01 ......xplainC... +| 1472: 02 03 01 02 03 04 01 72 81 05 02 03 01 02 03 01 .......r........ +| 1488: 02 03 03 07 74 65 6e 73 69 6f 6e 81 2f 02 04 01 ....tension./... +| 1504: 02 04 01 02 04 01 04 66 69 6c 65 13 02 03 01 02 .......file..... +| 1520: 03 01 02 03 02 05 6f 72 6d 61 74 13 02 04 01 02 ......ormat..... +| 1536: 04 01 02 04 02 03 74 73 33 46 02 03 01 02 03 01 ......ts3F...... +| 1552: 02 03 01 02 03 01 02 03 01 02 03 04 01 34 4c 02 .............4L. +| 1568: 03 01 02 03 01 02 03 04 01 35 4f 02 03 01 02 03 .........5O..... +| 1584: 01 02 03 02 03 75 6e 63 5e 02 05 01 02 05 01 02 .....unc^....... +| 1600: 05 05 04 74 69 6f 6e 73 02 05 01 02 05 01 02 05 ...tions........ +| 1616: 13 02 03 01 02 03 01 02 03 09 01 73 55 02 04 01 ...........sU... +| 1632: 02 04 01 02 04 01 07 67 65 6f 70 6f 6c 79 52 02 .......geopolyR. +| 1648: 03 01 02 03 01 02 03 01 05 68 69 6e 74 73 3a 02 .........hints:. +| 1664: 04 01 02 04 01 02 04 02 03 6f 6f 6b 61 02 04 01 .........ooka... +| 1680: 02 04 01 02 04 01 02 69 6e 01 02 04 01 02 04 01 .......in....... +| 1696: 02 04 03 04 69 74 73 7a 1f 02 04 01 02 04 01 02 ....itsz........ +| 1712: 04 03 08 74 72 69 6e 73 69 63 73 04 02 03 01 02 ...trinsics..... +| 1728: 03 01 02 03 01 07 6a 6f 75 72 6e 61 6c 16 02 03 ......journal... +| 1744: 01 02 03 01 02 03 01 06 6c 65 6e 67 74 68 81 0b ........length.. +| 1760: 02 03 01 02 03 01 02 03 01 02 05 01 02 05 01 02 ................ +| 1776: 05 0d 02 04 01 02 04 01 02 04 02 03 69 6b 65 81 ............ike. +| 1792: 0e 02 03 01 02 03 01 02 03 03 03 6d 69 74 16 02 ...........mit.. +| 1808: 05 01 02 05 01 02 05 5e 02 04 01 02 04 01 02 04 .......^........ +| 1824: 02 03 6f 61 64 81 2f 02 03 01 02 03 01 02 03 01 ..oad./......... +| 1840: 06 6d 61 6c 6c 6f 63 76 02 02 01 02 02 01 02 02 .mallocv........ +| 1856: 3a 02 03 01 02 03 01 02 03 03 02 74 68 55 02 03 :..........thU.. +| 1872: 01 02 03 01 02 03 03 01 78 79 02 02 01 02 02 01 ........xy...... 
+| 1888: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1904: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1920: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1936: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 1952: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 1968: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 1984: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 2000: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 2016: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 2032: 02 02 02 05 65 6d 6f 72 79 81 11 02 03 01 02 03 ....emory....... +| 2048: 01 02 03 04 04 73 79 73 35 58 02 03 01 02 03 01 .....sys5X...... +| 2064: 02 03 02 03 6d 61 70 19 02 03 01 02 03 01 02 03 ....map......... +| 2080: 79 02 03 01 02 03 01 02 03 02 04 75 74 65 78 81 y..........utex. +| 2096: 2c 02 02 01 02 02 01 02 02 01 06 6e 6f 63 61 73 ,..........nocas +| 2112: 65 02 06 01 02 02 03 06 01 02 02 03 06 01 02 02 e............... +| 2128: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2144: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2160: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2176: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2192: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2208: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2224: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2240: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2256: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2272: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2288: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2304: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2320: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2336: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2352: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2368: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2384: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2400: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2416: 02 02 03 06 01 02 02 03 07 72 6d 61 6c 69 7a 65 .........rmalize +| 2432: 5b 02 03 01 02 03 01 02 03 02 05 75 6d 62 65 72 [..........umber +| 2448: 81 23 02 04 01 02 04 01 02 04 01 06 6f 66 66 73 .#..........offs +| 2464: 65 74 5e 02 03 01 02 03 01 02 03 02 03 6d 69 74 et^..........mit +| 2480: 81 2c 02 03 01 02 03 01 02 03 01 02 02 01 02 02 .,.............. +| 2496: 01 02 02 02 01 70 81 26 02 04 01 02 04 01 02 04 .....p.&........ +| 2512: 02 07 76 65 72 66 6c 6f 77 34 02 03 01 02 03 01 ..verflow4...... +| 2528: 02 03 01 04 70 61 67 65 1c 02 03 01 02 03 01 02 ....page........ +| 2544: 03 64 02 04 01 02 04 01 02 04 13 02 03 01 02 03 .d.............. +| 2560: 01 02 03 01 02 03 01 02 03 01 02 03 03 09 72 65 ..............re +| 2576: 6e 74 68 65 73 69 73 49 02 04 01 02 04 01 02 04 nthesisI........ +| 2592: 03 05 74 74 65 72 6e 81 0e 02 04 01 02 04 01 02 ..ttern......... +| 2608: 04 02 05 63 61 63 68 65 1f 02 03 01 02 03 01 02 ...cache........ +| 2624: 03 02 08 72 65 75 70 64 61 74 65 61 02 03 01 02 ...reupdatea.... +| 2640: 03 01 02 03 01 04 72 65 61 64 34 02 04 01 02 04 ......read4..... 
+| 2656: 01 02 04 03 07 63 75 72 73 69 76 65 22 02 03 01 .....cursive.... +| 2672: 02 03 01 02 03 02 04 6f 77 69 64 01 02 03 01 02 .......owid..... +| 2688: 03 01 02 03 02 04 74 72 65 65 64 02 03 01 02 03 ......treed..... +| 2704: 01 02 03 04 02 69 6d 01 06 01 02 02 03 06 01 02 .....im......... +| 2720: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2736: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2752: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2768: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2784: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2800: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2816: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2832: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2848: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2864: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2880: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2896: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2912: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 2928: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 2944: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 2960: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 2976: 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 ................ +| 2992: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ +| 3008: 01 02 02 03 06 01 02 02 03 06 01 02 02 01 0a 73 ...............s +| 3024: 63 61 6e 73 74 61 74 75 73 70 02 04 01 02 04 01 canstatusp...... +| 3040: 02 04 02 05 65 63 74 6f 72 25 02 03 01 02 03 01 ....ector%...... +| 3056: 02 03 03 04 6c 65 63 74 7f 02 04 01 02 04 01 02 ....lect........ +| 3072: 04 03 05 73 73 69 6f 6e 67 02 03 01 02 03 01 02 ...ssiong....... +| 3088: 03 02 03 69 7a 65 10 02 04 01 02 04 01 02 04 04 ...ize.......... +| 3104: 02 04 01 02 04 01 02 04 01 02 04 01 02 04 01 02 ................ +| 3120: 04 01 02 04 01 02 04 01 02 04 07 02 04 01 02 04 ................ +| 3136: 01 02 04 5b 02 05 01 02 05 01 02 05 10 02 04 01 ...[............ +| 3152: 02 04 01 02 04 04 02 04 01 02 04 01 02 04 02 03 ................ +| 3168: 6f 66 74 76 02 03 01 02 03 01 02 03 02 02 71 6c oftv..........ql +| 3184: 5e 02 04 01 02 04 01 02 04 13 02 04 01 02 04 01 ^............... +| 3200: 02 04 28 02 03 01 02 03 01 02 03 02 04 74 61 74 ..(..........tat +| 3216: 34 6a 02 03 01 02 03 01 02 03 03 02 6d 74 70 02 4j..........mtp. +| 3232: 03 01 02 03 01 02 03 05 04 76 74 61 62 6d 02 03 .........vtabm.. +| 3248: 01 02 03 01 02 03 03 03 6f 72 65 81 35 02 03 01 ........ore.5... +| 3264: 02 03 01 02 03 02 0a 79 6e 63 68 72 6f 6e 6f 75 .......ynchronou +| 3280: 73 28 02 03 01 02 03 01 02 03 04 02 04 01 02 04 s(.............. +| 3296: 01 02 04 03 04 73 74 65 6d 81 32 02 02 01 02 02 .....stem.2..... +| 3312: 01 02 02 01 04 74 65 6d 70 81 35 02 02 01 02 02 .....temp.5..... +| 3328: 01 02 02 02 06 68 72 65 61 64 73 31 02 04 01 02 .....hreads1.... +| 3344: 04 01 02 04 76 02 04 01 02 04 01 02 04 08 03 61 ....v..........a +| 3360: 66 65 81 38 02 02 01 02 02 01 02 02 02 06 72 69 fe.8..........ri +| 3376: 67 67 65 72 81 20 02 03 01 02 03 01 02 03 08 01 gger. .......... 
+| 3392: 73 22 02 04 01 02 04 01 02 04 01 07 75 6e 6b 6e s...........unkn +| 3408: 6f 77 6e 73 02 03 01 02 03 01 02 03 01 08 76 61 owns..........va +| 3424: 72 69 61 62 6c 65 81 23 02 03 01 02 03 01 02 03 riable.#........ +| 3440: 02 03 64 62 65 81 26 02 03 01 02 03 01 02 03 02 ..dbe.&......... +| 3456: 03 69 65 77 01 02 05 01 02 05 01 02 05 02 03 74 .iew...........t +| 3472: 61 62 37 02 04 01 02 04 01 02 04 04 02 04 01 02 ab7............. +| 3488: 04 01 02 04 01 02 04 01 02 04 01 02 04 01 03 77 ...............w +| 3504: 61 6c 2b 02 03 01 02 03 01 02 03 01 02 03 01 02 al+............. +| 3520: 03 01 02 03 02 05 6f 72 6b 65 72 31 02 03 01 02 ......orker1.... +| 3536: 03 01 02 03 76 02 03 01 02 03 01 02 03 01 01 78 ....v..........x +| 3552: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3568: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3584: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3600: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3616: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3632: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3648: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3664: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3680: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3696: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3712: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3728: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3744: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3760: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3776: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3792: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3808: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3824: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3840: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3856: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3872: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3888: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3904: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3920: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3936: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3952: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3968: 06 01 01 02 01 06 04 30 15 1e 0c 28 1b 0d 0c 15 .......0...(.... +| 3984: 0c 16 14 16 10 0c 17 0e 0e 0f 11 10 10 0e 10 11 ................ +| 4000: 18 11 82 3e 12 10 0f 10 11 10 0f 0f 10 11 0f 0f ...>............ +| 4016: 81 05 18 10 81 45 11 0d 13 0f 10 17 0c 0c 0e 18 .....E.......... +| 4032: 0c 12 10 0e 0d 0f 13 12 24 0f 17 0f 1a 0d 81 1c ........$....... +| 4048: 11 0f 17 10 82 3e 12 11 11 18 0d 12 2a 14 11 10 .....>......*... +| 4064: 13 0f 12 0f 0f 82 3a 15 10 0f 10 4d 0e 1f 0f 0d ......:....M.... +| 4080: 0f 0f 1e 10 10 1a 0f 12 0c 12 14 0f 0e 20 17 19 ............. .. +| page 12 offset 45056 +| 0: 0d 00 00 00 01 0d f4 00 0d f4 00 00 00 00 00 00 ................ +| 3568: 00 00 00 00 84 04 84 80 80 80 80 02 04 00 88 0c ................ +| 3584: 00 07 02 00 01 01 02 56 06 01 01 02 01 06 01 01 .......V........ 
+| 3600: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3616: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3632: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3648: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3664: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3680: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3696: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3712: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3728: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3744: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3760: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3776: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3792: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3808: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3824: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3840: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3856: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3872: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3888: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3904: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 3920: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3936: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3952: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3968: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3984: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 4000: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 4016: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 4032: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 4048: 52 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 R............... +| 4064: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 4080: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| end c1a.txt.db +}]} {} + +do_catchsql_test 84.1 { + SELECT * FROM t1('R*R*x') ORDER BY rowid DESC; +} {1 {fts5: corruption found reading blob 137438953475 from table "t1"}} + +sqlite3_fts5_may_be_corrupt 0 +finish_test diff --git a/ext/fts5/test/fts5corrupt4.test b/ext/fts5/test/fts5corrupt4.test index b31f4d96e9..0505250c2f 100644 --- a/ext/fts5/test/fts5corrupt4.test +++ b/ext/fts5/test/fts5corrupt4.test @@ -14,7 +14,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5corrupt4 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5corrupt5.test b/ext/fts5/test/fts5corrupt5.test index 16682b1325..4b21a9ff74 100644 --- a/ext/fts5/test/fts5corrupt5.test +++ b/ext/fts5/test/fts5corrupt5.test @@ -15,9 +15,9 @@ # source [file join [file dirname [info script]] fts5_common.tcl] -set testprefix fts5corrupt3 +set testprefix fts5corrupt5 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -237,7 +237,7 @@ do_test 1.0 { do_catchsql_test 1.1 { SELECT * FROM t1('R*') WHERE (a,b)<=(current_date,0) ORDER BY rowid DESC; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- # @@ -450,7 +450,7 @@ do_test 2.0 { do_catchsql_test 2.1 { SELECT * FROM t1('R*R*R*R*') WHERE (a,b)<=(current_date,0) ORDER BY rowid DESC; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -569,7 +569,7 @@ do_test 3.0 { do_catchsql_test 3.1 { UPDATE t1 SET b=quote(zeroblob(200)) WHERE a MATCH 'thra*T'; -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} #------------------------------------------------------------------------- reset_db @@ -793,6 +793,1131 @@ do_catchsql_test 4.5 { REPLACE INTO t1(rowid,a,b,rowid) VALUES(200,1,2,3); } {1 {database disk image is malformed}} +#------------------------------------------------------------------------- +reset_db +do_test 5.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 28672 pagesize 4096 filename crash-0c6d3451d11597.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 07 .....@ ........ +| 32: 00 00 00 00 00 00 00 00 00 00 00 07 00 00 00 04 ................ +| 96: 00 00 00 00 0d 00 00 00 07 0d d2 00 0f c4 0f 6d ...............m +| 112: 0f 02 0e ab 0e 4e 0d f6 0d d2 00 00 00 00 00 00 .....N.......... +| 3536: 00 00 22 07 06 17 11 11 01 31 74 61 62 6c 65 74 .........1tablet +| 3552: 32 74 32 07 43 52 45 41 54 45 20 54 41 42 4c 45 2t2.CREATE TABLE +| 3568: 20 74 32 28 78 29 56 06 06 17 1f 1f 01 7d 74 61 t2(x)V.......ta +| 3584: 62 6c 65 74 31 5f 63 6f 6e 66 69 67 74 31 5f 63 blet1_configt1_c +| 3600: 6f 6e 66 69 67 06 43 52 45 41 54 45 20 54 41 42 onfig.CREATE TAB +| 3616: 4c 45 20 27 74 31 5f 63 6f 6e 66 69 67 27 28 6b LE 't1_config'(k +| 3632: 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 76 29 PRIMARY KEY, v) +| 3648: 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 5b 05 WITHOUT ROWID[. +| 3664: 07 17 21 21 01 81 01 74 61 62 6c 65 74 31 5f 64 ..!!...tablet1_d +| 3680: 6f 63 73 69 7a 65 74 31 5f 64 6f 63 73 69 7a 65 ocsizet1_docsize +| 3696: 05 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 .CREATE TABLE 't +| 3712: 31 5f 64 6f 63 73 69 7a 65 27 28 69 64 20 49 4e 1_docsize'(id IN +| 3728: 54 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 TEGER PRIMARY KE +| 3744: 59 2c 20 73 7a 20 42 4c 4f 42 29 55 04 06 17 21 Y, sz BLOB)U...! +| 3760: 21 01 77 74 61 62 6c 65 74 31 5f 63 6f 6e 74 65 !.wtablet1_conte +| 3776: 6e 74 74 31 5f 63 6f 6e 74 65 6e 74 04 43 52 45 ntt1_content.CRE +| 3792: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 63 6f ATE TABLE 't1_co +| 3808: 6e 74 65 6e 74 27 28 69 64 20 49 4e 54 45 47 45 ntent'(id INTEGE +| 3824: 52 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 63 R PRIMARY KEY, c +| 3840: 30 29 69 03 07 17 19 19 01 81 2d 74 61 62 6c 65 0)i.......-table +| 3856: 74 31 5f 69 64 78 74 31 5f 69 64 78 03 43 52 45 t1_idxt1_idx.CRE +| 3872: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 69 64 ATE TABLE 't1_id +| 3888: 78 27 28 73 65 67 69 64 2c 20 74 65 72 6d 2c 20 x'(segid, term, +| 3904: 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 20 4b 45 pgno, PRIMARY KE +| 3920: 59 28 73 65 67 69 64 2c 20 74 65 72 6d 29 29 20 Y(segid, term)) +| 3936: 57 49 54 48 4f 55 54 20 52 4f 57 49 44 55 02 07 WITHOUT ROWIDU.. 
+| 3952: 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 5f 64 61 ......tablet1_da +| 3968: 74 61 74 31 5f 64 61 74 61 02 43 52 45 41 54 45 tat1_data.CREATE +| 3984: 20 54 41 42 4c 45 20 27 74 31 5f 64 61 74 61 27 TABLE 't1_data' +| 4000: 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 4d (id INTEGER PRIM +| 4016: 41 52 b9 20 4b 45 59 2c 20 62 6c 6f 63 6b 20 42 AR. KEY, block B +| 4032: 4c 4f 42 29 3a 01 06 17 11 11 08 63 74 61 62 6c LOB):......ctabl +| 4048: 65 74 31 74 31 43 52 45 41 54 45 20 56 49 52 54 et1t1CREATE VIRT +| 4064: 55 41 4c 20 54 41 42 4c 45 20 74 31 20 55 53 49 UAL TABLE t1 USI +| 4080: 4e 47 20 66 74 73 35 28 63 6f 6e 74 65 6e 74 29 NG fts5(content) +| page 2 offset 4096 +| 0: 0d 00 00 00 03 0f bd 00 0f e8 0f ef 0f bd 00 00 ................ +| 16: 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 24 84 80 .............$.. +| 4032: 80 80 80 01 03 00 4e 00 00 00 1e 06 30 61 62 61 ......N.....0aba +| 4048: 63 6b 01 02 02 04 02 66 74 02 02 02 04 04 6e 64 ck.....ft.....nd +| 4064: 6f 6e 03 02 02 04 0a 07 05 01 03 00 10 03 03 0f on.............. +| 4080: 0a 03 00 24 00 00 00 00 01 01 01 00 01 01 01 11 ...$............ +| page 3 offset 8192 +| 0: 0a 00 00 00 01 0f 00 00 00 00 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 4 offset 12288 +| 0: 0d 00 00 00 03 0f e0 00 0f f6 0f ec 0f e0 00 00 ................ +| 4064: 0a 03 03 00 1b 61 62 61 6e 64 6f 6e 08 02 03 00 .....abandon.... +| 4080: 17 61 62 61 66 74 08 01 03 00 17 61 62 61 63 6b .abaft.....aback +| page 5 offset 16384 +| 0: 0d 00 00 00 03 0f ee 00 0f fa 0f f4 0f ee 00 00 ................ +| 16: 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 04 03 ................ +| 4080: 03 00 0e 01 04 02 03 00 0e 01 04 01 03 00 0e 01 ................ +| page 6 offset 20480 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| page 7 offset 24576 +| 0: 0d 00 00 10 03 0f d6 00 0f f4 10 e1 0f d6 00 00 ................ +| 16: 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 4048: 00 00 00 00 00 00 09 03 02 1b 72 65 62 75 69 6c ..........rebuil +| 4064: 64 11 02 02 2b 69 6e 74 65 67 72 69 74 79 2d 63 d...+integrity-c +| 4080: 68 65 63 6b 0a 01 02 1d 6f 70 74 69 6d 00 00 00 heck....optim... +| end crash-0c6d3451d11597.db +}]} {} + +do_execsql_test 5.1 { + INSERT INTO t1(t1,rank) VALUES('secure-delete',1); +} +do_catchsql_test 5.4 { + UPDATE t1 SET content=randomblob(500); +} {/.*fts5: corrupt.*/} + +#------------------------------------------------------------------------- +reset_db +do_test 6.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 32768 pagesize 4096 filename crash-42fa37b694d45a.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 07 .....@ ........ +| 96: 00 00 00 00 0d 00 00 00 07 0d d2 00 0f c4 0f 6d ...............m +| 112: 0f 02 0e ab 0e 4e 0d f6 0d d2 00 00 00 00 00 00 .....N.......... 
+| 3536: 00 00 22 07 06 17 11 11 01 31 74 61 62 6c 65 74 .........1tablet +| 3552: 32 74 32 07 43 52 45 41 54 45 20 54 41 42 4c 45 2t2.CREATE TABLE +| 3568: 20 74 32 28 78 29 56 06 06 17 1f 1f 01 7d 74 61 t2(x)V.......ta +| 3584: 62 6c 65 74 31 5f 63 6f 6e 66 69 67 74 31 5f 63 blet1_configt1_c +| 3600: 6f 6e 66 69 67 06 43 52 45 41 54 45 20 54 41 42 onfig.CREATE TAB +| 3616: 4c 45 20 27 74 31 5f 63 6f 6e 66 69 67 27 28 6b LE 't1_config'(k +| 3632: 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 76 29 PRIMARY KEY, v) +| 3648: 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 5b 05 WITHOUT ROWID[. +| 3664: 07 17 21 21 01 81 01 74 61 62 6c 65 74 31 5f 64 ..!!...tablet1_d +| 3680: 6f 63 73 69 7a 65 74 31 5f 64 6f 63 73 69 7a 65 ocsizet1_docsize +| 3696: 05 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 .CREATE TABLE 't +| 3712: 31 5f 64 6f 63 73 69 7a 65 27 28 69 64 20 49 4e 1_docsize'(id IN +| 3728: 54 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 TEGER PRIMARY KE +| 3744: 59 2c 20 73 7a 20 42 4c 4f 42 29 55 04 06 17 21 Y, sz BLOB)U...! +| 3760: 21 01 77 74 61 62 6c 65 74 31 5f 63 6f 6e 74 65 !.wtablet1_conte +| 3776: 6e 74 74 31 5f 63 6f 6e 74 65 6e 74 04 43 52 45 ntt1_content.CRE +| 3792: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 63 6f ATE TABLE 't1_co +| 3808: 6e 74 65 6e 74 27 28 69 64 20 49 4e 54 45 47 45 ntent'(id INTEGE +| 3824: 52 20 50 52 49 4d 41 52 49 20 4b 45 59 2c 20 63 R PRIMARI KEY, c +| 3840: 30 29 69 03 07 17 19 19 01 81 2d 74 61 62 6c 65 0)i.......-table +| 3856: 74 31 5f 69 64 78 74 31 5f 69 64 78 03 43 52 45 t1_idxt1_idx.CRE +| 3872: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 69 64 ATE TABLE 't1_id +| 3888: 78 27 28 73 65 67 69 64 2c 20 74 65 72 6d 2c 20 x'(segid, term, +| 3904: 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 20 4b 45 pgno, PRIMARY KE +| 3920: 59 28 73 65 67 69 64 2c 20 74 65 72 6d 29 29 20 Y(segid, term)) +| 3936: 57 49 54 48 4f 55 54 20 52 4f 57 49 44 55 02 07 WITHOUT ROWIDU.. +| 3952: 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 5f 64 61 ......tablet1_da +| 3968: 74 61 74 31 5f 64 61 74 61 02 43 52 45 41 54 45 tat1_data.CREATE +| 3984: 20 54 41 42 4c 45 20 27 74 31 5f 64 61 74 61 27 TABLE 't1_data' +| 4000: 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 4d (id INTEGER PRIM +| 4016: 41 52 b9 20 4b 45 59 2c 20 62 6c 6f 63 6b 20 42 AR. KEY, block B +| 4032: 4c 4f 42 29 3a 01 06 17 11 11 08 63 74 61 62 6c LOB):......ctabl +| 4048: 65 74 31 74 31 43 52 45 41 54 45 20 56 49 52 54 et1t1CREATE VIRT +| 4064: 55 41 4c 20 54 41 42 4c 45 20 74 31 20 55 53 49 UAL TABLE t1 USI +| 4080: 4e 47 20 66 74 73 35 28 63 6f 6e 74 65 6e 74 29 NG fts5(content) +| page 2 offset 4096 +| 0: 0d 00 00 00 03 0f bd 00 0f e8 0f ef 0f bd f0 00 ................ +| 16: 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 24 84 80 .............$.. +| 4032: 80 80 80 01 03 00 4e 00 10 00 1e 06 30 61 62 61 ......N.....0aba +| 4048: 63 6c 01 02 02 04 02 66 74 02 5f 02 04 04 6e 64 cl.....ft._...nd +| 4064: 6f 6e 02 02 02 04 0a 07 05 01 03 00 10 03 03 0f on.............. +| 4080: 0a 03 00 24 00 00 00 00 01 01 01 00 01 01 01 11 ...$............ +| page 3 offset 8192 +| 0: 0a 00 00 00 01 0f 00 01 00 00 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 4 offset 12288 +| 0: 0d 00 00 00 03 0f e0 00 0f f6 0f ec 0f e0 00 00 ................ +| 4064: 0a 03 03 00 1b 61 62 61 6e 64 6f 6e 08 02 03 00 .....abandon.... 
+| 4080: 17 61 62 61 66 74 08 01 03 00 17 61 62 61 63 6b .abaft.....aback +| page 5 offset 16384 +| 0: 0d 00 00 00 03 0f ee 00 0f fa 0f 00 00 00 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 04 03 ................ +| 4080: 03 00 0e 01 04 02 03 00 0e 01 04 01 03 00 0e 01 ................ +| page 6 offset 20480 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| page 7 offset 24576 +| 0: 0d 00 00 10 03 0f d6 00 0f 00 00 00 00 00 00 00 ................ +| 4048: 00 00 00 00 00 00 09 03 02 1b 72 65 62 75 69 6c ..........rebuil +| 4064: 64 11 02 02 2b 69 6e 74 65 67 72 69 74 79 2d 63 d...+integrity-c +| 4080: 68 65 63 6b 0a 01 02 1d 6f 70 74 69 6d 00 00 00 heck....optim... +| page 8 offset 28672 +| 0: 00 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| end crash-42fa37b694d45a.db +}]} {} + +do_execsql_test 6.1 { + INSERT INTO t1(t1,rank) VALUES('secure-delete',1); +} +do_catchsql_test 6.2 { + UPDATE t1 SET content=randomblob(500) WHERE t1; +} {1 {constraint failed}} + +#------------------------------------------------------------------------- +reset_db +do_test 7.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 40960 pagesize 4096 filename crash-d8b4a99207c10b.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 0a .....@ ........ +| 32: 00 00 00 00 00 00 00 00 00 00 00 0d 00 00 00 04 ................ +| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................ +| 96: 00 00 00 00 0d 00 00 00 0d 0b 62 00 0f 97 0f 40 ..........b....@ +| 112: 0e d5 0e 75 0e 18 0d c0 0d 66 0d 0f 0c a4 0c 44 ...u.....f.....D +| 128: 0b ec 0b a7 0b 62 00 00 00 00 00 00 00 00 00 00 .....b.......... +| 2912: 00 00 43 0d 06 17 11 11 08 75 74 61 62 6c 65 74 ..C......utablet +| 2928: 34 74 34 43 52 45 41 54 45 20 56 49 52 54 55 41 4t4CREATE VIRTUA +| 2944: 4c 20 54 41 42 4c 45 20 74 34 20 55 53 49 4e 47 L TABLE t4 USING +| 2960: 20 66 74 73 35 76 6f 63 61 62 28 27 74 32 27 2c fts5vocab('t2', +| 2976: 20 27 72 6f 77 27 29 43 0c 06 17 11 11 08 75 74 'row')C......ut +| 2992: 61 62 6c 65 74 33 74 33 43 52 45 41 54 45 20 56 ablet3t3CREATE V +| 3008: 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 33 20 IRTUAL TABLE t3 +| 3024: 55 53 49 4e 47 20 66 74 73 35 76 6f 63 61 62 28 USING fts5vocab( +| 3040: 27 74 31 27 2c 20 27 72 6f 77 27 29 56 0b 06 17 't1', 'row')V... 
+| 3056: 1f 1f 01 7d 74 61 62 6c 65 74 32 5f 63 6f 6e 66 ....tablet2_conf +| 3072: 69 67 74 32 5f 63 6f 6e 66 69 67 0a 43 52 45 41 igt2_config.CREA +| 3088: 54 45 20 54 41 42 4c 45 20 27 74 32 5f 63 6f 6e TE TABLE 't2_con +| 3104: 66 69 67 27 28 6b 20 50 52 49 4d 41 52 59 20 4b fig'(k PRIMARY K +| 3120: 45 59 2c 20 76 29 20 57 49 54 48 4f 55 54 20 52 EY, v) WITHOUT R +| 3136: 4f 57 49 44 5e 0a 07 17 21 21 01 81 07 74 61 62 OWID^...!!...tab +| 3152: 6c 65 74 32 5f 63 6f 6e 74 65 6e 74 74 32 5f 63 let2_contentt2_c +| 3168: 6f 6e 74 65 6e 74 09 43 52 45 41 54 45 20 54 41 ontent.CREATE TA +| 3184: 42 4c 45 20 27 74 32 5f 63 6f 6e 74 65 6e 74 27 BLE 't2_content' +| 3200: 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 4d (id INTEGER PRIM +| 3216: 41 52 59 20 4b 45 59 2c 20 63 30 2c 20 63 31 2c ARY KEY, c0, c1, +| 3232: 20 63 32 29 69 09 07 17 19 19 01 81 2d 74 61 62 c2)i.......-tab +| 3248: 6c 65 74 32 5f 69 64 78 74 32 5f 69 64 78 08 43 let2_idxt2_idx.C +| 3264: 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 32 5f REATE TABLE 't2_ +| 3280: 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 6d idx'(segid, term +| 3296: 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 20 , pgno, PRIMARY +| 3312: 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d 29 KEY(segid, term) +| 3328: 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 55 ) WITHOUT ROWIDU +| 3344: 08 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 32 5f ........tablet2_ +| 3360: 64 61 74 61 74 32 5f 64 61 74 61 07 43 52 45 41 datat2_data.CREA +| 3376: 54 45 20 54 41 42 4c 45 20 27 74 32 5f 64 61 74 TE TABLE 't2_dat +| 3392: 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 a'(id INTEGER PR +| 3408: 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 6b IMARY KEY, block +| 3424: 20 42 4c 4f 42 29 58 07 07 17 11 11 08 81 1d 74 BLOB)X........t +| 3440: 61 62 6c 65 74 32 74 32 43 52 45 41 54 45 20 56 ablet2t2CREATE V +| 3456: 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 32 20 IRTUAL TABLE t2 +| 3472: 55 53 49 4e 47 20 66 74 73 35 28 27 61 27 2c 5b USING fts5('a',[ +| 3488: 62 5d 2c 22 63 22 2c 64 65 74 61 69 6c 3d 6e 6f b],.c.,detail=no +| 3504: 6e 65 2c 63 6f 6c 75 6d 6e 73 69 7a 65 3d 30 29 ne,columnsize=0) +| 3520: 56 06 06 17 1f 1f 01 7d 74 61 62 6c 65 74 31 5f V.......tablet1_ +| 3536: 63 6f 6e 66 69 67 74 31 5f 63 6f 6e 66 69 67 06 configt1_config. +| 3552: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1 +| 3568: 5f 63 6f 6e 66 69 67 27 28 6b 20 50 52 49 4d 41 _config'(k PRIMA +| 3584: 52 59 20 4b 45 59 2c 20 76 29 20 57 49 54 48 4f RY KEY, v) WITHO +| 3600: 55 54 20 52 4f 57 49 44 5b 05 07 17 21 21 01 81 UT ROWID[...!!.. +| 3616: 01 74 61 62 6c 65 74 31 5f 64 6f 63 73 69 7a 65 .tablet1_docsize +| 3632: 74 31 5f 64 6f 63 73 69 7a 65 05 43 52 45 41 54 t1_docsize.CREAT +| 3648: 45 20 54 41 42 4c 45 20 27 74 31 5f 64 6f 63 73 E TABLE 't1_docs +| 3664: 69 7a 65 27 28 69 64 20 49 4e 54 45 47 45 52 20 ize'(id INTEGER +| 3680: 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 73 7a 20 PRIMARY KEY, sz +| 3696: 42 4c 4f 42 29 5e 04 07 17 21 21 01 81 07 74 61 BLOB)^...!!...ta +| 3712: 62 6c 65 74 31 5f 63 6f 6e 74 65 6e 74 74 31 5f blet1_contentt1_ +| 3728: 63 6f 6e 74 65 6e 74 04 43 52 45 41 54 45 20 54 content.CREATE T +| 3744: 41 42 4c 45 20 27 74 31 5f 63 6f 6e 74 65 6e 74 ABLE 't1_content +| 3760: 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 52 49 '(id INTEGER PRI +| 3776: 4d 41 52 59 20 4b 45 59 2c 20 63 30 2c 20 63 31 MARY KEY, c0, c1 +| 3792: 2c 20 63 32 29 69 03 07 17 19 19 01 81 2d 74 61 , c2)i.......-ta +| 3808: 62 6c 65 74 31 5f 69 64 78 74 31 5f 69 64 78 03 blet1_idxt1_idx. 
+| 3824: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1 +| 3840: 5f 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 _idx'(segid, ter +| 3856: 6d 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 m, pgno, PRIMARY +| 3872: 20 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d KEY(segid, term +| 3888: 29 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 )) WITHOUT ROWID +| 3904: 55 02 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 U........tablet1 +| 3920: 5f 64 61 74 61 74 31 5f 64 61 74 61 02 43 52 45 _datat1_data.CRE +| 3936: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 64 61 ATE TABLE 't1_da +| 3952: 74 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 ta'(id INTEGER P +| 3968: 52 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 RIMARY KEY, bloc +| 3984: 6b 20 42 4c 4f 42 29 67 01 07 17 11 11 08 81 3b k BLOB)g.......; +| 4000: 74 61 62 6c 65 74 31 74 31 43 52 45 41 54 45 20 tablet1t1CREATE +| 4016: 56 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 31 VIRTUAL TABLE t1 +| 4032: 20 55 53 49 4e 47 20 66 74 73 35 28 61 2c 62 20 USING fts5(a,b +| 4048: 75 6e 69 6e 64 65 78 65 64 2c 63 2c 74 6f 6b 65 unindexed,c,toke +| 4064: 6e 69 7a 65 3d 22 70 6f 72 74 65 72 20 61 73 63 nize=.porter asc +| 4080: 69 69 22 2c 74 6f 6b 65 6e 64 61 74 61 3d 31 29 ii.,tokendata=1) +| page 2 offset 4096 +| 0: 0d 0f 68 00 05 0f 13 00 0f e6 0f 13 0f a8 0f 7c ..h............| +| 16: 0f 2a 00 00 00 00 00 00 00 00 00 00 00 00 00 00 .*.............. +| 3856: 00 00 00 15 0a 03 00 30 00 00 00 00 01 03 03 00 .......0........ +| 3872: 03 01 01 01 02 01 01 03 01 01 37 8c 80 80 80 80 ..........7..... +| 3888: 01 03 00 74 00 00 00 2e 02 30 61 03 02 02 01 01 ...t.....0a..... +| 3904: 62 03 02 03 01 01 63 03 02 04 01 01 67 03 06 01 b.....c.....g... +| 3920: 02 02 01 01 68 03 06 01 02 03 01 01 69 03 06 01 ....h.......i... +| 3936: 02 04 04 06 06 06 08 08 0f ef 00 14 2a 00 00 00 ............*... +| 3952: 00 01 02 02 00 02 01 01 01 02 01 01 25 88 80 80 ............%... +| 3968: 80 80 01 03 00 50 00 00 00 1f 02 30 67 02 08 02 .....P.....0g... +| 3984: 01 02 02 01 01 68 02 08 03 01 02 03 01 01 69 02 .....h........i. +| 4000: 08 04 01 02 04 04 09 09 37 84 80 80 80 7f f1 03 ........7....... +| 4016: 00 74 00 00 00 2e 02 30 61 01 02 02 01 01 62 01 .t.....0a.....b. +| 4032: 02 03 01 01 63 01 02 04 01 01 67 01 06 01 02 02 ....c.....g..... +| 4048: 01 01 68 01 06 01 02 03 01 01 69 01 06 01 02 04 ..h.......i..... +| 4064: 04 06 06 06 08 08 07 01 03 00 14 03 09 00 09 00 ................ +| 4080: 00 00 11 24 00 00 00 00 01 01 01 00 01 01 01 01 ...$............ +| page 3 offset 8192 +| 0: 0a 00 00 00 03 0f ec 00 0f fa 0f f3 0f ec 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 00 00 00 00 06 04 01 0c ................ +| 4080: 01 03 02 06 04 01 0c 01 02 02 05 04 09 0c 01 02 ................ +| page 4 offset 12288 +| 0: 0d 00 00 00 03 0f be 00 0f ea 0f d4 0f be 00 00 ................ +| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 14 03 ................ +| 4032: 05 00 17 17 17 61 20 62 20 63 67 20 68 20 69 67 .....a b cg h ig +| 4048: 20 68 20 69 14 02 05 00 17 17 17 67 20 68 20 69 h i.......g h i +| 4064: 61 20 62 20 63 67 20 68 20 69 14 01 05 00 17 17 a b cg h i...... +| 4080: 17 61 20 62 20 63 64 20 65 20 66 67 20 68 20 69 .a b cd e fg h i +| page 5 offset 16384 +| 0: 0d 00 00 00 03 0f e8 00 0f f8 0f f0 0f e8 00 00 ................ +| 4064: 00 00 00 00 00 00 00 00 06 03 03 00 12 03 00 03 ................ +| 4080: 06 02 03 00 12 03 00 03 06 01 03 00 12 03 00 03 ................ 
+| page 6 offset 20480 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| page 7 offset 24576 +| 0: 0d 00 00 00 03 0f 9e 00 0f e6 0f ef 0f 9e 00 00 ................ +| 3984: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 41 84 ..............A. +| 4000: 80 80 80 80 01 04 00 81 06 00 00 00 34 02 30 61 ............4.0a +| 4016: 01 01 01 01 01 62 01 01 01 01 01 63 01 01 01 01 .....b.....c.... +| 4032: 01 64 01 01 01 65 01 01 01 66 01 01 01 67 01 01 .d...e...f...g.. +| 4048: 01 01 01 68 01 01 01 01 01 69 01 01 01 04 06 06 ...h.....i...... +| 4064: 06 04 04 04 06 06 07 01 03 00 14 03 09 09 09 0f ................ +| 4080: 0a 03 00 24 00 00 00 00 01 01 01 00 01 01 01 01 ...$............ +| page 8 offset 28672 +| 0: 0a 00 00 00 01 0f fa 00 0f fa 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 9 offset 32768 +| 0: 0d 00 00 00 03 0f be 00 0f ea 0f d4 0f be 00 00 ................ +| 4016: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 14 03 ................ +| 4032: 05 00 17 17 17 61 20 62 20 63 67 20 68 20 69 67 .....a b cg h ig +| 4048: 20 68 20 69 14 02 05 00 17 17 17 67 20 68 20 69 h i.......g h i +| 4064: 61 20 62 20 63 67 20 68 20 69 14 01 05 00 17 17 a b cg h i...... +| 4080: 17 61 20 62 20 63 64 20 65 20 66 67 20 68 20 69 .a b cd e fg h i +| page 10 offset 36864 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| end crash-d8b4a99207c10b.db +}]} {} + +do_catchsql_test 7.1 { + SELECT snippet(t1, -1, '.', '..', '[', ']'), + highlight(t1, 2, '[', ']') + FROM t1('g + h') + WHERE rank MATCH 'bm25(1.0, 1.0)' ORDER BY rank; +} {1 {database disk image is malformed}} + +#------------------------------------------------------------------------- +reset_db +do_test 8.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 20480 pagesize 4096 filename crash-d57c01958e48ab.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 05 .....@ ........ +| 32: 00 00 00 00 00 00 00 00 00 00 00 05 00 00 00 04 ................ +| 48: 00 00 00 00 00 00 00 00 00 00 00 01 00 00 00 00 ................ +| 96: 00 00 00 00 0d 00 00 00 05 0e 10 00 0f 97 0f 40 ...............@ +| 112: 0e d5 0e 68 0e 10 01 00 00 00 00 00 00 00 00 00 ...h............ +| 3600: 56 05 06 17 1f 1f 01 7d 74 61 62 6c 65 74 31 5f V.......tablet1_ +| 3616: 63 6f 6e 66 69 67 74 31 5f 63 6f 6e 66 69 67 05 configt1_config. +| 3632: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1 +| 3648: 5f 63 6f 6e 66 69 67 27 28 6b 20 50 52 49 4d 41 _config'(k PRIMA +| 3664: 52 59 20 4b 45 59 2c 20 76 29 20 57 49 54 48 4f RY KEY, v) WITHO +| 3680: 55 54 20 52 4f 57 49 44 6b 04 07 17 21 21 01 81 UT ROWIDk...!!.. +| 3696: 21 74 61 62 6c 65 74 31 5f 64 6f 63 73 69 7a 65 !tablet1_docsize +| 3712: 74 31 5f 64 6f 63 73 69 7a 65 04 43 52 45 41 54 t1_docsize.CREAT +| 3728: 45 20 54 41 42 4c 45 20 27 74 31 5f 64 6f 63 73 E TABLE 't1_docs +| 3744: 69 7a 65 27 28 69 64 20 49 4e 54 45 47 45 52 20 ize'(id INTEGER +| 3760: 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 73 7a 20 PRIMARY KEY, sz +| 3776: 42 4c 4f 42 2c 20 6f 72 69 67 69 6e 20 49 4e 54 BLOB, origin INT +| 3792: 45 47 45 52 29 69 03 07 17 19 19 01 81 2d 74 61 EGER)i.......-ta +| 3808: 62 6c 65 74 31 5f 69 64 78 74 31 5f 69 64 78 03 blet1_idxt1_idx. 
+| 3824: 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 CREATE TABLE 't1 +| 3840: 5f 69 64 78 27 28 73 65 67 69 64 2c 20 74 65 72 _idx'(segid, ter +| 3856: 6d 2c 20 70 67 6e 6f 2c 20 50 52 49 4d 41 52 59 m, pgno, PRIMARY +| 3872: 20 4b 45 59 28 73 65 67 69 64 2c 20 74 65 72 6d KEY(segid, term +| 3888: 29 29 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 )) WITHOUT ROWID +| 3904: 55 02 07 17 1b 1b 01 81 01 74 61 62 6c 65 74 31 U........tablet1 +| 3920: 5f 64 61 74 61 74 31 5f 64 61 74 61 02 43 52 45 _datat1_data.CRE +| 3936: 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 64 61 ATE TABLE 't1_da +| 3952: 74 61 27 28 69 64 20 49 4e 54 45 47 45 52 20 50 ta'(id INTEGER P +| 3968: 52 49 4d 41 52 59 20 4b 45 59 2c 20 62 6c 6f 63 RIMARY KEY, bloc +| 3984: 6b 20 42 4c 4f 42 29 67 01 07 17 11 11 08 81 3b k BLOB)g.......; +| 4000: 74 61 62 6c 65 74 31 74 31 43 52 45 41 54 45 20 tablet1t1CREATE +| 4016: 56 49 52 54 55 41 4c 20 54 41 42 4c 45 20 74 31 VIRTUAL TABLE t1 +| 4032: 20 55 53 49 4e 47 20 66 74 73 35 28 61 2c 20 62 USING fts5(a, b +| 4048: 2c 20 63 6f 6e 74 65 6e 74 3d 27 27 2c 20 63 6f , content='', co +| 4064: 6e 74 65 6e 74 6c 65 73 73 5f 64 65 6c 65 74 65 ntentless_delete +| 4080: 3d 31 2c 20 74 6f 6b 65 6e 64 61 74 61 3d 31 29 =1, tokendata=1) +| page 2 offset 4096 +| 0: 0d 0f eb 00 03 0e 17 00 0f e2 0e 17 0e 31 00 00 .............1.. +| 16: 01 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ +| 3600: 00 00 00 00 00 00 00 18 0a 03 00 36 00 00 00 00 ...........6.... +| 3616: ff 00 00 01 01 01 01 00 01 01 01 01 01 01 00 00 ................ +| 3632: 07 83 29 84 80 80 80 80 01 04 00 86 56 00 00 01 ..).........V... +| 3648: 96 04 30 61 61 61 01 02 02 01 04 02 04 01 08 02 ..0aaa.......... +| 3664: 04 04 04 01 10 02 04 04 04 04 04 04 04 01 20 02 .............. . +| 3680: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 01 ................ +| 3696: 40 02 04 04 04 04 04 04 04 04 04 04 04 04 04 04 @............... +| 3712: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................ +| 3728: 04 01 81 00 02 04 04 04 04 04 04 04 04 04 04 04 ................ +| 3744: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................ +| 3760: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................ +| 3776: 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 04 ................ +| 3792: 04 04 04 04 02 02 62 63 01 06 01 01 02 01 03 62 ......bc.......b +| 3808: 62 62 02 02 03 01 04 03 06 01 08 03 06 06 06 01 bb.............. +| 3824: 10 03 06 06 06 06 06 06 06 01 20 03 06 06 06 06 .......... ..... +| 3840: 06 06 06 06 06 06 06 06 06 06 06 01 40 03 06 06 ............@... +| 3856: 06 06 06 06 06 06 06 06 06 06 06 06 06 06 06 06 ................ +| 3872: 06 06 06 06 06 06 06 06 06 06 16 06 06 02 02 63 ...............c +| 3888: 64 02 06 01 01 02 01 03 63 63 63 03 02 05 01 04 d.......ccc..... +| 3904: 05 0a 01 08 05 0a 0a 0a 01 10 05 0a 0a 0a 0a 0a ................ +| 3920: 0a 0a 01 20 05 0a 0a 0a 0a 0a 0a 0a 0a 0a 0a 0a ... ............ +| 3936: 0a 0a 0a 0a 02 02 64 65 03 06 01 01 02 01 03 64 ......de.......d +| 3952: 64 64 04 02 09 01 04 09 12 01 08 09 12 12 12 01 dd.............. +| 3968: 10 09 12 12 12 12 12 12 12 02 02 65 66 04 06 01 ...........ef... +| 3984: 01 02 01 03 65 65 65 05 02 11 01 04 11 22 01 08 ....eee......... +| 4000: 11 22 22 22 02 02 66 67 05 06 01 01 02 01 03 66 ......fg.......f +| 4016: 56 66 06 02 21 01 04 21 42 02 02 67 68 06 06 01 Vf..!..!B..gh... +| 4032: 01 02 cb 03 67 67 67 07 02 41 02 02 68 69 07 06 ....ggg..A..hi.. 
+| 4048: 01 01 02 04 81 13 09 50 09 2e 09 1c 09 12 09 0c .......P........ +| 4064: 09 08 07 01 03 00 14 07 81 77 07 00 00 00 15 22 .........w...... +| 4080: 00 00 00 00 ff 00 00 01 00 00 00 00 00 00 05 0c ................ +| page 3 offset 8192 +| 0: 0a 00 00 00 01 0f fa 00 0f fa 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 4 offset 12288 +| 0: 0d 00 00 00 07 0f c8 00 0f f8 0f f0 0f e8 0f e0 ................ +| 16: 0f d8 0f d0 0f c8 00 00 00 00 00 00 00 00 00 00 ................ +| 4032: 00 00 00 00 00 00 00 00 06 07 04 00 10 09 7f 01 ................ +| 4048: 06 06 04 00 10 09 3f 01 06 05 04 00 10 09 1f 01 ......?......... +| 4064: 06 04 04 00 10 09 0f 01 06 03 04 00 10 09 07 01 ................ +| 4080: 06 02 04 00 10 09 03 01 06 01 04 00 10 09 01 01 ................ +| page 5 offset 16384 +| 0: 0a 00 00 00 01 0f f4 00 0f f4 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 0b 03 1b 01 76 65 72 73 69 6f 6e 04 ........version. +| end crash-d57c01958e48ab.db +}]} {} + +do_catchsql_test 8.1 { + SELECT rowid FROM t1('a* NOT ý‘') ; +} {0 {1 2 3 4 5 6 7}} + +#------------------------------------------------------------------------- +reset_db +do_test 9.0 { + sqlite3 db {} + db deserialize [decode_hexdb { +.open --hexdb +| size 32768 pagesize 4096 filename crash-c76a16c24c8ba6.db +| page 1 offset 0 +| 0: 53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00 SQLite format 3. +| 16: 10 00 01 01 00 40 20 20 00 00 00 00 00 00 00 08 .....@ ........ +| 32: 00 00 00 02 00 00 00 01 00 00 00 09 00 00 00 04 ................ +| 96: 00 00 00 00 0d 0f c7 00 07 0d 92 00 0f 8d 0f 36 ...............6 +| 112: 0e cb 0e 6b 0e 0e 0d b6 0d 92 0d 92 00 00 00 00 ...k............ +| 3472: 00 00 22 08 06 17 11 11 01 31 74 61 62 6c 65 74 .........1tablet +| 3488: 32 74 32 08 43 52 45 41 54 45 20 54 41 42 4c 45 2t2.CREATE TABLE +| 3504: 20 74 32 28 78 29 56 07 06 17 1f 1f 01 7d 74 61 t2(x)V.......ta +| 3520: 62 6c 65 74 31 5f 63 6f 6e 66 69 67 74 31 5f 63 blet1_configt1_c +| 3536: 6f 6e 66 69 67 07 43 52 45 41 54 45 20 54 41 42 onfig.CREATE TAB +| 3552: 4c 45 20 27 74 31 5f 63 6f 6e 66 69 67 27 28 6b LE 't1_config'(k +| 3568: 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 76 29 PRIMARY KEY, v) +| 3584: 20 57 49 54 48 4f 55 54 20 52 4f 57 49 44 5b 06 WITHOUT ROWID[. +| 3600: 07 17 21 21 01 81 01 74 61 62 6c 65 74 31 5f 64 ..!!...tablet1_d +| 3616: 6f 63 73 69 7a 65 74 31 5f 64 6f 63 73 69 7a 65 ocsizet1_docsize +| 3632: 06 43 52 45 41 54 45 20 54 41 42 4c 45 20 27 74 .CREATE TABLE 't +| 3648: 31 5f 64 6f 63 73 69 7a 65 27 28 69 64 20 49 4e 1_docsize'(id IN +| 3664: 54 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 TEGER PRIMARY KE +| 3680: 59 2c 20 73 7a 20 42 4c 4f 42 29 5e 05 07 17 21 Y, sz BLOB)^...! +| 3696: 21 01 81 07 74 61 62 6c 65 74 31 5f 63 6f 6e 74 !...tablet1_cont +| 3712: 65 6e 74 74 31 5f 63 6f 6e 74 65 6e 74 05 43 52 entt1_content.CR +| 3728: 45 41 54 45 20 54 41 42 4c 45 20 27 74 31 5f 63 EATE TABLE 't1_c +| 3744: 6f 6e 74 65 6e 74 27 28 69 64 20 49 4e 54 45 47 ontent'(id INTEG +| 3760: 45 52 20 50 52 49 4d 41 52 59 20 4b 45 59 2c 20 ER PRIMARY KEY, +| 3776: 63 30 2c 20 63 31 2c 20 63 32 29 69 04 07 17 19 c0, c1, c2)i.... 
+| 3792: 19 01 81 2d 74 61 62 6c 65 74 31 5f 69 64 78 74 ...-tablet1_idxt +| 3808: 31 5f 69 64 78 04 43 52 45 41 54 45 20 54 41 42 1_idx.CREATE TAB +| 3824: 4c 45 20 27 74 31 5f 69 64 78 27 28 73 65 67 69 LE 't1_idx'(segi +| 3840: 64 2c 20 74 65 72 6d 2c 20 70 67 6e 6f 2c 20 50 d, term, pgno, P +| 3856: 52 49 4d 41 52 59 20 4b 45 59 28 73 65 67 69 64 RIMARY KEY(segid +| 3872: 2c 20 74 65 72 6d 29 29 20 57 49 54 48 4f 55 54 , term)) WITHOUT +| 3888: 20 52 4f 57 49 44 55 03 07 17 1b 1b 01 81 01 74 ROWIDU........t +| 3904: 61 62 6c 65 74 31 5f 64 61 74 61 74 31 5f 64 61 ablet1_datat1_da +| 3920: 74 61 03 43 52 45 41 54 45 20 54 41 42 4c 45 20 ta.CREATE TABLE +| 3936: 27 74 31 5f 64 61 74 61 27 28 69 64 20 49 4e 54 't1_data'(id INT +| 3952: 45 47 45 52 20 50 52 49 4d 41 52 59 20 4b 45 59 EGER PRIMARY KEY +| 3968: 2c 20 62 6c 6f 63 6b 20 42 4c 4f 42 29 38 02 06 , block BLOB)8.. +| 3984: 17 11 11 08 5f 74 61 62 6c 65 74 31 74 31 43 52 ...._tablet1t1CR +| 4000: 45 41 54 45 20 56 49 52 54 55 41 4c 20 54 41 42 EATE VIRTUAL TAB +| 4016: 4c 45 20 74 31 20 55 53 49 4e 47 20 66 74 73 35 LE t1 USING fts5 +| 4032: 28 61 2c 62 2c 63 29 00 00 00 00 00 00 00 00 00 (a,b,c)......... +| page 3 offset 8192 +| 0: 0d 00 00 00 03 0c 94 00 0f e6 0f ef 0c 94 00 00 ................ +| 3216: 00 00 00 00 86 4a 84 80 80 80 80 01 04 00 8d 18 .....J.......... +| 3232: 00 00 03 2b 02 30 30 01 02 06 01 02 06 01 02 06 ...+.00......... +| 3248: 1f 02 03 01 02 03 01 02 03 01 08 32 30 31 36 30 ...........20160 +| 3264: 36 30 39 01 02 07 01 02 07 01 02 07 01 01 34 01 609...........4. +| 3280: 02 05 01 02 05 01 02 05 01 01 35 01 02 04 01 02 ..........5..... +| 3296: 04 01 02 04 02 07 30 30 30 30 30 30 30 1c 02 04 ......0000000... +| 3312: 01 02 04 01 02 04 01 06 62 69 6e 61 72 79 03 06 ........binary.. +| 3328: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 3344: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 3360: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 3376: 03 06 01 02 02 03 06 01 02 02 01 08 63 6f 6d 70 ............comp +| 3392: 69 6c 65 72 01 02 02 01 02 02 01 02 02 01 06 64 iler...........d +| 3408: 62 73 74 61 74 07 02 03 01 02 03 01 02 03 02 04 bstat........... +| 3424: 65 62 75 67 04 02 02 01 02 02 01 02 02 01 06 65 ebug...........e +| 3440: 6e 61 62 6c 65 07 02 02 01 02 02 01 02 02 01 02 nable........... +| 3456: 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 ................ +| 3472: 01 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 ................ +| 3488: 02 02 01 02 02 01 02 02 01 02 02 01 02 02 01 02 ................ +| 3504: 02 01 02 02 02 08 78 74 65 6e 73 69 6f 6e 1f 02 ......xtension.. +| 3520: 04 01 02 04 01 02 04 01 04 66 74 73 34 0a 02 03 .........fts4... +| 3536: 01 02 03 01 02 03 04 01 35 0d 02 03 01 02 03 01 ........5....... +| 3552: 02 03 01 03 67 63 63 01 02 03 01 02 03 01 02 03 ....gcc......... +| 3568: 02 06 65 6f 70 6f 6c 79 10 02 03 01 02 03 01 02 ..eopoly........ +| 3584: 03 01 05 6a 73 6f 6e 31 13 02 03 01 02 03 01 02 ...json1........ +| 3600: 03 01 04 6c 6f 61 64 1f 02 03 01 02 03 01 02 03 ...load......... +| 3616: 01 03 6d 61 78 1c 02 02 01 02 02 01 02 02 02 05 ..max........... +| 3632: 65 6d 6f 72 79 1c 02 03 01 02 03 01 02 03 04 04 emory........... +| 3648: 73 79 73 35 16 02 03 01 02 03 01 02 03 01 06 6e sys5...........n +| 3664: 6f 63 61 73 65 02 06 01 02 02 03 06 01 02 02 03 ocase........... +| 3680: 06 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 ................ 
+| 3696: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 3712: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 3728: 02 01 04 6f 6d 69 74 1f 02 02 01 02 02 01 02 02 ...omit......... +| 3744: 01 05 72 74 72 65 65 19 02 03 01 02 03 01 02 03 ..rtree......... +| 3760: 04 02 69 6d 01 06 01 02 02 03 06 01 02 02 03 06 ..im............ +| 3776: 01 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 ................ +| 3792: 02 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 ................ +| 3808: 02 03 06 01 02 02 03 06 01 02 02 03 06 01 02 02 ................ +| 3824: 01 0a 74 68 72 65 61 64 73 61 66 65 03 57 34 56 ..threadsafe.W4V +| 3840: 94 64 91 46 85 84 04 76 74 61 62 07 02 04 01 02 .d.F...vtab..... +| 3856: 04 01 02 04 01 01 78 01 06 01 01 02 01 06 01 01 ......x......... +| 3872: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 10 02 ................ +| 3888: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3904: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 3920: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 3936: 01 02 01 06 01 01 10 01 06 01 01 02 01 06 01 01 ................ +| 3952: 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 ................ +| 3968: 01 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 ................ +| 3984: 06 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 ................ +| 4000: 01 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 ................ +| 4016: 01 02 01 06 01 01 02 01 06 01 01 02 01 06 01 01 ................ +| 4032: 02 01 06 01 01 02 01 06 01 01 02 04 15 13 0c 0c ................ +| 4048: 12 44 13 11 0f 47 13 0f 0c 0e 11 10 0f 0e 10 0f .D...G.......... +| 4064: 44 0f 10 40 15 0f 07 01 03 00 14 24 5a 24 24 0f D..@.......$Z$$. +| 4080: 0a 03 00 24 00 00 00 00 01 01 01 00 01 01 01 01 ...$............ +| page 4 offset 12288 +| 0: 0a 00 00 00 01 0f fa 00 00 00 00 00 00 00 00 00 ................ +| 4080: 00 00 00 00 00 00 00 00 00 00 05 04 09 0c 01 02 ................ +| page 5 offset 16384 +| 0: 0d 00 00 00 24 0c 0a 00 0f d8 0f af 0f 86 0f 74 ....$..........t +| 16: 0f 61 0f 4e 0f 2f 0f 0f 0e ef 0e d7 0e be 0e a5 .a.N./.......... +| 32: 0e 8d 0e 74 0e 5b 0e 40 0e 24 0e 08 0d ef 0d d5 ...t.[.@.$...... +| 48: 0d bb 0d a0 0d 84 0d 68 0d 4f 0d 35 0d 1b 0c fb .......h.O.5.... +| 64: 0c da 0c b9 0c 99 0c 78 0c 57 0c 3e 0c 24 0c 0a .......x.W.>.$.. +| 3072: 00 00 00 00 00 00 00 00 00 00 18 24 05 00 25 0f ...........$..%. +| 3088: 19 54 48 52 45 41 44 53 41 46 45 3d 30 58 42 49 .THREADSAFE=0XBI +| 3104: 4e 41 52 59 18 23 05 00 25 0f 19 54 48 52 45 41 NARY.#..%..THREA +| 3120: 44 53 41 46 45 3d 30 58 4e 4f 43 41 53 45 17 22 DSAFE=0XNOCASE.. +| 3136: 05 00 25 0f 17 54 48 52 45 41 44 53 31 46 45 3d ..%..THREADS1FE= +| 3152: 30 58 52 64 52 49 4d 1f 21 05 00 33 0f 19 4f 4d 0XRdRIM.!..3..OM +| 3168: 49 54 20 4c 4f 41 44 20 45 58 54 45 4e 53 49 4f IT LOAD EXTENSIO +| 3184: 4e 58 42 49 4e 41 52 59 1f 20 05 00 33 0f 19 4f NXBINARY. ..3..O +| 3200: 4d 49 54 20 4c 4f 41 44 20 45 58 54 45 4e 53 49 MIT LOAD EXTENSI +| 3216: 4f 4e 58 4e 4f 43 41 53 45 1e 1f 05 00 33 0f 17 ONXNOCASE....3.. +| 3232: 4f 4d 49 54 20 4c 4f 41 44 20 45 58 54 45 4e 53 OMIT LOAD EXTENS +| 3248: 49 4f 4e 58 52 54 52 49 4d 1f 1e 05 00 33 0f 19 IONXRTRIM....3.. +| 3264: 4d 41 58 20 4d 45 4d 4f 52 59 3d 35 30 30 30 30 MAX MEMORY=50000 +| 3280: 30 30 30 58 42 49 4e 41 52 59 1f 1d 05 00 33 0f 000XBINARY....3. 
+| 3296: 19 4d 41 58 20 4d 45 4d 4f 52 59 3d 35 30 30 30 .MAX MEMORY=5000 +| 3312: 30 30 30 30 58 4e 4f 43 41 53 45 1e 1c 05 00 33 0000XNOCASE....3 +| 3328: 0f 17 4d 41 58 20 4d 45 4d 4f 52 59 3d 35 30 30 ..MAX MEMORY=500 +| 3344: 30 30 30 30 30 58 52 54 52 49 4d 18 1b 05 00 25 00000XRTRIM....% +| 3360: 0f 19 45 4e 41 42 4c 45 20 52 54 52 45 45 58 42 ..ENABLE RTREEXB +| 3376: 49 4e 41 52 59 18 1a 05 00 25 0f 19 45 4e 41 42 INARY....%..ENAB +| 3392: 4c 45 20 52 54 52 45 45 58 4e 4f 43 41 53 45 17 LE RTREEXNOCASE. +| 3408: 19 05 00 25 0f 17 45 4e 41 42 4c 45 20 52 54 52 ...%..ENABLE RTR +| 3424: 45 45 58 52 54 52 49 4d 1a 18 05 00 29 0f 19 45 EEXRTRIM....)..E +| 3440: 4e 41 42 4b 45 20 4d 45 4d 53 59 53 35 58 42 49 NABKE MEMSYS5XBI +| 3456: 4e 41 52 59 1a 17 05 00 29 0f 19 45 4e 41 42 4c NARY....)..ENABL +| 3472: 42 60 2d 45 4d 53 59 53 35 58 4e 4f 43 41 53 45 B`-EMSYS5XNOCASE +| 3488: 19 16 05 00 29 0f 17 45 4e 41 42 4c 45 20 4d 45 ....)..ENABLE ME +| 3504: 4d 53 59 53 35 58 52 54 52 49 4d 18 15 05 00 25 MSYS5XRTRIM....% +| 3520: 0f 19 45 4e 41 42 4c 45 20 4a 53 4f 4e 31 58 42 ..ENABLE JSON1XB +| 3536: 49 4e 41 52 59 18 14 05 00 25 0f 19 45 4e 41 42 INARY....%..ENAB +| 3552: 4c 45 20 4a 53 4f 4e 31 58 4e 4f 43 41 53 45 17 LE JSON1XNOCASE. +| 3568: 13 05 00 25 0f 17 45 4e 41 42 4c 45 20 4a 53 4f ...%..ENABLE JSO +| 3584: 4e 31 58 52 54 52 49 4d 1a 12 05 00 29 0f 19 45 N1XRTRIM....)..E +| 3600: 4e 41 42 4c 45 20 47 45 4f 50 4f 4c 59 58 42 49 NABLE GEOPOLYXBI +| 3616: 4e 41 52 59 1a 11 05 00 39 0f 19 45 4e 41 42 4c NARY....9..ENABL +| 3632: 45 20 47 45 4f 50 4f 4c 59 58 4e 4f 43 41 53 45 E GEOPOLYXNOCASE +| 3648: 19 10 05 00 29 0f 17 45 4e 41 42 4c 45 20 47 45 ....)..ENABLE GE +| 3664: 4f 50 4f 4c 59 58 52 54 52 49 4d 17 0f 05 00 23 OPOLYXRTRIM....# +| 3680: 0f 19 45 4e 41 42 4c 45 20 46 54 53 35 58 42 49 ..ENABLE FTS5XBI +| 3696: 4e 41 52 59 17 0e 05 00 23 0f 19 45 4e 41 42 4c NARY....#..ENABL +| 3712: 45 20 46 54 53 35 58 4e 4f 43 41 53 45 16 0d 05 E FTS5XNOCASE... +| 3728: 00 23 0f 17 45 4e 41 42 4c 45 20 46 54 53 35 58 .#..ENABLE FTS5X +| 3744: 52 54 52 49 4d 17 0c 05 00 23 0f 19 45 4e 41 42 RTRIM....#..ENAB +| 3760: 4c 45 20 46 54 53 34 58 42 49 4e 41 52 59 17 0b LE FTS4XBINARY.. +| 3776: 05 00 23 0f 19 45 4e 41 42 4c 45 20 46 54 53 34 ..#..ENABLE FTS4 +| 3792: 58 4e 4f 43 41 53 45 16 0a 05 00 23 0f 17 45 4e XNOCASE....#..EN +| 3808: 41 42 4c 45 20 46 54 53 34 58 52 54 52 49 4d 1e ABLE FTS4XRTRIM. +| 3824: 09 05 00 31 0f 19 45 4e 41 42 4c 45 20 44 42 53 ...1..ENABLE DBS +| 3840: 54 41 54 20 56 54 41 42 58 42 49 4e 41 52 59 1e TAT VTABXBINARY. +| 3856: 08 05 00 31 0f 19 45 4e 41 42 4c 45 20 44 42 53 ...1..ENABLE DBS +| 3872: 54 41 54 20 56 54 24 15 48 4e 4f 43 41 53 45 1d TAT VT$.HNOCASE. +| 3888: 07 05 00 31 0f 17 45 4e 41 42 4c 45 20 44 42 53 ...1..ENABLE DBS +| 3904: 54 41 54 20 56 54 41 42 58 52 54 52 49 4d 11 06 TAT VTABXRTRIM.. 
+| 3920: 05 00 17 0f 19 44 45 42 55 47 58 42 49 4e 41 52 .....DEBUGXBINAR +| 3936: 59 11 05 05 00 17 0f 19 44 45 42 55 47 58 4e 4f Y.......DEBUGXNO +| 3952: 43 41 53 45 10 04 05 00 17 0f 17 44 45 42 55 47 CASE.......DEBUG +| 3968: 58 52 54 52 49 4d 27 03 05 00 43 0f 19 43 4f 4d XRTRIM'...C..COM +| 3984: 50 49 4c 45 52 3d 67 63 63 2d 35 2e 34 2e 30 20 PILER=gcc-5.4.0 +| 4000: 32 30 31 36 30 36 30 39 58 42 49 4e 41 52 59 27 20160609XBINARY' +| 4016: 02 05 00 43 0f 19 43 4f 4d 50 49 4c 45 52 3c 67 ...C..COMPILER 10; +} {X'0000001A04306162630102020101620202020101640206030303040806'} + +do_execsql_test 2.2 { + UPDATE t1_data SET + block=X'0000001A04306162630102025501620202020101640206030303040806' + WHERE id>10 +} + +do_catchsql_test 2.3 { + DELETE FROM t1 WHERE rowid = 1 +} {/.*fts5: corrupt.*/} + +finish_test diff --git a/ext/fts5/test/fts5corrupt8.test b/ext/fts5/test/fts5corrupt8.test new file mode 100644 index 0000000000..471a1b0e39 --- /dev/null +++ b/ext/fts5/test/fts5corrupt8.test @@ -0,0 +1,147 @@ +# 2024 Aug 28 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5corrupt8 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); +} + +do_execsql_test 1.1 { + UPDATE t1_data SET block='hello world' WHERE id=10 +} + +db close +sqlite3 db test.db + +do_catchsql_test 1.2 { + SELECT * FROM t1 +} {1 {fts5: corrupt structure record for table "t1"}} +do_catchsql_test 1.3 { + DROP TABLE t1 +} {0 {}} +do_execsql_test 1.4 { + SELECT * FROM sqlite_schema +} + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); +} +do_execsql_test 2.1 { + UPDATE t1_config SET v=555 WHERE k='version' +} +db close +sqlite3 db test.db +do_catchsql_test 2.2 { + SELECT * FROM t1 +} {1 {invalid fts5 file format (found 555, expected 4 or 5) - run 'rebuild'}} +do_catchsql_test 2.3 { + DROP TABLE t1 +} {1 {invalid fts5 file format (found 555, expected 4 or 5) - run 'rebuild'}} +do_test 2.4 { + sqlite3_fts5_drop_corrupt_table db main t1 +} {} +do_execsql_test 2.5 { + SELECT * FROM sqlite_schema +} + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); +} +do_execsql_test 3.1 { + DELETE FROM t1_config; +} +db close +sqlite3 db test.db +do_catchsql_test 3.2 { + SELECT * FROM t1 +} {1 {invalid fts5 file format (found 0, expected 4 or 5) - run 'rebuild'}} +do_catchsql_test 3.3 { + DROP TABLE t1 +} {1 {invalid fts5 file format (found 0, expected 4 or 5) - run 'rebuild'}} + + +do_test 3.4 { + sqlite3_db_config db DEFENSIVE 1 +} {1} +do_test 3.5 { + sqlite3_fts5_drop_corrupt_table db main t1 +} {} +do_test 3.6 { + sqlite3_db_config db DEFENSIVE -1 +} {1} +do_execsql_test 3.7 { + SELECT * FROM sqlite_schema +} + +#------------------------------------------------------------------------- +reset_db + +proc hex_to_blob {hex} { + binary encode hex $hex +} +db func hex_to_blob hex_to_blob + +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x, content='', contentless_delete=1); + BEGIN; + INSERT INTO x1(rowid, x) VALUES(1, 'a b c d e f g h'); + INSERT INTO x1(rowid, x) VALUES(2, 'a b c d e f g h'); + COMMIT; + DELETE FROM x1 
WHERE rowid=1; +} + +do_execsql_test 4.1 { + SELECT hex(block) FROM x1_data WHERE id=10 +} { + 00000000FF00000101010200010101010101010102 +} + +do_execsql_test 4.2.1 { + UPDATE x1_data SET block= + X'00000000FF00000101010200010101010101819C9B95A8000102' + WHERE id=10; +} + +do_catchsql_test 4.2.2 { + SELECT * FROM x1('c d e'); +} {1 {out of memory}} + +do_execsql_test 4.3.1 { + UPDATE x1_data SET block= + X'00000000FF000001010102000101010101019282AFF9A0000102' + WHERE id=10; +} + +do_catchsql_test 4.3.2 { + SELECT * FROM x1('c d e'); +} {1 {out of memory}} + +do_execsql_test 4.4.1 { + UPDATE x1_data SET block= + X'00000000FF000001010102000101010101018181808080130102' + WHERE id=10; +} + +do_catchsql_test 4.3.2 { + SELECT * FROM x1('c d e'); +} {1 {out of memory}} + +finish_test + diff --git a/ext/fts5/test/fts5corruptbig.test b/ext/fts5/test/fts5corruptbig.test new file mode 100644 index 0000000000..6019f17eee --- /dev/null +++ b/ext/fts5/test/fts5corruptbig.test @@ -0,0 +1,53 @@ +# 2025 October 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test is focused on really large position lists. Those that require +# 4 or 5 byte position-list size varints. Because of the amount of memory +# required, these tests only run on 64-bit platforms. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5corruptbig + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +if { $tcl_platform(wordSize)<8 } { + finish_test + return +} + +if { $SQLITE_MAX_LENGTH!=0x7FFFFFFF } { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); +} + +do_execsql_test 1.1 { + UPDATE t1_data SET block = zeroblob(2147483640) WHERE id=10; +} + +do_execsql_test 1.2 { + SELECT id, length(block) FROM t1_data +} {1 0 10 2147483640} + +do_catchsql_test 1.3 { + SELECT * FROM t1('abc') +} {1 {out of memory}} + +finish_test + diff --git a/ext/fts5/test/fts5delete.test b/ext/fts5/test/fts5delete.test index 1214fec4f4..024f89594c 100644 --- a/ext/fts5/test/fts5delete.test +++ b/ext/fts5/test/fts5delete.test @@ -113,5 +113,58 @@ do_execsql_test 3.4 { INSERT INTO tx(tx) VALUES('integrity-check'); } +#------------------------------------------------------------------------- +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, b UNINDEXED, + content='', contentless_unindexed=1 + ); + CREATE VIRTUAL TABLE ft2 USING fts5(a, b UNINDEXED, + content='', contentless_unindexed=1, contentless_delete=1 + ); + + INSERT INTO ft1(rowid, a, b) VALUES(1, 'one', 'i'); + INSERT INTO ft1(rowid, a, b) VALUES(2, 'two', 'ii'); + INSERT INTO ft1(rowid, a, b) VALUES(3, 'three', 'iii'); + INSERT INTO ft2(rowid, a, b) VALUES(1, 'one', 'i'); + INSERT INTO ft2(rowid, a, b) VALUES(2, 'two', 'ii'); + INSERT INTO ft2(rowid, a, b) VALUES(3, 'three', 'iii'); +} + +do_catchsql_test 4.1 { + DELETE FROM ft1 WHERE rowid=2 +} {1 {cannot DELETE from contentless fts5 table: ft1}} +do_catchsql_test 4.2 { + DELETE FROM ft2 WHERE rowid=2 +} {0 {}} + +do_catchsql_test 4.3 { + INSERT INTO ft1(ft1, rowid, a) VALUES('delete', 2, 'two'); +} {0 {}} +do_catchsql_test 4.2 { + INSERT INTO ft2(ft2, rowid, a) VALUES('delete', 2, 'two'); +} {1 
{'delete' may not be used with a contentless_delete=1 table}} + +do_execsql_test 4.3 { + SELECT rowid, * FROM ft1; +} { + 1 {} i + 3 {} iii +} +do_execsql_test 4.4 { + SELECT rowid, * FROM ft2; +} { + 1 {} i + 3 {} iii +} + +do_execsql_test 4.5 { + SELECT * FROM ft1_content +} {1 i 3 iii} + +do_execsql_test 4.6 { + SELECT * FROM ft2_content +} {1 i 3 iii} + finish_test diff --git a/ext/fts5/test/fts5dlidx.test b/ext/fts5/test/fts5dlidx.test index 1fb95a9004..db82db1c2b 100644 --- a/ext/fts5/test/fts5dlidx.test +++ b/ext/fts5/test/fts5dlidx.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5dlidx -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5doclist.test b/ext/fts5/test/fts5doclist.test index 08b773f6f5..5b1becb514 100644 --- a/ext/fts5/test/fts5doclist.test +++ b/ext/fts5/test/fts5doclist.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5doclist -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5ea.test b/ext/fts5/test/fts5ea.test index 3ccbd7d7a2..49c2f2753a 100644 --- a/ext/fts5/test/fts5ea.test +++ b/ext/fts5/test/fts5ea.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5ea -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5eb.test b/ext/fts5/test/fts5eb.test index 9d6f251ed1..bee9683c3c 100644 --- a/ext/fts5/test/fts5eb.test +++ b/ext/fts5/test/fts5eb.test @@ -13,7 +13,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5eb -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -83,18 +83,21 @@ for {set i 0} {$i < 255} {incr i} { do_execsql_test 3.0 { CREATE VIRTUAL TABLE e1 USING fts5(text, tokenize = 'porter unicode61'); - INSERT INTO e1 VALUES ("just a few words with a / inside"); + INSERT INTO e1 VALUES ('just a few words with a / inside'); } do_execsql_test 3.1 { - SELECT rowid, bm25(e1) FROM e1 WHERE e1 MATCH '"just"' ORDER BY rank; + SELECT rowid, format('%g',bm25(e1)) FROM e1 WHERE e1 MATCH '"just"' ORDER BY rank; } {1 -1e-06} do_execsql_test 3.2 { SELECT rowid FROM e1 WHERE e1 MATCH '"/" OR "just"' } 1 do_execsql_test 3.3 { - SELECT rowid, bm25(e1) FROM e1 WHERE e1 MATCH '"/" OR "just"' ORDER BY rank; + SELECT rowid, format('%g',bm25(e1)) FROM e1 WHERE e1 MATCH '"/" OR "just"' ORDER BY rank; } {1 -1e-06} +do_execsql_test 3.4 " + SELECT fts5_expr_tcl('e AND \" \"'); +" {{AND [nearset -- {e}] [{}]}} finish_test diff --git a/ext/fts5/test/fts5expr.test b/ext/fts5/test/fts5expr.test new file mode 100644 index 0000000000..49be61d9c4 --- /dev/null +++ b/ext/fts5/test/fts5expr.test @@ -0,0 +1,52 @@ +# 2024 August 8 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. 
The +# focus of this script is testing the FTS5 module. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5expr + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE x1 USING fts5(a); + INSERT INTO x1(rowid, a) VALUES (113, 'fts5 expr test'); +} + +do_execsql_test 1.1 { + SELECT rowid FROM x1('expr'); +} {113} + +for {set ii 0} {$ii < 300} {incr ii} { + set expr "expr " + append expr [string repeat "NOT abcd " $ii] + + if {$ii<257} { + set res {0 113} + } else { + set res {1 {fts5 expression tree is too large (maximum depth 256)}} + } + do_catchsql_test 1.1.$ii { + SELECT rowid FROM x1($expr) + } $res +} + +do_execsql_test 1.2 { + SELECT rowid FROM x1 WHERE a MATCH '"..."' +} {} + +finish_test + diff --git a/ext/fts5/test/fts5fault4.test b/ext/fts5/test/fts5fault4.test index 877e0228ad..2b4f6c4d2a 100644 --- a/ext/fts5/test/fts5fault4.test +++ b/ext/fts5/test/fts5fault4.test @@ -16,12 +16,16 @@ source [file join [file dirname [info script]] fts5_common.tcl] source $testdir/malloc_common.tcl set testprefix fts5fault4 -# If SQLITE_ENABLE_FTS3 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return } +set ::TMPDBERROR [list 1 \ + {unable to open a temporary database file for storing temporary tables} +] + #------------------------------------------------------------------------- # An OOM while dropping an fts5 table. # @@ -86,7 +90,7 @@ set ::res [db eval {SELECT rowid, x1 FROM x1 WHERE x1 MATCH '*reads'}] do_faultsim_test 4 -faults oom-* -body { db eval {SELECT rowid, x, x1 FROM x1 WHERE x1 MATCH '*reads'} } -test { - faultsim_test_result {0 {0 {} 3}} + faultsim_test_result {0 {0 {} 2}} } #------------------------------------------------------------------------- @@ -391,7 +395,7 @@ do_faultsim_test 14.1 -faults oom-t* -prep { } -body { db eval { ALTER TABLE "tbl one" RENAME TO "tbl two" } } -test { - faultsim_test_result {0 {}} + faultsim_test_result {0 {}} $::TMPDBERROR } finish_test diff --git a/ext/fts5/test/fts5fault6.test b/ext/fts5/test/fts5fault6.test index a39063a356..1aacddce9f 100644 --- a/ext/fts5/test/fts5fault6.test +++ b/ext/fts5/test/fts5fault6.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] source $testdir/malloc_common.tcl set testprefix fts5fault6 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5fault8.test b/ext/fts5/test/fts5fault8.test index 5afab77541..dc060a1592 100644 --- a/ext/fts5/test/fts5fault8.test +++ b/ext/fts5/test/fts5fault8.test @@ -57,7 +57,6 @@ foreach_detail_mode $testprefix { } ;# foreach_detail_mode... 
- do_execsql_test 4.0 { CREATE VIRTUAL TABLE x2 USING fts5(a); INSERT INTO x2(x2, rank) VALUES('crisismerge', 2); @@ -80,5 +79,18 @@ do_faultsim_test 4 -faults oom-* -prep { faultsim_test_result {0 {}} {1 SQLITE_NOMEM} } +set TMPDBERROR {1 {unable to open a temporary database file for storing temporary tables}} + +do_faultsim_test 5 -faults oom-t* -prep { + faultsim_restore_and_reopen + execsql { PRAGMA temp_store = memory } +} -body { + execsql { PRAGMA integrity_check } +} -test { + if {[string match {*error code=7*} $testresult]==0} { + faultsim_test_result {0 ok} {1 SQLITE_NOMEM} $::TMPDBERROR + } +} + finish_test diff --git a/ext/fts5/test/fts5faultF.test b/ext/fts5/test/fts5faultF.test new file mode 100644 index 0000000000..96cc2b083f --- /dev/null +++ b/ext/fts5/test/fts5faultF.test @@ -0,0 +1,111 @@ +# 2023 July 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# +# This file is focused on OOM errors. Particularly those that may occur +# when using contentless_delete=1 databases. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +source $testdir/malloc_common.tcl +set testprefix fts5faultF + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +faultsim_save_and_close +do_faultsim_test 1 -prep { + faultsim_restore_and_reopen +} -body { + execsql { + CREATE VIRTUAL TABLE t1 USING fts5(x, y, content=, contentless_delete=1) + } +} -test { + faultsim_test_result {0 {}} {1 {vtable constructor failed: t1}} +} + +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(doc, content=, contentless_delete=1); + BEGIN; + INSERT INTO t1(rowid, doc) VALUES(1, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(2, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(3, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(4, 'a b c d'); + COMMIT; + DELETE FROM t1 WHERE rowid IN (2, 4); +} + +do_faultsim_test 2 -prep { + sqlite3 db test.db + execsql { SELECT rowid FROM t1 } +} -body { + execsql { + SELECT rowid FROM t1('b c'); + } +} -test { + faultsim_test_result {0 {1 3}} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5(doc, content=, contentless_delete=1); + BEGIN; + INSERT INTO t1(rowid, doc) VALUES(1, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(2, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(3, 'a b c d'); + INSERT INTO t1(rowid, doc) VALUES(4, 'a b c d'); + COMMIT; +} + +faultsim_save_and_close +do_faultsim_test 3 -prep { + faultsim_restore_and_reopen + execsql { SELECT rowid FROM t1 } +} -body { + execsql { + INSERT INTO t1(rowid, doc) VALUES(5, 'a b c d'); + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t1 USING fts5(doc, content=, contentless_delete=1); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO t1(rowid, doc) SELECT i, 'a b c d' FROM s; +} + +do_execsql_test 4.1 { DELETE FROM t1 WHERE rowid <= 25 } + +faultsim_save_and_close +do_faultsim_test 4 -faults oom-t* -prep { + faultsim_restore_and_reopen + 
execsql { SELECT rowid FROM t1 } +} -body { + execsql { + DELETE FROM t1 WHERE rowid < 100 + } +} -test { + faultsim_test_result {0 {}} +} + + +finish_test + diff --git a/ext/fts5/test/fts5faultG.test b/ext/fts5/test/fts5faultG.test new file mode 100644 index 0000000000..9110c6336d --- /dev/null +++ b/ext/fts5/test/fts5faultG.test @@ -0,0 +1,76 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] fts5_common.tcl] +source $testdir/malloc_common.tcl +set testprefix fts5faultG + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +set ::testprefix fts5faultG + + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a); + INSERT INTO t1 VALUES('test renaming the table'); + INSERT INTO t1 VALUES(' after it has been written'); + INSERT INTO t1 VALUES(' actually other stuff instead'); +} +faultsim_save_and_close +do_faultsim_test 1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + DELETE FROM t1 WHERE rowid=2; + } +} -body { + execsql { + DELETE FROM t1; + } +} -test { + catchsql { COMMIT } + faultsim_integrity_check + faultsim_test_result {0 {}} +} + +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, content=, contentless_delete=1); + BEGIN; + INSERT INTO t1 VALUES('here''s some text'); + INSERT INTO t1 VALUES('useful stuff, text'); + INSERT INTO t1 VALUES('what would we do without text!'); + COMMIT; +} +faultsim_save_and_close +do_faultsim_test 2 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + DELETE FROM t1 WHERE rowid=2; + } +} -body { + execsql { + INSERT INTO t1(t1) VALUES('optimize'); + } +} -test { + faultsim_integrity_check + faultsim_test_result {0 {}} +} + + + +finish_test diff --git a/ext/fts5/test/fts5faultH.test b/ext/fts5/test/fts5faultH.test new file mode 100644 index 0000000000..0cbbf7f5ef --- /dev/null +++ b/ext/fts5/test/fts5faultH.test @@ -0,0 +1,150 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] fts5_common.tcl] +source $testdir/malloc_common.tcl +set testprefix fts5faultG + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +set ::testprefix fts5faultH + +sqlite3_fts5_register_origintext db + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5( + x, tokenize="origintext unicode61", tokendata=1 + ); + + BEGIN; + INSERT INTO t1 VALUES('oNe tWo thRee'); + INSERT INTO t1 VALUES('One Two Three'); + INSERT INTO t1 VALUES('onE twO threE'); + COMMIT; + BEGIN; + INSERT INTO t1 VALUES('one two three'); + INSERT INTO t1 VALUES('one two three'); + INSERT INTO t1 VALUES('one two three'); + COMMIT; +} + +do_faultsim_test 1 -faults oom* -prep { +} -body { + execsql { + SELECT rowid FROM t1('three'); + } +} -test { + faultsim_integrity_check + faultsim_test_result {0 {1 2 3 4 5 6}} +} + + +reset_db +sqlite3_fts5_register_origintext db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5( + x, tokenize="origintext unicode61", tokendata=1 + ); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + + BEGIN; + INSERT INTO t1(rowid, x) VALUES(10, 'aaa bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(12, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(13, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(14, 'bbb BBB bbb'); + INSERT INTO t1(rowid, x) VALUES(15, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(16, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(17, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(18, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(19, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(20, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(21, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(22, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(23, 'bbb bbb bbb'); + INSERT INTO t1(rowid, x) VALUES(24, 'aaa bbb BBB'); + COMMIT; +} + +do_faultsim_test 2 -faults oom* -prep { +} -body { + execsql { + SELECT rowid FROM t1('BBB AND AAA'); + } +} -test { + faultsim_integrity_check + faultsim_test_result {0 {10 24}} +} + +reset_db +sqlite3_fts5_register_origintext db +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5( + x, tokenize="origintext unicode61", tokendata=1 + ); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + + INSERT INTO t1(rowid, x) VALUES(9, 'bbb Bbb BBB'); + BEGIN; + INSERT INTO t1(rowid, x) VALUES(10, 'aaa bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(11, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(12, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(13, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(14, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(15, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(16, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(17, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(18, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(19, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(20, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(21, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(22, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(23, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(24, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(25, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(26, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(27, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(28, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(29, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(30, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(31, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(32, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(33, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) VALUES(34, 'bbb Bbb BBB'); + INSERT INTO t1(rowid, x) 
VALUES(35, 'aaa bbb BBB'); + COMMIT; +} + +do_faultsim_test 3.1 -faults oom* -prep { +} -body { + execsql { + SELECT rowid FROM t1('BBB AND AAA'); + } +} -test { + faultsim_integrity_check + faultsim_test_result {0 {10 35}} +} +do_faultsim_test 3.2 -faults oom* -prep { +} -body { + execsql { + SELECT count(*) FROM t1('BBB'); + } +} -test { + faultsim_integrity_check + faultsim_test_result {0 27} +} + + +finish_test diff --git a/ext/fts5/test/fts5faultI.test b/ext/fts5/test/fts5faultI.test new file mode 100644 index 0000000000..a2b04af8f5 --- /dev/null +++ b/ext/fts5/test/fts5faultI.test @@ -0,0 +1,349 @@ +# 2010 June 15 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# + +source [file join [file dirname [info script]] fts5_common.tcl] +source $testdir/malloc_common.tcl +set testprefix fts5faultI + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +set ::testprefix fts5faultI + +do_execsql_test 1.0 { + PRAGMA encoding = utf16; + CREATE VIRTUAL TABLE t1 USING fts5(x, locale=1); + INSERT INTO t1 VALUES('origintext unicode61 ascii porter trigram'); +} + +faultsim_save_and_close +faultsim_restore_and_reopen + +do_faultsim_test 1 -faults oom* -prep { +} -body { + execsql { + SELECT rowid FROM t1(fts5_locale('en_US', 'origintext')); + } +} -test { + faultsim_test_result {0 1} +} + +do_faultsim_test 2 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + SELECT * FROM t1('ascii'); + } +} -body { + execsql { + UPDATE t1 SET rowid=rowid+1; + } +} -test { + faultsim_test_result {0 {}} +} + +fts5_aux_test_functions db +do_faultsim_test 3 -faults oom* -prep { +} -body { + execsql { + SELECT fts5_columnlocale(t1, 0) FROM t1('unicode*'); + } +} -test { + faultsim_test_result {0 {{}}} {1 SQLITE_NOMEM} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE w1 USING fts5(a); +} +faultsim_save_and_close + +do_faultsim_test 4 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + INSERT INTO w1 VALUES('token token token'); + } +} -body { + execsql { + INSERT INTO w1(w1, rank) VALUES('rank', 'bm25()'); + } +} -test { + faultsim_test_result {0 {}} +} + +do_faultsim_test 5 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + BEGIN; + INSERT INTO w1 VALUES('one'); + SAVEPOINT one; + INSERT INTO w1 VALUES('two'); + ROLLBACK TO one; + } + +} -body { + execsql { + INSERT INTO w1 VALUES('string'); + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE w1 USING fts5(a); + INSERT INTO w1 VALUES('one two three'); +} +fts5_aux_test_functions db + +do_faultsim_test 5 -faults oom* -prep { +} -body { + execsql { + SELECT fts5_test_insttoken(w1, 0, 0) FROM w1('two'); + } +} -test { + faultsim_test_result {0 two} {1 SQLITE_NOMEM} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE w1 USING fts5(a); + INSERT INTO w1 VALUES('one two three'); +} +fts5_aux_test_functions db +faultsim_save_and_close + +do_faultsim_test 6 -faults oom* -prep { + 
faultsim_restore_and_reopen + db eval { + BEGIN; + INSERT INTO w1 VALUES('four five six'); + SAVEPOINT abc; + INSERT INTO w1 VALUES('seven eight nine'); + SAVEPOINT def; + INSERT INTO w1 VALUES('ten eleven twelve'); + } +} -body { + execsql { + RELEASE abc; + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 7.0 { + CREATE VIRTUAL TABLE w1 USING fts5(a); + INSERT INTO w1 VALUES('one two three'); + INSERT INTO w1 VALUES('three two one'); + DELETE FROM w1_content WHERE rowid=1; +} + +faultsim_save_and_close + +do_faultsim_test 7 -faults oom* -prep { + faultsim_restore_and_reopen + db eval { SELECT * FROM w1 } +} -body { + execsql { + PRAGMA integrity_check; + } +} -test { +} + +#------------------------------------------------------------------------- +reset_db +fts5_tclnum_register db +fts5_aux_test_functions db + +do_execsql_test 8.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize = "tclnum query", detail=columns + ); + INSERT INTO ft VALUES('one two three i ii iii'); + INSERT INTO ft VALUES('four five six iv v vi'); + INSERT INTO ft VALUES('eight nine ten viii ix x'); +} {} + +do_faultsim_test 8.1 -faults oom* -prep { +} -body { + execsql { + SELECT fts5_test_collist (ft) FROM ft('one two'); + } +} -test { + faultsim_test_result {0 {{0.0 1.0}}} {1 {SQL logic error}} {1 SQLITE_NOMEM} +} + +do_faultsim_test 8.2 -faults oom* -prep { +} -body { + execsql { + SELECT rowid FROM ft('one two') ORDER BY rank; + } +} -test { + faultsim_test_result {0 1} {1 {SQL logic error}} {1 SQLITE_NOMEM} +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 9.0 { + CREATE VIRTUAL TABLE ft USING fts5(x); + INSERT INTO ft VALUES('one two three i ii iii'); + INSERT INTO ft VALUES('four five six iv v vi'); + INSERT INTO ft VALUES('eight nine ten viii ix x'); +} {} + +faultsim_save_and_close + +do_faultsim_test 9.1 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + UPDATE ft SET rowid=4 WHERE rowid=1 + } +} -test { + faultsim_test_result {0 {}} +} + +do_faultsim_test 9.2 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + SELECT rowid FROM ft WHERE x MATCH 'one AND two AND three' + } +} -test { + faultsim_test_result {0 1} +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 10.0 { + CREATE VIRTUAL TABLE ft USING fts5(x, locale=1); + INSERT INTO ft VALUES(fts5_locale('hello', 'one two three i ii iii')); + INSERT INTO ft VALUES('four five six iv v vi'); + INSERT INTO ft VALUES('eight nine ten viii ix x'); +} {} + +do_execsql_test 10.1 { + SELECT fts5_get_locale(ft, 0) FROM ft WHERE x MATCH 'one AND two AND three' +} {hello} + +faultsim_save_and_close +do_faultsim_test 10.1 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + SELECT fts5_get_locale(ft, 0) FROM ft WHERE x MATCH 'one AND two AND three' + } +} -test { + faultsim_test_result {0 hello} +} + +breakpoint +faultsim_save_and_close +do_faultsim_test 10.2 -faults oom-t* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + INSERT INTO ft VALUES(zeroblob(10000)); + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 11.0 { + CREATE VIRTUAL TABLE f1 USING fts5(content); + CREATE TABLE g1(id, content); + INSERT INTO g1 VALUES(30000, 'a b c'); + INSERT INTO g1 
VALUES(40000, 'd e f'); +} + +faultsim_save_and_close + +do_faultsim_test 11 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + INSERT INTO f1(rowid, content) SELECT id, content FROM g1; + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +reset_db + +ifcapable foreignkey { + do_execsql_test 12.0 { + CREATE VIRTUAL TABLE f1 USING fts5(content); + CREATE TABLE p1(a INTEGER PRIMARY KEY); + CREATE TABLE c1(b REFERENCES p1 DEFERRABLE INITIALLY DEFERRED); + } + + faultsim_save_and_close + + do_faultsim_test 11 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + PRAGMA foreign_keys = 1; + BEGIN; + INSERT INTO c1 VALUES(123); + SAVEPOINT xyz; + } + } -body { + execsql { + INSERT INTO f1 VALUES('a b c'); + ROLLBACK TO xyz; + COMMIT; + } + } -test { + execsql { SELECT 123 } + faultsim_test_result \ + {1 {FOREIGN KEY constraint failed}} \ + {1 {out of memory}} \ + {1 {constraint failed}} + } +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 13.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, b); + INSERT INTO t1 VALUES('abc def', X'123456'); +} +faultsim_save_and_close + + +do_faultsim_test 13 -faults oom* -prep { + faultsim_restore_and_reopen +} -body { + execsql { + UPDATE t1 SET a='def abc' + } +} -test { + faultsim_test_result {0 {}} +} + +finish_test + diff --git a/ext/fts5/test/fts5first.test b/ext/fts5/test/fts5first.test index 357672de68..492681eed7 100644 --- a/ext/fts5/test/fts5first.test +++ b/ext/fts5/test/fts5first.test @@ -22,6 +22,7 @@ do_execsql_test 1.0 { CREATE VIRTUAL TABLE x1 USING fts5(a, b); } +unset -nocomplain res foreach {tn expr ok} { 1 {^abc} 1 2 {^abc + def} 1 diff --git a/ext/fts5/test/fts5full.test b/ext/fts5/test/fts5full.test index 751e874c3b..76fdc0288f 100644 --- a/ext/fts5/test/fts5full.test +++ b/ext/fts5/test/fts5full.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5full -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5hash.test b/ext/fts5/test/fts5hash.test index 5df55f226f..b3d8b562c8 100644 --- a/ext/fts5/test/fts5hash.test +++ b/ext/fts5/test/fts5hash.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5hash -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5integrity.test b/ext/fts5/test/fts5integrity.test index d922ad3b86..4bf120c446 100644 --- a/ext/fts5/test/fts5integrity.test +++ b/ext/fts5/test/fts5integrity.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5integrity -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -37,6 +37,12 @@ do_execsql_test 2.1 { INSERT INTO yy(yy) VALUES('integrity-check'); } +db close +sqlite3 db test.db +do_execsql_test 2.1 { + INSERT INTO yy(yy) VALUES('integrity-check'); +} + #-------------------------------------------------------------------- # do_execsql_test 3.0 { @@ -77,6 +83,9 @@ do_catchsql_test 4.2 { UPDATE aa_docsize SET sz = X'44' WHERE rowid = 3; INSERT INTO aa(aa) VALUES('integrity-check'); } {1 {database disk image is malformed}} +do_execsql_test 4.2.1 { + PRAGMA integrity_check(aa); +} {{malformed inverted index for FTS5 table main.aa}} do_catchsql_test 4.3 { ROLLBACK; @@ -150,6 +159,7 @@ do_execsql_test 5.3 { INSERT INTO gg(gg) VALUES('integrity-check'); } +unset -nocomplain res do_test 5.4.1 { set ok 0 for {set i 0} {$i < 10000} {incr i} { @@ -188,11 +198,11 @@ foreach {tn pgsz} { INSERT INTO hh(hh, rank) VALUES('pgsz', $pgsz); WITH s(i) AS (SELECT 0 UNION ALL SELECT i+1 FROM s WHERE i<999) - INSERT INTO hh SELECT printf("%.3d%.3d%.3d %.3d%.3d%.3d",i,i,i,i+1,i+1,i+1) + INSERT INTO hh SELECT printf('%.3d%.3d%.3d %.3d%.3d%.3d',i,i,i,i+1,i+1,i+1) FROM s; WITH s(i) AS (SELECT 0 UNION ALL SELECT i+1 FROM s WHERE i<999) - INSERT INTO hh SELECT printf("%.3d%.3d%.3d %.3d%.3d%.3d",i,i,i,i+1,i+1,i+1) + INSERT INTO hh SELECT printf('%.3d%.3d%.3d %.3d%.3d%.3d',i,i,i,i+1,i+1,i+1) FROM s; INSERT INTO hh(hh) VALUES('optimize'); @@ -317,4 +327,92 @@ do_catchsql_test 10.5.3 { INSERT INTO vt0(vt0) VALUES('integrity-check'); } {0 {}} +reset_db +proc slang {in} {return [string map {th d e eh} $in]} +db function slang -deterministic -innocuous slang +do_execsql_test 11.0 { + CREATE TABLE t1(a INTEGER PRIMARY KEY, b TEXT, c TEXT AS (slang(b))); + INSERT INTO t1(b) VALUES('the quick fox jumps over the lazy brown dog'); + SELECT c FROM t1; +} {{deh quick fox jumps ovehr deh lazy brown dog}} + +do_execsql_test 11.1 { + CREATE VIRTUAL TABLE t2 USING fts5(content="t1", c); + INSERT INTO t2(t2) VALUES('rebuild'); + SELECT rowid FROM t2 WHERE t2 MATCH 'deh'; +} {1} + +do_execsql_test 11.2 { + PRAGMA integrity_check(t2); +} {ok} +db close +sqlite3 db test.db + +# FIX ME? +# +# FTS5 integrity-check does not care if the content table is unreadable or +# does not exist. It only looks for internal inconsistencies in the +# inverted index. 
+# +do_execsql_test 11.3 { + PRAGMA integrity_check(t2); +} {ok} +do_execsql_test 11.4 { + DROP TABLE t1; + PRAGMA integrity_check(t2); +} {ok} + +#------------------------------------------------------------------- +reset_db + +do_execsql_test 12.1 { + CREATE VIRTUAL TABLE x1 USING fts5(a, b); + INSERT INTO x1 VALUES('one', 'two'); + INSERT INTO x1 VALUES('three', 'four'); + INSERT INTO x1 VALUES('five', 'six'); +} + +do_execsql_test 12.2 { + PRAGMA integrity_check +} {ok} + +db close +sqlite3 db test.db -readonly 1 + +explain_i { + PRAGMA integrity_check + } +do_execsql_test 12.3 { + PRAGMA integrity_check +} {ok} + + +#------------------------------------------------------------------- +reset_db +do_execsql_test 13.1 { + CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=ascii); + INSERT INTO t1 VALUES('a b c'), ('d e f'); + PRAGMA integrity_check; +} {ok} + +db close +sqlite3 db test.db +do_catchsql_test 13.2 { + PRAGMA integrity_check; +} {0 ok} + +do_execsql_test 13.3 { + PRAGMA writable_schema = 1; + UPDATE sqlite_schema SET sql = 'CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=blah)' + WHERE name = 't1'; +} + +db close +sqlite3 db test.db +breakpoint +do_catchsql_test 13.4 { + PRAGMA integrity_check; +} {1 {SQL logic error}} + + finish_test diff --git a/ext/fts5/test/fts5integrity2.test b/ext/fts5/test/fts5integrity2.test new file mode 100644 index 0000000000..968be3bddf --- /dev/null +++ b/ext/fts5/test/fts5integrity2.test @@ -0,0 +1,56 @@ +# 2024 September 3 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This file contains tests focused on the integrity-check procedure. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5integrity2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t2 USING fts5(a, detail='none'); + BEGIN; + INSERT INTO t2(rowid, a) VALUES(-1, 'hello world'); + INSERT INTO t2(rowid, a) VALUES(9223372036854775807, 'hello world'); + COMMIT; +} + +do_execsql_test 2.1 { + SELECT rowid FROM t2('hello AND world'); +} {-1 9223372036854775807} + +#------------------------------------------------------------------------- +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, detail='none'); + CREATE TABLE r1(r); + + WITH c(x) AS (VALUES(1) UNION SELECT x<<1 FROM c) + INSERT INTO r1(r) SELECT -1-x FROM c; + + INSERT INTO t1(rowid, a) SELECT r, 'abc' FROM r1; +} + +do_execsql_test 2.1 { + PRAGMA integrity_check; +} {ok} + +do_execsql_test 2.2 { + SELECT rowid FROM t1('abc') ORDER BY +rowid; +} [db eval {SELECT r FROM r1 ORDER BY r}] + + +finish_test diff --git a/ext/fts5/test/fts5interrupt.test b/ext/fts5/test/fts5interrupt.test index ca682852c4..67ef5f7e97 100644 --- a/ext/fts5/test/fts5interrupt.test +++ b/ext/fts5/test/fts5interrupt.test @@ -33,6 +33,7 @@ proc progress_handler {args} { return 0 } +unset -nocomplain res foreach {tn sql} { 1 { INSERT INTO t1(rowid, a) VALUES(0, 'z z z z') } 2 { COMMIT } @@ -64,4 +65,3 @@ foreach {tn sql} { } finish_test - diff --git a/ext/fts5/test/fts5join.test b/ext/fts5/test/fts5join.test new file mode 100644 index 0000000000..e4d3b69b79 --- /dev/null +++ b/ext/fts5/test/fts5join.test @@ -0,0 +1,69 @@ +# 2014 June 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS5 module. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5join + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE vt USING fts5(x); + INSERT INTO vt VALUES('abc'); + INSERT INTO vt VALUES('xyz'); + + CREATE TABLE t1(a INTEGER PRIMARY KEY, b TIMESTAMP); + INSERT INTO t1 VALUES(1, 1), (2, 2); + CREATE INDEX i1 ON t1(b); +} + +# set sqlite_where_trace [expr 0xFFF] + +do_eqp_test 1.1 { + SELECT * FROM vt, t1 WHERE vt.rowid = t1.rowid ORDER BY t1.rowid; +} { + QUERY PLAN + |--SCAN t1 + `--SCAN vt VIRTUAL TABLE INDEX 0:= +} + +do_eqp_test 1.2 { + SELECT * FROM vt, t1 WHERE vt.rowid = t1.rowid AND b>? ORDER BY b LIMIT 10 +} { + QUERY PLAN + |--SEARCH t1 USING COVERING INDEX i1 (b>?) + `--SCAN vt VIRTUAL TABLE INDEX 0:= +} + +do_eqp_test 1.3 { + SELECT * FROM vt, t1 WHERE vt.rowid = t1.rowid AND b>? +} { + QUERY PLAN + |--SEARCH t1 USING COVERING INDEX i1 (b>?) 
+ `--SCAN vt VIRTUAL TABLE INDEX 0:= +} + +do_eqp_test 1.4 { + SELECT * FROM vt, t1 WHERE vt.rowid = t1.rowid ORDER BY b +} { + QUERY PLAN + |--SCAN t1 USING COVERING INDEX i1 + `--SCAN vt VIRTUAL TABLE INDEX 0:= +} + + +finish_test diff --git a/ext/fts5/test/fts5lastrowid.test b/ext/fts5/test/fts5lastrowid.test index d152a8f09b..75866139d3 100644 --- a/ext/fts5/test/fts5lastrowid.test +++ b/ext/fts5/test/fts5lastrowid.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5lastrowid -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5leftjoin.test b/ext/fts5/test/fts5leftjoin.test index 4ef6a8961b..69a172bd45 100644 --- a/ext/fts5/test/fts5leftjoin.test +++ b/ext/fts5/test/fts5leftjoin.test @@ -40,4 +40,53 @@ do_execsql_test 1.2 { SELECT * FROM t1 LEFT JOIN vt ON (vt MATCH 'abc') } {1 abc 2 abc} + +do_execsql_test 1.3 { + DELETE FROM t1; + INSERT INTO t1 VALUES(14); +} + +do_execsql_test 1.4 { + SELECT * FROM vt LEFT JOIN t1 ON vt.rowid = 1; +} { + abc 14 + xyz {} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t0 USING fts5(a,b); + INSERT INTO t0(a,b)VALUES(1,0); + CREATE TABLE t1(x); +} + +do_execsql_test 2.1 { + SELECT * FROM t0 LEFT JOIN t1; +} {1 0 {}} + +breakpoint +do_catchsql_test 2.2 { + SELECT * FROM t0 LEFT JOIN t1 ON t0.b MATCH '1'; +} {1 {no query solution}} + +do_execsql_test 2.3 { + SELECT * FROM t0 LEFT JOIN t1 ON +b MATCH '1'; +} {1 0 {}} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t0 USING fts5(c0, c1); + INSERT INTO t0(c0,c1) VALUES (1,0); +} + +do_catchsql_test 3.1 { + SELECT * FROM t0 + LEFT JOIN ( SELECT 0 AS col_0 ) + ON ((((t0.c1 MATCH '1')AND(CASE WHEN t0.c0 THEN CAST(t0.c1 AS INTEGER) ELSE 1 END)))); +} {1 {no query solution}} + + finish_test diff --git a/ext/fts5/test/fts5limits.test b/ext/fts5/test/fts5limits.test new file mode 100644 index 0000000000..90d175aa31 --- /dev/null +++ b/ext/fts5/test/fts5limits.test @@ -0,0 +1,47 @@ +# 2023 May 16 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5limits +return_if_no_fts5 + + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5(x); +} + +# Default limit for expression depth is 256 +# +foreach {tn nRepeat op bErr} { + 1 200 AND 0 + 2 200 NOT 0 + 3 200 OR 0 + + 4 260 AND 0 + 5 260 NOT 1 + 6 260 OR 0 +} { + set L [string repeat "abc " $nRepeat] + set Q [join $L " $op "] + + set res {0 {}} + if {$bErr} { + set res "1 {fts5 expression tree is too large (maximum depth 256)}" + } + + do_catchsql_test 1.$tn { + SELECT * FROM ft($Q) + } $res +} + +finish_test + diff --git a/ext/fts5/test/fts5locale.test b/ext/fts5/test/fts5locale.test new file mode 100644 index 0000000000..e5799fb7fd --- /dev/null +++ b/ext/fts5/test/fts5locale.test @@ -0,0 +1,748 @@ +# 2014 Dec 20 +# +# The author disclaims copyright to this source code. 
In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focusing on fts5 locale support (locale=1 and fts5_locale()).
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5locale
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 {
+  finish_test
+  return
+}
+
+proc transform_token {locale token} {
+  switch -- $locale {
+    reverse {
+      set ret ""
+      foreach c [split $token ""] {
+        set ret "$c$ret"
+      }
+      set token $ret
+    }
+
+    default {
+      # no-op
+    }
+  }
+
+  set token
+}
+
+proc tcl_create {args} { return "tcl_tokenize" }
+proc tcl_tokenize {tflags text} {
+  set iToken 1
+  set bSkip 0
+  if {[sqlite3_fts5_locale]=="second"} { set bSkip 1 }
+  foreach {w iStart iEnd} [fts5_tokenize_split $text] {
+    incr iToken
+    if {(($iToken) % ($bSkip + 1))} continue
+
+    set w [transform_token [sqlite3_fts5_locale] $w]
+    sqlite3_fts5_token $w $iStart $iEnd
+  }
+}
+
+#-------------------------------------------------------------------------
+# Check that queries can have a locale attached to them.
+#
+reset_db
+sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create
+
+do_execsql_test 1.0 {
+  CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=tcl);
+  INSERT INTO t1 VALUES('abc');
+  INSERT INTO t1 VALUES('cba');
+} {}
+
+do_execsql_test 1.1 {
+  SELECT rowid, a FROM t1( fts5_locale('en_US', 'abc') );
+} {1 abc}
+
+do_execsql_test 1.2 {
+  SELECT rowid, a FROM t1( fts5_locale('reverse', 'abc') );
+} {2 cba}
+
+
+#-------------------------------------------------------------------------
+# Test that the locale= option exists and seems to accept values, and
+# that fts5_locale() values may only be inserted into an internal-content
+# table if the locale=1 option was specified.
+#
+reset_db
+sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create
+
+do_execsql_test 2.1 {
+  CREATE VIRTUAL TABLE b1 USING fts5(x, y, locale=1, tokenize=tcl);
+  CREATE VIRTUAL TABLE b2 USING fts5(x, y, locale=0, tokenize=tcl);
+
+  CREATE VIRTUAL TABLE ttt USING fts5vocab('b1', instance);
+}
+
+do_catchsql_test 2.2.1 {
+  CREATE VIRTUAL TABLE b3 USING fts5(x, y, locale=2);
+} {1 {malformed locale=... directive}}
+do_catchsql_test 2.2.2 {
+  CREATE VIRTUAL TABLE b3 USING fts5(x, y, locale=111);
+} {1 {malformed locale=...
directive}} + +do_catchsql_test 2.3 { + INSERT INTO b1(b1, rank) VALUES('locale', 0); +} {1 {SQL logic error}} + +do_execsql_test 2.4.1 { + INSERT INTO b1 VALUES('abc', 'one two three'); +} + +do_execsql_test 2.4.2 { + INSERT INTO b1 VALUES('def', fts5_locale('reverse', 'four five six')); +} + +do_execsql_test 2.5 { + INSERT INTO b2 VALUES('abc', 'one two three'); +} + +do_catchsql_test 2.6 { + INSERT INTO b2 VALUES('def', fts5_locale('reverse', 'four five six')); +} {1 {fts5_locale() requires locale=1}} + +do_execsql_test 2.7 { SELECT rowid FROM b1('one') } {1} +do_execsql_test 2.8 { SELECT rowid FROM b1('four') } {} +do_execsql_test 2.9 { SELECT rowid FROM b1('ruof') } 2 +do_execsql_test 2.10 { SELECT rowid FROM b1(fts5_locale('reverse', 'five'))} 2 + +do_execsql_test 2.11 { + SELECT x, quote(y) FROM b1 +} { + abc {'one two three'} + def {'four five six'} +} + +do_execsql_test 2.12 { SELECT quote(y) FROM b1('ruof') } { + {'four five six'} +} + +do_execsql_test 2.13 { + INSERT INTO b1(b1) VALUES('integrity-check'); +} + +do_execsql_test 2.14 { + INSERT INTO b1(b1) VALUES('rebuild'); +} +do_execsql_test 2.15 { + INSERT INTO b1(b1) VALUES('integrity-check'); +} + +do_execsql_test 2.16 { + DELETE FROM b1 WHERE rowid=2 +} +do_execsql_test 2.17 { + INSERT INTO b1(b1) VALUES('integrity-check'); +} + +do_execsql_test 2.18 { + INSERT INTO b1(rowid, x, y) VALUES( + test_setsubtype(45, 76), 'abc def', 'def abc' + ); +} + +#------------------------------------------------------------------------- +# Test the 'delete' command with contentless tables. +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + +do_execsql_test 3.1 { + CREATE VIRTUAL TABLE c1 USING fts5(x, content=, tokenize=tcl, locale=1); + CREATE VIRTUAL TABLE c2 USING fts5vocab('c1', instance); + + INSERT INTO c1 VALUES('hello world'); + INSERT INTO c1 VALUES( fts5_locale('reverse', 'one two three') ); +} + +do_execsql_test 3.2 { + SELECT DISTINCT term FROM c2 ORDER BY 1 +} { + eerht eno hello owt world +} + +do_execsql_test 3.3 { + INSERT INTO c1(c1, rowid, x) + VALUES('delete', 2, fts5_locale('reverse', 'one two three') ); +} + +do_execsql_test 3.4 { + SELECT DISTINCT term FROM c2 ORDER BY 1 +} { + hello world +} + +#------------------------------------------------------------------------- +# Test that an UPDATE that updates a subset of the columns does not +# magically discard the locale from those columns not updated. 
+# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + +do_execsql_test 4.1 { + CREATE VIRTUAL TABLE d1 USING fts5(x, y, locale=1, tokenize=tcl); + CREATE VIRTUAL TABLE d2 USING fts5vocab('d1', instance); + + INSERT INTO d1(rowid, x, y) VALUES(1, 'abc', 'def'); + INSERT INTO d1(rowid, x, y) VALUES(2, 'ghi', fts5_locale('reverse', 'hello')); +} + +do_execsql_test 4.2 { + SELECT DISTINCT term FROM d2 ORDER BY 1 +} { + abc def ghi olleh +} + +do_execsql_test 4.3 { + UPDATE d1 SET x='jkl' WHERE rowid=2; +} + +do_execsql_test 4.4 { + SELECT DISTINCT term FROM d2 ORDER BY 1 +} { + abc def jkl olleh +} + +do_execsql_test 4.5 { + SELECT rowid, * FROM d1 +} { + 1 abc def + 2 jkl hello +} + +do_execsql_test 4.6 { + UPDATE d1 SET rowid=4 WHERE rowid=2 +} + +do_execsql_test 4.7 { + SELECT rowid, * FROM d1 +} { + 1 abc def + 4 jkl hello +} + +fts5_aux_test_functions db + +do_execsql_test 4.8.1 { + SELECT fts5_test_columntext(d1) FROM d1('jkl') +} {{jkl hello}} +do_execsql_test 4.8.2 { + SELECT fts5_test_columntext(d1) FROM d1(fts5_locale('reverse', 'hello')) +} {{jkl hello}} + +do_execsql_test 4.9 { + SELECT fts5_test_columnlocale(d1) FROM d1(fts5_locale('reverse', 'hello')) +} {{{} reverse}} + +do_execsql_test 4.10 { + SELECT fts5_test_columnlocale(d1) FROM d1 +} { + {{} {}} + {{} reverse} +} + +#------------------------------------------------------------------------- +# Test that if an fts5_locale() value is written to an UNINDEXED +# column it is stored as text. This is so that blobs and other values +# can also be stored as is. +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + +do_execsql_test 5.1 { + CREATE VIRTUAL TABLE t1 USING fts5( + x, y UNINDEXED, locale=1, tokenize=tcl + ); + + INSERT INTO t1(rowid, x, y) VALUES(111, + fts5_locale('reverse', 'one two three'), + fts5_locale('reverse', 'four five six') + ); +} + +do_execsql_test 5.2 { + SELECT rowid, x, y FROM t1 +} { + 111 {one two three} {four five six} +} + +do_execsql_test 5.3 { + SELECT typeof(c0), typeof(c1), typeof(l0) FROM t1_content +} { + text text text +} + +#------------------------------------------------------------------------- + +foreach {tn opt} { + 1 {} + 2 {, columnsize=0} +} { + reset_db + sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + do_execsql_test 6.$tn.1 " + CREATE VIRTUAL TABLE y1 USING fts5(t, locale=1, tokenize=tcl $opt); + " + + do_execsql_test 6.$tn.2 { + INSERT INTO y1(rowid, t) VALUES + (1, fts5_locale('second', 'the city of London')), + (2, fts5_locale('second', 'shall have all the old')), + (3, fts5_locale('second', 'Liberties and Customs')), + (4, fts5_locale('second', 'which it hath been used to have')); + } + + fts5_aux_test_functions db + + do_execsql_test 6.$tn.3 { + SELECT fts5_test_columnsize(y1) FROM y1 + } { + 2 3 2 4 + } + + do_execsql_test 6.$tn.4 { + SELECT rowid, fts5_test_columnsize(y1) FROM y1('shall'); + } { + 2 3 + } + + do_execsql_test 6.$tn.5 { + SELECT rowid, fts5_test_columnsize(y1) FROM y1('shall'); + } { + 2 3 + } + + do_execsql_test 6.$tn.6 { + SELECT rowid, fts5_test_columnsize(y1) FROM y1('have'); + } { + 4 4 + } + + do_execsql_test 6.$tn.7 { + SELECT rowid, highlight(y1, 0, '[', ']') FROM y1('have'); + } { + 4 {which it hath been used to [have]} + } + + do_execsql_test 6.$tn.8 { + SELECT rowid, + highlight(y1, 0, '[', ']'), + snippet(y1, 0, '[', ']', '...', 10) + FROM y1('Liberties + Customs'); + } { + 3 {[Liberties and Customs]} + {[Liberties and Customs]} + } +} + +#------------------------------------------------------------------------- 
+reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); +} +do_catchsql_test 6.1 { + INSERT INTO x1(rowid, x) VALUES(123, fts5_locale('en_AU', 'hello world')); +} {1 {fts5_locale() requires locale=1}} + +do_execsql_test 6.2 { + SELECT typeof( fts5_locale(NULL, 'xyz') ), typeof( fts5_locale('', 'abc') ); +} {text text} + +#-------------------------------------------------------------------------- +# Test that fts5_locale() works with external-content tables. +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + +do_execsql_test 7.1 { + CREATE TABLE t1(ii INTEGER PRIMARY KEY, bb BLOB, tt TEXT, locale TEXT); + CREATE VIEW v1 AS + SELECT ii AS rowid, bb, fts5_locale(locale, tt) AS tt FROM t1; + + CREATE VIRTUAL TABLE ft USING fts5( + bb, tt, locale=1, tokenize=tcl, content=v1 + ); + + INSERT INTO t1 VALUES(1, NULL, 'one two three', NULL); + INSERT INTO t1 VALUES(2, '7800616263', 'four five six', 'reverse'); + INSERT INTO t1 VALUES(3, '000000007800616263', 'seven eight nine', 'second'); +} + +do_execsql_test 7.2 { + INSERT INTO ft(ft) VALUES('rebuild'); + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +do_execsql_test 7.3 { + SELECT rowid, quote(bb), quote(tt) FROM ft +} { + 1 NULL {'one two three'} + 2 '7800616263' {'four five six'} + 3 '000000007800616263' {'seven eight nine'} +} + +do_execsql_test 7.4 { SELECT rowid FROM ft('six'); } +do_execsql_test 7.5 { SELECT rowid FROM ft(fts5_locale('reverse','six')); } 2 + +fts5_aux_test_functions db + +do_execsql_test 7.6 { + SELECT fts5_test_columnlocale(ft) FROM ft; +} { + {{} {}} {{} reverse} {{} second} +} + +#------------------------------------------------------------------------- +# Test that the porter tokenizer works with locales. +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + +do_execsql_test 8.1 { + CREATE VIRTUAL TABLE ft USING fts5(tt, locale=1, tokenize="porter tcl"); + CREATE VIRTUAL TABLE vocab USING fts5vocab('ft', instance); + + INSERT INTO ft(rowid, tt) VALUES + (111, fts5_locale('second', 'the porter tokenizer is a wrapper tokenizer')), + (222, fts5_locale('reverse', 'This value may also be set')); +} + +do_execsql_test 8.1 { + SELECT DISTINCT term FROM vocab ORDER BY 1 +} { + a eb eulav osla sihT te the token yam +} + +#------------------------------------------------------------------------- +# Test that position-lists (used by xInst, xPhraseFirst etc.) work with +# locales and modes other than detail=full. +# +foreach {tn detail} { + 1 detail=full + 2 detail=none + 3 detail=column +} { + reset_db + sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + do_execsql_test 9.$tn.0 " + CREATE VIRTUAL TABLE ft USING fts5(tt, locale=1, tokenize=tcl, $detail); + " + do_execsql_test 9.$tn.1 { + CREATE VIRTUAL TABLE vocab USING fts5vocab('ft', instance); + INSERT INTO ft(rowid, tt) VALUES + (-1, fts5_locale('second', 'it is an ancient mariner')); + } + + do_execsql_test 9.$tn.2 { + SELECT DISTINCT term FROM vocab + } {an it mariner} + + do_execsql_test 9.$tn.3 { + SELECT highlight(ft, 0, '[', ']') FROM ft('mariner') + } {{it is an ancient [mariner]}} +} + +#------------------------------------------------------------------------- +# Check some corrupt fts5_locale() blob formats are detected. 
+# +foreach_detail_mode $::testprefix { + + reset_db + sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create + fts5_aux_test_functions db + do_execsql_test 10.1 { + CREATE TABLE x1(ii INTEGER PRIMARY KEY, x); + CREATE VIRTUAL TABLE ft USING fts5(x, + content=x1, content_rowid=ii, locale=1, detail=%DETAIL%, columnsize=0 + ); + + CREATE VIRTUAL TABLE ft2 USING fts5( + x, locale=1, detail=%DETAIL%, columnsize=0 + ); + } + + foreach {tn v} { + 1 X'001152' + 2 X'0011223344' + 3 X'00E0B2EB68656c6c6f' + 4 X'00E0B2EB0068656c6c6f' + } { + do_execsql_test 10.2.$tn.0 { INSERT INTO ft(ft) VALUES('delete-all') } + do_execsql_test 10.2.$tn.1 { DELETE FROM x1; } + do_execsql_test 10.2.$tn.2 " INSERT INTO x1 VALUES(NULL, $v) " + + do_catchsql_test 10.2.$tn.3 { + INSERT INTO ft(ft) VALUES('rebuild'); + } {0 {}} + + do_catchsql_test 10.2.$tn.4 " + SELECT * FROM ft( test_setsubtype($v, 76) ); + " {1 {fts5: syntax error near ""}} + + do_execsql_test 10.2.$tn.5 { + INSERT INTO ft(rowid, x) VALUES(1, 'hello world'); + } + + if {"%DETAIL%"=="full"} { + do_execsql_test 10.2.$tn.6 { + SELECT fts5_test_poslist(ft) FROM ft('world'); + } {0.0.1} + + do_execsql_test 10.2.$tn.7.1 { + SELECT fts5_test_columnsize(ft) FROM ft('world'); + } {1} + + do_execsql_test 10.2.$tn.7.2 { + SELECT fts5_test_columnlocale(ft) FROM ft('world'); + } {{{}}} + } + + do_catchsql_test 10.2.$tn.8 { + SELECT count(*) FROM ft('hello') + } {0 1} + + do_catchsql_test 10.2.$tn.9 { + PRAGMA integrity_check; + } {0 ok} + + do_execsql_test 10.2.$tn.10 { + DELETE FROM x1; + INSERT INTO x1(ii, x) VALUES(1, 'hello world'); + } + + do_catchsql_test 10.2.$tn.11 " + INSERT INTO ft(ft, rowid, x) VALUES('delete', 1, test_setsubtype($v,76) ) + " {0 {}} + + do_catchsql_test 10.2.$tn.12 " + INSERT INTO ft(rowid, x) VALUES(2, test_setsubtype($v,76) ) + " {0 {}} + + do_execsql_test 10.2.$tn.13 { + INSERT INTO ft2(rowid, x) VALUES(1, 'hello world'); + } + do_execsql_test 10.2.$tn.14 "UPDATE ft2_content SET c0=$v" + + do_catchsql_test 10.2.$tn.15 { + PRAGMA integrity_check; + } {0 {{malformed inverted index for FTS5 table main.ft2}}} + + do_execsql_test 10.2.$tn.16 { + DELETE FROM ft2_content; + INSERT INTO ft2(ft2) VALUES('rebuild'); + } + } + +} + +#------------------------------------------------------------------------- +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create +fts5_aux_test_functions db +do_execsql_test 11.0 { + CREATE VIRTUAL TABLE x1 USING fts5(abc, locale=1); + INSERT INTO x1(rowid, abc) VALUES(123, fts5_locale('en_US', 'one two three')); +} + +do_catchsql_test 11.1 { + SELECT fts5_columnlocale(x1, -1) FROM x1('two'); +} {1 SQLITE_RANGE} +do_catchsql_test 11.2 { + SELECT fts5_columnlocale(x1, 1) FROM x1('two'); +} {1 SQLITE_RANGE} + +#------------------------------------------------------------------------- +# +reset_db +do_test 12.0 { + list [catch { + sqlite3_fts5_create_tokenizer -v2 -version 3 db tcl tcl_create + } msg] $msg +} {1 {error in fts5_api.xCreateTokenizer_v2()}} + +#------------------------------------------------------------------------- +# Tests for auxiliary function fts5_get_locale(). +# +reset_db + +# Check that if the table does not support locale=1, fts5_get_locale() +# always returns NULL. 
+do_execsql_test 13.1.0 { + CREATE VIRTUAL TABLE nolocale USING fts5(a, b); + INSERT INTO nolocale VALUES('one two three', 'four five six'); + INSERT INTO nolocale VALUES('three two one', 'seven eight nine'); +} +do_execsql_test 13.1.1 { + SELECT fts5_get_locale(nolocale, 0) IS NULL FROM nolocale; +} {1 1} +do_execsql_test 13.1.2 { + SELECT fts5_get_locale(nolocale, 1) IS NULL FROM nolocale('one + two'); +} {1} +do_execsql_test 13.1.3 { + SELECT fts5_get_locale(nolocale, 0) IS NULL FROM nolocale('one AND two'); +} {1 1} +do_execsql_test 13.1.4 { + SELECT + fts5_get_locale(nolocale, 1) IS NULL + FROM nolocale('three AND two') ORDER BY rank +} {1 1} +do_catchsql_test 13.1.5 { + SELECT fts5_get_locale(nolocale, 2) IS NULL FROM nolocale('three AND two'); +} {1 {column index out of range}} +do_catchsql_test 13.1.6 { + SELECT fts5_get_locale(nolocale, -1) IS NULL FROM nolocale('three AND two'); +} {1 {column index out of range}} +do_catchsql_test 13.1.7 { + SELECT fts5_get_locale(nolocale) IS NULL FROM nolocale('three AND two'); +} {1 {wrong number of arguments to function fts5_get_locale()}} +do_catchsql_test 13.1.8 { + SELECT fts5_get_locale(nolocale, 0, 0) IS NULL FROM nolocale('three AND two'); +} {1 {wrong number of arguments to function fts5_get_locale()}} +do_catchsql_test 13.1.9 { + SELECT fts5_get_locale(nolocale, 'text') FROM nolocale('three AND two'); +} {1 {non-integer argument passed to function fts5_get_locale()}} + + +# Check that if the table does support locale=1, fts5_get_locale() +# returns the locale of the identified row/column. +do_execsql_test 13.2.0 { + CREATE VIRTUAL TABLE ft USING fts5(a, b, locale=1); + INSERT INTO ft VALUES( + fts5_locale('th_TH', 'one two three'), 'four five six seven' + ); + INSERT INTO ft VALUES( + 'three two one', fts5_locale('en_AU', 'seven eight nine') + ); +} + +do_execsql_test 13.2.1 { + SELECT quote(fts5_get_locale(ft, 0)), quote(fts5_get_locale(ft, 1)) FROM ft +} { 'th_TH' NULL NULL 'en_AU' } +do_execsql_test 13.2.2 { + SELECT + quote(fts5_get_locale(ft, 0)), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') +} { 'th_TH' NULL NULL 'en_AU' } +do_execsql_test 13.2.3 { + SELECT + quote(fts5_get_locale(ft, 0)), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') ORDER BY rank +} { NULL 'en_AU' 'th_TH' NULL } +do_execsql_test 13.2.4 { + SELECT + quote(fts5_get_locale(ft, 0)), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') ORDER BY rowid +} { 'th_TH' NULL NULL 'en_AU' } + +do_execsql_test 13.2.5 { + SELECT + quote(fts5_get_locale(ft, '0')), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') ORDER BY rowid +} { 'th_TH' NULL NULL 'en_AU' } + +do_catchsql_test 13.2.6 { + SELECT + quote(fts5_get_locale(ft, '0.0')), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') ORDER BY rowid +} {1 {non-integer argument passed to function fts5_get_locale()}} +do_catchsql_test 13.2.7 { + SELECT + quote(fts5_get_locale(ft, 0.0)), quote(fts5_get_locale(ft, 1)) + FROM ft('one AND three') ORDER BY rowid +} {1 {non-integer argument passed to function fts5_get_locale()}} + +#------------------------------------------------------------------------- +# Check that UPDATE statements that may affect more than one row work. 
+# +reset_db +do_execsql_test 14.1 { + CREATE VIRTUAL TABLE ft USING fts5(a, b, locale=1); +} + +do_execsql_test 14.2 { + INSERT INTO ft VALUES('hello', 'world'); +} + +do_execsql_test 14.3 { + UPDATE ft SET b = fts5_locale('en_AU', 'world'); +} + +do_execsql_test 14.4 { + INSERT INTO ft VALUES(X'abcd', X'1234'); +} {} + +do_execsql_test 14.5 { + SELECT quote(a), quote(b) FROM ft +} {'hello' 'world' X'ABCD' X'1234'} + +do_execsql_test 14.6 { + DELETE FROM ft; + INSERT INTO ft VALUES(NULL, 'null'); + INSERT INTO ft VALUES(123, 'int'); + INSERT INTO ft VALUES(345.0, 'real'); + INSERT INTO ft VALUES('abc', 'text'); + INSERT INTO ft VALUES(fts5_locale('abc', 'def'), 'text'); + + SELECT a, typeof(a), b FROM ft +} { + {} null null + 123 integer int + 345.0 real real + abc text text + def text text +} + +do_execsql_test 14.7 { + SELECT quote(c0), typeof(c0) FROM ft_content +} { + NULL null + 123 integer + 345.0 real + 'abc' text + 'def' text +} + +#------------------------------------------------------------------------- +# Check that inserting UNINDEXED columns between indexed columns of a +# locale=1 table does not cause a problem. +# +reset_db +sqlite3_fts5_create_tokenizer -v2 db tcl tcl_create +fts5_aux_test_functions db + +do_execsql_test 15.1 { + CREATE VIRTUAL TABLE ft USING fts5(a, b UNINDEXED, c, locale=1, tokenize=tcl); +} + +do_execsql_test 15.2 { + INSERT INTO ft VALUES('one', 'two', 'three'); + INSERT INTO ft VALUES('one', 'two', fts5_locale('loc', 'three')); +} + +do_execsql_test 15.3 { + SELECT c2, l2 FROM ft_content +} {three {} three loc} + +do_execsql_test 15.4 { + SELECT c, fts5_columnlocale(ft, 2) FROM ft +} {three {} three loc} + + +finish_test + diff --git a/ext/fts5/test/fts5matchinfo.test b/ext/fts5/test/fts5matchinfo.test index 570693373f..a3bce869fb 100644 --- a/ext/fts5/test/fts5matchinfo.test +++ b/ext/fts5/test/fts5matchinfo.test @@ -517,7 +517,31 @@ fts5_aux_test_functions db do_execsql_test 15.3 { SELECT fts5_test_all(t1) FROM t1 LIMIT 1; } { - {columnsize {0 0} columntext {c d} columntotalsize {2 2} poslist {} tokenize {c d} rowcount 2} + {columnsize {1 1} columntext {c d} columntotalsize {2 2} poslist {} tokenize {c d} rowcount 2} } +#------------------------------------------------------------------------- +# +reset_db +do_execsql_test 16.0 { + CREATE TABLE t1(x); + BEGIN EXCLUSIVE; +} + +do_test 16.1 { + set rc [catch { + sqlite3 db2 test.db + db2 eval {SELECT * FROM t1} + } errmsg] + lappend rc $errmsg +} {1 {database is locked}} + +do_execsql_test 16.2 { + ROLLBACK; +} + +do_test 16.3 { + catchsql { SELECT * FROM t1 } db2 +} {0 {}} + finish_test diff --git a/ext/fts5/test/fts5merge.test b/ext/fts5/test/fts5merge.test index 3b86167b0d..c57c21ded3 100644 --- a/ext/fts5/test/fts5merge.test +++ b/ext/fts5/test/fts5merge.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5merge -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5misc.test b/ext/fts5/test/fts5misc.test index e354d20e2c..817be9560c 100644 --- a/ext/fts5/test/fts5misc.test +++ b/ext/fts5/test/fts5misc.test @@ -35,21 +35,21 @@ do_catchsql_test 1.1.2 { do_catchsql_test 1.2.1 { SELECT highlight(t1, 4, '', '') FROM t1('*id'); -} {0 {{}}} +} {1 {no such cursor: 4}} do_catchsql_test 1.2.2 { SELECT a FROM t1 WHERE rank = (SELECT highlight(t1, 4, '', '') FROM t1('*id')); -} {0 {}} +} {1 {no such cursor: 6}} do_catchsql_test 1.3.1 { SELECT highlight(t1, 4, '', '') FROM t1('*reads'); -} {1 {no such cursor: 1}} +} {1 {no such cursor: 0}} do_catchsql_test 1.3.2 { SELECT a FROM t1 WHERE rank = (SELECT highlight(t1, 4, '', '') FROM t1('*reads')); -} {1 {no such cursor: 1}} +} {1 {no such cursor: 0}} db close sqlite3 db test.db @@ -57,7 +57,12 @@ sqlite3 db test.db do_catchsql_test 1.3.3 { SELECT a FROM t1 WHERE rank = (SELECT highlight(t1, 4, '', '') FROM t1('*reads')); -} {1 {no such cursor: 1}} +} {1 {no such cursor: 0}} + +fts5_aux_test_functions db +do_catchsql_test 1.3.4 { + SELECT fts5_columntext(t1) FROM t1('*reads'); +} {1 {no such cursor: 0}} #------------------------------------------------------------------------- reset_db @@ -91,7 +96,6 @@ do_execsql_test 2.2.1 { INSERT INTO vt0(c0) VALUES ('xyz'); } -breakpoint do_execsql_test 2.2.2 { ALTER TABLE t0 RENAME TO t1; } @@ -329,7 +333,33 @@ do_execsql_test 12.3 { reset_db sqlite3_db_config db DEFENSIVE 1 -do_execsql_test 13.0 { +do_execsql_test 13.1.0 { + CREATE TABLE a (id INTEGER PRIMARY KEY, name TEXT); + CREATE VIRTUAL TABLE b USING fts5(name); + CREATE TRIGGER a_trigger AFTER INSERT ON a BEGIN + INSERT INTO b (name) VALUES ('foo'); + END; +} + +do_test 13.1.1 { + set ::STMT [ + sqlite3_prepare db "INSERT INTO a VALUES (1, 'foo') RETURNING id;" -1 dummy + ] + sqlite3_step $::STMT +} {SQLITE_ROW} + +do_test 13.1.2 { + sqlite3_finalize $::STMT +} {SQLITE_OK} + +do_test 13.1.3 { + sqlite3_errmsg db +} {not an error} + +reset_db +sqlite3_db_config db DEFENSIVE 1 +do_execsql_test 13.2.0 { + BEGIN; CREATE TABLE a (id INTEGER PRIMARY KEY, name TEXT); CREATE VIRTUAL TABLE b USING fts5(name); CREATE TRIGGER a_trigger AFTER INSERT ON a BEGIN @@ -337,20 +367,335 @@ do_execsql_test 13.0 { END; } -do_test 13.1 { +do_test 13.2.1 { set ::STMT [ sqlite3_prepare db "INSERT INTO a VALUES (1, 'foo') RETURNING id;" -1 dummy ] sqlite3_step $::STMT } {SQLITE_ROW} -do_test 13.2 { +do_test 13.2.2 { sqlite3_finalize $::STMT } {SQLITE_OK} -do_test 13.3 { +do_test 13.2.3 { sqlite3_errmsg db } {not an error} +#------------------------------------------------------------------------- +reset_db +db close +sqlite3 db test.db -uri 1 + +do_execsql_test 14.0 { + PRAGMA locking_mode=EXCLUSIVE; + BEGIN; + ATTACH 'file:/one?vfs=memdb' AS aux1; + ATTACH 'file:/one?vfs=memdb' AS aux2; + CREATE VIRTUAL TABLE t1 USING fts5(x); +} {exclusive} +do_catchsql_test 14.1 { + ANALYZE; +} {1 {database is locked}} +do_catchsql_test 14.2 { + COMMIT; +} {1 {database is locked}} +do_catchsql_test 14.3 { + COMMIT; +} {1 {database is locked}} +do_catchsql_test 14.4 { + ROLLBACK; +} {0 {}} + +#------------------------------------------------------------------------- +reset_db +sqlite3 db2 test.db + +do_execsql_test 15.0 { + CREATE TABLE t1(a, b); + BEGIN; + SELECT * FROM t1; +} + +do_execsql_test -db db2 15.1 { + BEGIN; + CREATE VIRTUAL TABLE x1 USING fts5(y); +} +do_test 15.2 { + list [catch { db2 eval COMMIT } msg] $msg +} {1 {database is locked}} +do_execsql_test -db db2 15.3 { + 
SAVEPOINT one; +} {} +do_execsql_test 15.4 END +do_test 15.5 { + list [catch { db2 eval COMMIT } msg] $msg +} {0 {}} + +db2 close + +#------------------------------------------------------------------------- +reset_db +forcedelete test.db2 +sqlite3 db2 test.db +do_execsql_test 16.0 { + + ATTACH 'test.db2' AS aux; + CREATE TABLE aux.t2(x,y); + INSERT INTO t2 VALUES(1, 2); + CREATE VIRTUAL TABLE x1 USING fts5(a); + BEGIN; + INSERT INTO x1 VALUES('abc'); + INSERT INTO t2 VALUES(3, 4); +} + +do_execsql_test -db db2 16.1 { + ATTACH 'test.db2' AS aux; + BEGIN; + SELECT * FROM t2 +} {1 2} + +do_catchsql_test 16.2 { + COMMIT; +} {1 {database is locked}} + +do_execsql_test 16.3 { + INSERT INTO x1 VALUES('def'); +} + +do_execsql_test -db db2 16.4 { + END +} + +do_execsql_test 16.5 { + COMMIT +} + +do_execsql_test -db db2 16.6 { + SELECT * FROM x1 +} {abc def} + +db2 close + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 17.1 { + CREATE VIRTUAL TABLE ft USING fts5(x, tokenize="unicode61 separators 'X'"); +} +do_execsql_test 17.2 { + SELECT 0 FROM ft WHERE ft MATCH 'X' AND ft MATCH 'X' +} +do_execsql_test 17.3 { + SELECT 0 FROM ft('X') +} + +do_execsql_test 17.4 { + CREATE VIRTUAL TABLE t0 USING fts5(c0, t="trigram"); + INSERT INTO t0 VALUES('assertionfaultproblem'); +} +do_execsql_test 17.5 { + SELECT 0 FROM t0(0) WHERE c0 GLOB 0; +} {} + +do_execsql_test 17.5 { + SELECT c0 FROM t0 WHERE c0 GLOB '*f*'; +} {assertionfaultproblem} +do_execsql_test 17.5 { + SELECT c0 FROM t0 WHERE c0 GLOB '*faul*'; +} {assertionfaultproblem} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 18.0 { + BEGIN; + CREATE VIRTUAL TABLE t1 USING fts5(text); + ALTER TABLE t1 RENAME TO t2; +} + +do_execsql_test 18.1 { + DROP TABLE t2; +} + +do_execsql_test 18.2 { + COMMIT; +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 19.0 { + CREATE VIRTUAL TABLE t1 USING fts5(text); + CREATE TABLE t2(text); + BEGIN; + INSERT INTO t1 VALUES('one'); + INSERT INTO t1 VALUES('two'); + INSERT INTO t1 VALUES('three'); + INSERT INTO t1 VALUES('one'); + INSERT INTO t1 VALUES('two'); + INSERT INTO t1 VALUES('three'); + SAVEPOINT one; + INSERT INTO t2 VALUES('one'); + INSERT INTO t2 VALUES('two'); + INSERT INTO t2 VALUES('three'); + ROLLBACK TO one; + COMMIT; +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 20.0 { + CREATE VIRTUAL TABLE x1 USING fts5(a); + INSERT INTO x1(rowid, a) VALUES + (1, 'a b c d'), + (2, 'x b c d'), + (3, 'x y z d'), + (4, 'a y c x'); +} + +do_execsql_test 20.1 { + SELECT rowid FROM x1 WHERE x1 MATCH 'a' AND x1 MATCH 'b'; +} {1} + +do_execsql_test 20.2 { + SELECT rowid FROM x1 WHERE x1 MATCH 'a' AND x1 MATCH 'y'; +} {4} + +do_execsql_test 20.3 { + SELECT rowid FROM x1 WHERE x1 MATCH 'a' OR x1 MATCH 'y'; +} {1 4 3} + +do_execsql_test 20.4 { + SELECT rowid FROM x1 WHERE x1 MATCH 'a' OR (x1 MATCH 'y' AND x1 MATCH 'd'); +} {1 4 3} + +do_execsql_test 20.5 { + SELECT rowid FROM x1 WHERE x1 MATCH 'z' OR (x1 MATCH 'a' AND x1 MATCH 'd'); +} {3 1} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 21.0 { + CREATE TABLE t1(ii INTEGER, x TEXT, y TEXT); + CREATE VIRTUAL TABLE xyz USING fts5(content_rowid=ii, content=t1, x, y); + INSERT INTO t1 VALUES(1, 'one', 'i'); + INSERT INTO t1 VALUES(2, 'two', 'ii'); + INSERT INTO t1 VALUES(3, 'tree', 'iii'); + INSERT INTO 
xyz(xyz) VALUES('rebuild'); +} + +do_execsql_test 21.1 { + UPDATE xyz SET y='TWO' WHERE rowid=2; + UPDATE t1 SET y='TWO' WHERE ii=2; +} + +do_execsql_test 21.2 { + PRAGMA integrity_check +} {ok} + +sqlite3_db_config db DEFENSIVE 1 +do_execsql_test 21.3 { + CREATE TABLE xyz_notashadow(x, y); + DROP TABLE xyz_notashadow; +} +sqlite3_db_config db DEFENSIVE 0 + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 22.0 { + SELECT fts5(NULL); +} {{}} +do_execsql_test 22.1 { + SELECT count(*) FROM ( + SELECT fts5_source_id() + ) +} {1} +execsql_pp { + SELECT fts5_source_id() +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 23.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); + INSERT INTO x1 VALUES('one + two + three'); + INSERT INTO x1 VALUES('one + xyz + three'); + INSERT INTO x1 VALUES('xyz + two + xyz'); +} +do_execsql_test 23.1 { + SELECT rowid FROM x1('one + two + three'); +} {1} + +do_execsql_test 23.2 { + SELECT rowid FROM x1('^".." AND one'); +} {} + +do_execsql_test 23.3 { + SELECT rowid FROM x1('abc NEAR ".." NEAR def'); +} {} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 24.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, detail='none'); + INSERT INTO t1(a) VALUES('a'); +} + +do_execsql_test 24.2 { + SELECT rank FROM ( SELECT rank FROM t1('a NOT "" NOT def') ) ORDER BY 1; +} {-1e-06} + +do_execsql_test 24.3 { + SELECT rank FROM ( SELECT rank FROM t1('a NOT � NOT def') ) ORDER BY 1; +} {-1e-06} + +do_execsql_test 24.4 { + SELECT rank FROM ( SELECT rank FROM t1('a NOT "" NOT def') ); +} {-1e-06} + +#------------------------------------------------------------------------- +reset_db +fts5_aux_test_functions db + +do_execsql_test 25.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, detail='none', content=''); + INSERT INTO t1(a) VALUES('a b c'); +} + +do_execsql_test 25.0 { + SELECT fts5_test_poslist(t1) FROM t1('b') ORDER BY rank; +} {{}} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 26.0 { + PRAGMA foreign_keys = ON; + CREATE TABLE t1(x INTEGER PRIMARY KEY); + CREATE TABLE t2(y INTEGER PRIMARY KEY, + z INTEGER REFERENCES t1(x) DEFERRABLE INITIALLY DEFERRED + ); + CREATE VIRTUAL TABLE t3 USING fts5(a, b, content='', tokendata=1); +} + +do_execsql_test 26.1 { + BEGIN; + INSERT INTO t2 VALUES(1,111); + INSERT INTO t3 VALUES(3,3); + PRAGMA defer_foreign_keys=ON; + DELETE FROM t2 WHERE y+1; + COMMIT; +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 27.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, b); + INSERT INTO ft1(rowid, a, b) VALUES(3, '3', '3'); +} + +do_execsql_test 27.1 { + SELECT * FROM ft1 WHERE rowid=3 AND b MATCH 'hello'; +} + finish_test diff --git a/ext/fts5/test/fts5near.test b/ext/fts5/test/fts5near.test index bbe144a898..318a169488 100644 --- a/ext/fts5/test/fts5near.test +++ b/ext/fts5/test/fts5near.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5near -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5onepass.test b/ext/fts5/test/fts5onepass.test index 01021ed348..b334096754 100644 --- a/ext/fts5/test/fts5onepass.test +++ b/ext/fts5/test/fts5onepass.test @@ -38,15 +38,15 @@ foreach {tn sql uses} { 1.2 { DELETE FROM ft WHERE rowid=? } 0 1.3 { DELETE FROM ft WHERE rowid=? } 0 1.4 { DELETE FROM ft WHERE ft MATCH '1' } 1 - 1.5 { DELETE FROM ft WHERE ft MATCH '1' AND rowid=? } 1 - 1.6 { DELETE FROM ft WHERE ft MATCH '1' AND rowid=? } 1 + 1.5 { DELETE FROM ft WHERE ft MATCH '1' AND rowid=? } 0 + 1.6 { DELETE FROM ft WHERE ft MATCH '1' AND rowid=? } 0 2.1 { UPDATE ft SET content='a b c' } 1 2.2 { UPDATE ft SET content='a b c' WHERE rowid=? } 0 2.3 { UPDATE ft SET content='a b c' WHERE rowid=? } 0 2.4 { UPDATE ft SET content='a b c' WHERE ft MATCH '1' } 1 - 2.5 { UPDATE ft SET content='a b c' WHERE ft MATCH '1' AND rowid=? } 1 - 2.6 { UPDATE ft SET content='a b c' WHERE ft MATCH '1' AND rowid=? } 1 + 2.5 { UPDATE ft SET content='a b c' WHERE ft MATCH '1' AND rowid=? } 0 + 2.6 { UPDATE ft SET content='a b c' WHERE ft MATCH '1' AND rowid=? } 0 } { do_test 1.$tn { sql_uses_stmt db $sql } $uses } diff --git a/ext/fts5/test/fts5optimize.test b/ext/fts5/test/fts5optimize.test index e0f0fd7242..610bf439c9 100644 --- a/ext/fts5/test/fts5optimize.test +++ b/ext/fts5/test/fts5optimize.test @@ -14,7 +14,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5optimize -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5optimize2.test b/ext/fts5/test/fts5optimize2.test new file mode 100644 index 0000000000..57f4e96b99 --- /dev/null +++ b/ext/fts5/test/fts5optimize2.test @@ -0,0 +1,45 @@ +# 2014 Dec 20 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# TESTRUNNER: superslow +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5optimize2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +set nLoop 2500 + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('pgsz', 32); +} + +do_test 1.1 { + for {set ii 0} {$ii < $nLoop} {incr ii} { + execsql { + INSERT INTO t1 VALUES('abc def ghi'); + INSERT INTO t1 VALUES('jkl mno pqr'); + INSERT INTO t1(t1) VALUES('optimize'); + } + } +} {} + +do_execsql_test 1.2 { + SELECT count(*) FROM t1('mno') +} $nLoop + +finish_test diff --git a/ext/fts5/test/fts5optimize3.test b/ext/fts5/test/fts5optimize3.test new file mode 100644 index 0000000000..79e62f9f22 --- /dev/null +++ b/ext/fts5/test/fts5optimize3.test @@ -0,0 +1,45 @@ +# 2023 Aug 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# TESTRUNNER: superslow +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5optimize2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +set nLoop 2500 + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t2 USING fts5(x); + INSERT INTO t2(t2, rank) VALUES('pgsz', 32); +} + +do_test 1.1 { + for {set ii 0} {$ii < $nLoop} {incr ii} { + execsql { + INSERT INTO t2 VALUES('abc def ghi'); + INSERT INTO t2 VALUES('jkl mno pqr'); + INSERT INTO t2(t2, rank) VALUES('merge', -1); + } + } +} {} + +do_execsql_test 1.2 { + SELECT count(*) FROM t2('mno') +} $nLoop + +finish_test diff --git a/ext/fts5/test/fts5origintext.test b/ext/fts5/test/fts5origintext.test new file mode 100644 index 0000000000..be77cbfca5 --- /dev/null +++ b/ext/fts5/test/fts5origintext.test @@ -0,0 +1,357 @@ +# 2014 Jan 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +foreach_detail_mode $testprefix { +foreach {tn insttoken} { + 1 0 + 2 1 +} { +reset_db + +sqlite3_fts5_register_origintext db +do_execsql_test $tn.1.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", detail=%DETAIL% + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance); +} + +do_execsql_test $tn.1.1 { + INSERT INTO ft VALUES('Hello world'); +} + +do_execsql_test $tn.1.2 { + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +proc b {x} { string map [list "\0" "."] $x } +db func b b + +do_execsql_test $tn.1.3 { + select b(term) from vocab; +} { + hello.Hello + world +} + +do_execsql_test $tn.1.4 { + SELECT rowid FROM ft('Hello'); +} {1} + +#------------------------------------------------------------------------- +reset_db + +# Return a random integer between 0 and n-1. 
+# +proc random {n} { + expr {abs(int(rand()*$n))} +} + +proc select_one {list} { + set n [llength $list] + lindex $list [random $n] +} + +proc term {} { + set first_letter { + a b c d e f g h i j k l m n o p q r s t u v w x y z + A B C D E F G H I J K L M N O P Q R S T U V W X Y Z + } + + set term [select_one $first_letter] + append term [random 100] +} + +proc document {} { + set nTerm [expr [random 5] + 5] + set doc "" + for {set ii 0} {$ii < $nTerm} {incr ii} { + lappend doc [term] + } + set doc +} +db func document document + +sqlite3_fts5_register_origintext db +do_execsql_test $tn.2.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", detail=%DETAIL% + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + INSERT INTO ft(ft, rank) VALUES('pgsz', 128); + CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance); +} + +do_test $tn.2.1 { + for {set ii 0} {$ii < 500} {incr ii} { + execsql { INSERT INTO ft VALUES( document() ) } + } +} {} + +do_execsql_test $tn.2.2 { + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +do_execsql_test $tn.2.3 { + INSERT INTO ft(ft, rank) VALUES('merge', 16); +} + +do_execsql_test $tn.2.4 { + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +do_execsql_test $tn.2.5 { + INSERT INTO ft(ft) VALUES('optimize'); +} + +#------------------------------------------------------------------------- +reset_db + +sqlite3_fts5_register_origintext db +do_execsql_test $tn.3.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", detail=%DETAIL% + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance); + + INSERT INTO ft(rowid, x) VALUES(1, 'hello'); + INSERT INTO ft(rowid, x) VALUES(2, 'Hello'); + INSERT INTO ft(rowid, x) VALUES(3, 'HELLO'); +} + +#proc b {x} { string map [list "\0" "."] $x } +#db func b b +#execsql_pp { SELECT b(term) FROM vocab } + +do_execsql_test $tn.3.1.1 { SELECT rowid FROM ft('hello') } 1 +do_execsql_test $tn.3.1.2 { SELECT rowid FROM ft('Hello') } 2 +do_execsql_test $tn.3.1.3 { SELECT rowid FROM ft('HELLO') } 3 + +do_execsql_test $tn.3.2 { + CREATE VIRTUAL TABLE ft2 USING fts5(x, + tokenize="origintext unicode61", + tokendata=1, + detail=%DETAIL% + ); + INSERT INTO ft2(ft2, rank) VALUES('insttoken', $insttoken); + CREATE VIRTUAL TABLE vocab2 USING fts5vocab(ft2, instance); + + INSERT INTO ft2(rowid, x) VALUES(1, 'hello'); + INSERT INTO ft2(rowid, x) VALUES(2, 'Hello'); + INSERT INTO ft2(rowid, x) VALUES(3, 'HELLO'); + + INSERT INTO ft2(rowid, x) VALUES(10, 'helloooo'); +} + +#proc b {x} { string map [list "\0" "."] $x } +#db func b b +#execsql_pp { SELECT b(term) FROM vocab } + +do_execsql_test $tn.3.3.1 { SELECT rowid FROM ft2('hello') } {1 2 3} +do_execsql_test $tn.3.3.2 { SELECT rowid FROM ft2('Hello') } {1 2 3} +do_execsql_test $tn.3.3.3 { SELECT rowid FROM ft2('HELLO') } {1 2 3} + +do_execsql_test $tn.3.3.4 { SELECT rowid FROM ft2('hello*') } {1 2 3 10} + +do_execsql_test $tn.3.3.5.1 { SELECT rowid FROM ft2('HELLO') ORDER BY rowid DESC} { + 3 2 1 +} +do_execsql_test $tn.3.3.5.2 { SELECT rowid FROM ft2('HELLO') ORDER BY +rowid DESC} { + 3 2 1 +} + +#------------------------------------------------------------------------- +# +reset_db +sqlite3_fts5_register_origintext db +proc querytoken {cmd iPhrase iToken} { + set txt [$cmd xQueryToken $iPhrase $iToken] + string map [list "\0" "."] $txt +} +sqlite3_fts5_create_function db querytoken querytoken + +do_execsql_test $tn.4.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, 
tokenize='origintext unicode61', tokendata=1, detail=%DETAIL% + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + INSERT INTO ft VALUES('one two three four'); +} + +do_execsql_test $tn.4.1 { + SELECT rowid, querytoken(ft, 0, 0) FROM ft('TwO') +} {1 two.TwO} +do_execsql_test $tn.4.2 { + SELECT rowid, querytoken(ft, 0, 0) FROM ft('one TWO ThreE') +} {1 one} +do_execsql_test $tn.4.3 { + SELECT rowid, querytoken(ft, 1, 0) FROM ft('one TWO ThreE') +} {1 two.TWO} + +if {"%DETAIL%"=="full"} { + # Phrase queries are only supported for detail=full. + # + do_execsql_test $tn.4.4 { + SELECT rowid, querytoken(ft, 0, 2) FROM ft('"one TWO ThreE"') + } {1 three.ThreE} + do_catchsql_test $tn.4.5 { + SELECT rowid, querytoken(ft, 0, 3) FROM ft('"one TWO ThreE"') + } {1 SQLITE_RANGE} + do_catchsql_test $tn.4.6 { + SELECT rowid, querytoken(ft, 1, 0) FROM ft('"one TWO ThreE"') + } {1 SQLITE_RANGE} + do_catchsql_test $tn.4.7 { + SELECT rowid, querytoken(ft, -1, 0) FROM ft('"one TWO ThreE"') + } {1 SQLITE_RANGE} +} + +#------------------------------------------------------------------------- +# +reset_db +sqlite3_fts5_register_origintext db +proc insttoken {cmd iIdx iToken} { + set txt [$cmd xInstToken $iIdx $iToken] + string map [list "\0" "."] $txt +} +sqlite3_fts5_create_function db insttoken insttoken +fts5_aux_test_functions db + +do_execsql_test $tn.5.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize='origintext unicode61', tokendata=1, detail=%DETAIL% + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + INSERT INTO ft VALUES('one ONE One oNe oNE one'); +} + +do_execsql_test $tn.5.1 { + SELECT insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0), + insttoken(ft, 3, 0), + insttoken(ft, 4, 0), + insttoken(ft, 5, 0) + FROM ft('one'); +} { + one one.ONE one.One one.oNe one.oNE one +} + +do_execsql_test $tn.5.2 { + SELECT insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0), + insttoken(ft, 3, 0), + insttoken(ft, 4, 0), + insttoken(ft, 5, 0) + FROM ft('on*'); +} { + one one.ONE one.One one.oNe one.oNE one +} + +do_execsql_test $tn.5.3 { + SELECT insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0), + insttoken(ft, 3, 0), + insttoken(ft, 4, 0), + insttoken(ft, 5, 0) + FROM ft(fts5_insttoken('on*')); +} { + one one.ONE one.One one.oNe one.oNE one +} + +do_execsql_test $tn.5.4 { + SELECT insttoken(ft, 1, 0) FROM ft('one'); +} { + one.ONE +} + +do_execsql_test $tn.5.5 { + SELECT fts5_test_poslist(ft) FROM ft('one'); +} { + {0.0.0 0.0.1 0.0.2 0.0.3 0.0.4 0.0.5} +} + +#------------------------------------------------------------------------- +# Test the xInstToken() API with: +# +# * a non tokendata=1 table. +# * prefix queries. 
+# +reset_db +sqlite3_fts5_register_origintext db +do_execsql_test $tn.6.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, y, tokenize='origintext unicode61', detail=%DETAIL%, tokendata=0 + ); + INSERT INTO ft(ft, rank) VALUES('insttoken', $insttoken); + + INSERT INTO ft VALUES('One Two', 'Three two'); + INSERT INTO ft VALUES('three Three', 'one One'); +} +proc tokens {cmd} { + set ret [list] + for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} { + set txt [$cmd xInstToken $iTok 0] + set txt [string map [list "\0" "."] $txt] + lappend ret $txt + } + set ret +} +sqlite3_fts5_create_function db tokens tokens + +do_execsql_test $tn.6.1 { + SELECT rowid, tokens(ft) FROM ft('One'); +} {1 one.One 2 one.One} + +do_execsql_test $tn.6.2 { + SELECT rowid, tokens(ft) FROM ft('on*'); +} {1 one.One 2 {one one.One}} + +do_execsql_test $tn.6.3 { + SELECT rowid, tokens(ft) FROM ft('Three*'); +} {1 three.Three 2 three.Three} + +fts5_aux_test_functions db +do_catchsql_test $tn.6.4 { + SELECT fts5_test_insttoken(ft, -1, 0) FROM ft('one'); +} {1 SQLITE_RANGE} + +do_catchsql_test $tn.6.5 { + SELECT fts5_test_insttoken(ft, 1, 0) FROM ft('one'); +} {1 SQLITE_RANGE} + +do_catchsql_test $tn.6.6 { + CREATE VIRTUAL TABLE ft2 USING fts5(x, tokendata=2); +} {1 {malformed tokendata=... directive}} +do_catchsql_test $tn.6.7 { + CREATE VIRTUAL TABLE ft2 USING fts5(x, content='', tokendata=11); +} {1 {malformed tokendata=... directive}} + +} +} + +finish_test + diff --git a/ext/fts5/test/fts5origintext2.test b/ext/fts5/test/fts5origintext2.test new file mode 100644 index 0000000000..a8c7172344 --- /dev/null +++ b/ext/fts5/test/fts5origintext2.test @@ -0,0 +1,146 @@ +# 2014 Jan 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +sqlite3_fts5_register_origintext db +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", tokendata=1 + ); +} + +do_execsql_test 1.1 { + BEGIN; + INSERT INTO ft VALUES('Hello'); + INSERT INTO ft VALUES('hello'); + INSERT INTO ft VALUES('HELLO'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('World'); + INSERT INTO ft VALUES('world'); + INSERT INTO ft VALUES('WORLD'); + COMMIT; +} + +do_execsql_test 1.2 { SELECT rowid FROM ft('hello'); } {1 2 3} +do_execsql_test 1.3 { SELECT rowid FROM ft('today'); } {4 5 6} +do_execsql_test 1.4 { SELECT rowid FROM ft('world'); } {7 8 9} + +do_execsql_test 1.5 { + SELECT count(*) FROM ft_data +} 3 + +do_execsql_test 1.6 { + DELETE FROM ft; + INSERT INTO ft(ft, rank) VALUES('pgsz', 64); + BEGIN; + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100 + ) + INSERT INTO ft SELECT 'Hello Hello Hello Hello Hello Hello Hello' FROM s; + INSERT INTO ft VALUES ('hELLO hELLO hELLO'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('today today today today today today today'); + INSERT INTO ft VALUES('World World World World World World World'); + INSERT INTO ft VALUES('world world world world world world world'); + INSERT INTO ft VALUES('WORLD WORLD WORLD WORLD WORLD WORLD WORLD'); + INSERT INTO ft VALUES('World World World World World World World'); + INSERT INTO ft VALUES('world world world world world world world'); + INSERT INTO ft VALUES('WORLD WORLD WORLD WORLD WORLD WORLD WORLD'); + COMMIT; +} + +do_execsql_test 1.7 { + SELECT count(*) FROM ft_data; +} 23 + +do_execsql_test 1.8 { SELECT rowid FROM ft('hello') WHERE rowid>100; } {101} + +do_execsql_test 1.9 { + DELETE FROM ft; + INSERT INTO ft(ft) VALUES('optimize'); + SELECT count(*) FROM ft_data; +} {2} +do_execsql_test 1.10 { + BEGIN; + INSERT INTO ft VALUES('Hello'); + INSERT INTO ft VALUES('hello'); + INSERT INTO ft VALUES('HELLO'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('today'); + INSERT INTO ft VALUES('World'); + INSERT INTO ft VALUES('world'); + INSERT INTO ft VALUES('WORLD'); +} + +do_execsql_test 1.11 { SELECT rowid FROM ft('hello'); } {1 2 3} +do_execsql_test 1.12 { SELECT rowid FROM ft('today'); } {4 5 6} +do_execsql_test 1.13 { SELECT rowid FROM ft('world'); } {7 8 9} +do_execsql_test 1.14 { SELECT rowid FROM ft('hello') ORDER BY rank; } {1 2 3} + +#------------------------------------------------------------------------ +reset_db +sqlite3_fts5_register_origintext db +proc tokens {cmd} { + set ret [list] + for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} { + set txt [$cmd xInstToken $iTok 0] + set txt [string map [list "\0" "."] $txt] + lappend ret $txt + } + set ret +} +sqlite3_fts5_create_function db tokens tokens + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE x1 USING fts5( + v, tokenize="origintext unicode61", tokendata=1, detail=none + ); + + INSERT INTO x1 VALUES('xxx Xxx XXX yyy YYY yyy'); + INSERT INTO x1 VALUES('xxx yyy xxx yyy yyy yyy'); +} + +do_execsql_test 2.1 { + SELECT tokens(x1) FROM x1('xxx'); +} { + {xxx xxx.Xxx 
xxx.XXX} {xxx xxx} +} + +do_execsql_test 2.2 { + UPDATE x1_content SET c0 = 'xxx xxX xxx yyy yyy yyy' WHERE id=1; +} + +do_execsql_test 2.3 { + SELECT tokens(x1) FROM x1('xxx'); +} { + {xxx {} xxx} {xxx xxx} +} + +finish_test + diff --git a/ext/fts5/test/fts5origintext3.test b/ext/fts5/test/fts5origintext3.test new file mode 100644 index 0000000000..351ab1f617 --- /dev/null +++ b/ext/fts5/test/fts5origintext3.test @@ -0,0 +1,141 @@ +# 2023 November 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext3 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +foreach_detail_mode $testprefix { + foreach {tn insttoken} { + 1 0 + 2 1 + } { + + reset_db + + sqlite3_fts5_register_origintext db + fts5_aux_test_functions db + proc insttoken {cmd iIdx iToken} { + set txt [$cmd xInstToken $iIdx $iToken] + string map [list "\0" "."] $txt + } + sqlite3_fts5_create_function db insttoken insttoken + + do_execsql_test $tn.1.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL% + ); + } + + do_execsql_test $tn.1.0.1 { + INSERT INTO ft(ft, rank) VALUES('insttoken', 1); + } + + do_execsql_test $tn.1.1 { + INSERT INTO ft VALUES('Hello world HELLO WORLD hello'); + } + + do_execsql_test $tn.1.2 { + SELECT fts5_test_poslist(ft) FROM ft('hello'); + } {{0.0.0 0.0.2 0.0.4}} + + do_execsql_test $tn.1.3 { + SELECT + insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0) + FROM ft('hello'); + } {hello.Hello hello.HELLO hello} + + do_execsql_test $tn.1.3.1 { + SELECT + insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0) + FROM ft('hel*'); + } {hello.Hello hello.HELLO hello} + + do_execsql_test $tn.1.4 { + SELECT + insttoken(ft, 0, 0), + insttoken(ft, 1, 0), + insttoken(ft, 2, 0) + FROM ft('hello') ORDER BY rank; + } {hello.Hello hello.HELLO hello} + + do_execsql_test $tn.1.5 { + CREATE VIRTUAL TABLE ft2 USING fts5( + x, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL% + ); + INSERT INTO ft2(rowid, x) VALUES(1, 'ONE one two three ONE'); + INSERT INTO ft2(rowid, x) VALUES(2, 'TWO one two three TWO'); + INSERT INTO ft2(rowid, x) VALUES(3, 'THREE one two three THREE'); + } + + do_execsql_test $tn.1.6 { + SELECT insttoken(ft2, 0, 0), rowid FROM ft2('three') ORDER BY rank; + } {three.THREE 3 three 1 three 2} + + do_execsql_test $tn.1.7 { + INSERT INTO ft2(rowid, x) VALUES(10, 'aaa bbb BBB'); + INSERT INTO ft2(rowid, x) VALUES(12, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(13, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(14, 'bbb BBB bbb'); + INSERT INTO ft2(rowid, x) VALUES(15, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(16, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(17, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(18, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(19, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(20, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(21, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(22, 'bbb bbb bbb'); + INSERT INTO ft2(rowid, x) VALUES(23, 'bbb bbb bbb'); + INSERT INTO 
ft2(rowid, x) VALUES(24, 'aaa bbb BBB'); + } + + do_execsql_test $tn.1.8 { SELECT rowid FROM ft2('aaa AND bbb'); } {10 24} + do_execsql_test $tn.1.9 { SELECT rowid FROM ft2('bbb AND aaa'); } {10 24} + + do_execsql_test $tn.2.0 { + CREATE VIRTUAL TABLE ft3 USING fts5( + x, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL%, + prefix=2 + ); + } + do_execsql_test $tn.2.1 { + INSERT INTO ft3(rowid, x) VALUES(1, 'one'); + INSERT INTO ft3(rowid, x) VALUES(2, 'ONE'); + INSERT INTO ft3(rowid, x) VALUES(3, 'ONT'); + INSERT INTO ft3(rowid, x) VALUES(4, 'on'); + INSERT INTO ft3(rowid, x) VALUES(5, 'On'); + } + + do_execsql_test $tn.2.2 { + SELECT rowid FROM ft3('on*'); + } {1 2 3 4 5} + + do_execsql_test $tn.2.3 { + SELECT rowid, insttoken(ft3, 0, 0) FROM ft3('on*'); + } {1 one 2 one.ONE 3 ont.ONT 4 on 5 on.On} + + } +} + +finish_test + diff --git a/ext/fts5/test/fts5origintext4.test b/ext/fts5/test/fts5origintext4.test new file mode 100644 index 0000000000..3b907ba2cc --- /dev/null +++ b/ext/fts5/test/fts5origintext4.test @@ -0,0 +1,80 @@ +# 2023 November 22 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext4 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +# The tests below verify that a doclist-index is used to limit the number +# of pages loaded into the cache. It does this by querying sqlite3_db_status() +# for the amount of memory used by the pager cache. +# +# memsubsys1 effectively limits the page-cache to 24 pages. Which masks +# the effect tested by the tests in this file. And "mmap" prevents the +# cache from being used, also preventing these tests from working. +# +if {[permutation]=="memsubsys1" || [permutation]=="mmap"} { + finish_test + return +} + +sqlite3_fts5_register_origintext db +do_execsql_test 1.0 { + PRAGMA page_size = 4096; + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", tokendata=1 + ); +} + +do_execsql_test 1.1 { + BEGIN; + INSERT INTO ft SELECT 'the first thing'; + + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<90000 + ) + INSERT INTO ft SELECT 'The second thing' FROM s; + + INSERT INTO ft SELECT 'the first thing'; + COMMIT; + INSERT INTO ft(ft) VALUES('optimize'); +} + +foreach {tn sql expr} { + 1 { SELECT rowid FROM ft('the') } {$mem > 250000} + 2 { SELECT rowid FROM ft('first') } {$mem < 50000} + 3 { SELECT rowid FROM ft('the first') } {$mem < 50000} +} { + db close + sqlite3 db test.db + sqlite3_fts5_register_origintext db + + execsql $sql + do_test 1.2.$tn { + set mem [lindex [sqlite3_db_status db CACHE_USED 0] 1] + expr $expr + } 1 +} + +proc b {x} { string map [list "\0" "."] $x } +db func b b +# execsql_pp { SELECT segid, b(term), pgno from ft_idx } + +finish_test + diff --git a/ext/fts5/test/fts5origintext5.test b/ext/fts5/test/fts5origintext5.test new file mode 100644 index 0000000000..848cc15b5c --- /dev/null +++ b/ext/fts5/test/fts5origintext5.test @@ -0,0 +1,273 @@ +# 2023 Dec 04 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. 
+# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests for tables that use both tokendata=1 and contentless_delete=1. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +# Return a random integer between 0 and n-1. +# +proc random {n} { expr {abs(int(rand()*$n))} } + +# Select an element of the list passed as the only argument at random and +# return it. +# +proc select_one {list} { + set n [llength $list] + lindex $list [random $n] +} + +# Given a term that consists entirely of alphabet characters, return all +# permutations of the term using upper and lower case characters. e.g. +# +# "abc" -> {CBA cBA CbA cbA CBa cBa Cba cba} +# +proc casify {term {lRet {{}}}} { + if {$term==""} { return $lRet } + set t [string range $term 1 end] + set f1 [string toupper [string range $term 0 0]] + set f2 [string tolower [string range $term 0 0]] + set ret [list] + foreach x $lRet { + lappend ret "$x$f1" + lappend ret "$x$f2" + } + return [casify $t $ret] +} + +proc vocab {} { + list abc def ghi jkl mno pqr stu vwx yza +} + +# Return a random 3 letter term. +# +proc term {} { + if {[info exists ::expanded_vocab]==0} { + foreach v [vocab] { lappend ::expanded_vocab {*}[casify $v] } + } + + select_one $::expanded_vocab +} + +# Return a document - between 3 and 10 terms. +# +proc document {} { + set nTerm [expr [random 3] + 7] + set doc "" + for {set ii 0} {$ii < $nTerm} {incr ii} { + lappend doc [term] + } + set doc +} +db func document document + +#------------------------------------------------------------------------- + +expr srand(6) + +set NDOC 200 +set NLOOP 50 + +sqlite3_fts5_register_origintext db + +proc tokens {cmd} { + set ret [list] + for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} { + set txt [$cmd xInstToken $iTok 0] + set txt [string map [list "\0" "."] $txt] + lappend ret $txt + } + set ret +} +sqlite3_fts5_create_function db tokens tokens + +proc rankfunc {cmd} { + $cmd xRowid +} +sqlite3_fts5_create_function db rankfunc rankfunc + +proc ctrl_tokens {term args} { + set ret [list] + set term [string tolower $term] + foreach doc $args { + foreach a $doc { + if {[string tolower $a]==$term} { + if {$a==$term} { + lappend ret $a + } else { + lappend ret [string tolower $a].$a + } + } + } + } + set ret +} +db func ctrl_tokens ctrl_tokens + +proc do_all_vocab_test {tn} { + foreach ::v [concat [vocab] nnn] { + set answer [execsql { + SELECT id, ctrl_tokens($::v, x) FROM ctrl WHERE x LIKE '%' || $::v || '%' + }] + do_execsql_test $tn.$::v.1 { + SELECT rowid, tokens(ft) FROM ft($::v) + } $answer + do_execsql_test $tn.$::v.2 { + SELECT rowid, tokens(ft) FROM ft($::v) ORDER BY rank + } $answer + } +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, tokenize="origintext unicode61", content=, contentless_delete=1, + tokendata=1 + ); + + CREATE TABLE ctrl(id INTEGER PRIMARY KEY, x TEXT); + INSERT INTO ft(ft, rank) VALUES('pgsz', 64); + INSERT INTO ft(ft, rank) VALUES('rank', 'rankfunc()'); +} +do_test 1.1 { + for {set ii 0} {$ii < $NDOC} {incr ii} { + set doc [document] + execsql { + INSERT INTO ft(rowid, x) VALUES($ii, $doc); + INSERT INTO ctrl(id, x) VALUES($ii, $doc); + } + } +} {} + +#execsql_pp { SELECT * FROM ctrl } +#execsql_pp { SELECT * FROM ft } 
+#fts5_aux_test_functions db +#execsql_pp { SELECT rowid, tokens(ft), fts5_test_poslist(ft) FROM ft('ghi'); } + +do_all_vocab_test 1.2 + +for {set ii 0} {$ii < $NLOOP} {incr ii} { + set lRowid [execsql { SELECT id FROM ctrl WHERE random() % 2 }] + foreach r $lRowid { + execsql { DELETE FROM ft WHERE rowid = $r } + execsql { DELETE FROM ctrl WHERE rowid = $r } + + set doc [document] + execsql { INSERT INTO ft(rowid, x) VALUES($r, $doc) } + execsql { INSERT INTO ctrl(id, x) VALUES($r, $doc) } + } + do_all_vocab_test 1.3.$ii +} + +#------------------------------------------------------------------------- + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft2 USING fts5( + x, y, tokenize="origintext unicode61", content=, contentless_delete=1, + tokendata=1 + ); + + CREATE TABLE ctrl2(id INTEGER PRIMARY KEY, x TEXT, y TEXT); + INSERT INTO ft2(ft2, rank) VALUES('pgsz', 64); + INSERT INTO ft2(ft2, rank) VALUES('rank', 'rankfunc()'); +} +do_test 2.1 { + for {set ii 0} {$ii < $NDOC} {incr ii} { + set doc1 [document] + set doc2 [document] + execsql { + INSERT INTO ft2(rowid, x, y) VALUES($ii, $doc, $doc2); + INSERT INTO ctrl2(id, x, y) VALUES($ii, $doc, $doc2); + } + } +} {} + +proc do_all_vocab_test2 {tn} { + foreach ::v [vocab] { + set answer [execsql { + SELECT id, ctrl_tokens($::v, x, y) FROM ctrl2 + WHERE x LIKE '%' || $::v || '%' OR y LIKE '%' || $::v || '%'; + }] + do_execsql_test $tn.$::v.1 { + SELECT rowid, tokens(ft2) FROM ft2($::v) + } $answer + do_execsql_test $tn.$::v.2 { + SELECT rowid, tokens(ft2) FROM ft2($::v) ORDER BY rank + } $answer + } +} + +do_all_vocab_test2 2.2 + +for {set ii 0} {$ii < $NLOOP} {incr ii} { + set lRowid [execsql { SELECT id FROM ctrl2 WHERE random() % 2 }] + foreach r $lRowid { + execsql { DELETE FROM ft2 WHERE rowid = $r } + execsql { DELETE FROM ctrl2 WHERE rowid = $r } + + set doc1 [document] + set doc2 [document] + execsql { INSERT INTO ft2(rowid, x, y) VALUES($r, $doc, $doc1) } + execsql { INSERT INTO ctrl2(id, x, y) VALUES($r, $doc, $doc2) } + } + do_all_vocab_test 2.3.$ii +} + +#------------------------------------------------------------------------- + +unset -nocomplain ::expanded_vocab +proc vocab {} { + list abcde fghij klmno +} + +proc do_all_vocab_test3 {tn} { + foreach ::v [concat [vocab] nnn] { + set answer [execsql { + SELECT rowid, ctrl_tokens($::v, w) FROM ctrl3 WHERE w LIKE '%' || $::v || '%' + }] + do_execsql_test $tn.$::v.1 { + SELECT rowid, tokens(ft3) FROM ft3($::v) + } $answer + do_execsql_test $tn.$::v.2 { + SELECT rowid, tokens(ft3) FROM ft3($::v) ORDER BY rank + } $answer + } +} + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE ft3 USING fts5( + w, tokenize="origintext unicode61", content=, contentless_delete=1, + tokendata=1 + ); + INSERT INTO ft3(ft3, rank) VALUES('rank', 'rankfunc()'); + CREATE TABLE ctrl3(w); +} + +do_execsql_test 3.1 { + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<2 + ) + INSERT INTO ctrl3 SELECT document() FROM s; + INSERT INTO ft3(rowid, w) SELECT rowid, w FROM ctrl3; +} + +do_all_vocab_test3 3.2 + + +finish_test + diff --git a/ext/fts5/test/fts5origintext6.test b/ext/fts5/test/fts5origintext6.test new file mode 100644 index 0000000000..7b27e310b9 --- /dev/null +++ b/ext/fts5/test/fts5origintext6.test @@ -0,0 +1,209 @@ +# 2014 Jan 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5origintext6 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +proc insert_data {tbl} { + db eval " + INSERT INTO $tbl (rowid, x, y) VALUES + (1, 'ChH BDd HhG efc BjJ BGi GBG FdD','ciJ AFf ADf fBJ fhC GFI JEH fcA'), + (2, 'deg AIG Fie jII cCd Hbf igF fEE','GeA Ija gJg EDc HFi DDI dCf aDd'), + (3, 'IJC hga deC Jfa Aeg hfh CcH dfb','ajD hgC Jaf IfH CHe jIG AjD adF'), + (4, 'FiH GJH IDA AiG bBc CGG Eih bIH','hHg JaH aii IHE Ggd gcH gji CGc'), + (5, 'ceg CAd jFI GAB BGg EeC IdH acG','bBC eIG ifH eDE Adj bjb GCj ebA'), + (6, 'Eac Fbh aFF Eea jeG EIj HCc JJH','hbd giE Gfe eiI dEF abE cJf cAb'), + (7, 'dic hAc jEC AiG FEF jHc HiD HBI','aEd ebE Gfi AJG EBA faj GiG jjE'), + (8, 'Fca iEe EgE jjJ gce ijf EGc EBi','gaI dhH bFg CFc HeC CjI Jfg ccH'), + (9, 'cfd iaa HCf iHJ HjG ffh ABb ibi','CfG bia Dai eii Ejg Jeg fCg hDb'), + (10, 'Jjf hJC IID HJj bGB EbJ cgg eBj','jci jhi JAF jIg Bei Bcd cAC AJd'), + (11, 'egG Cdi bFf fEB hfH jDH jia Efd','FAd eCg fAi aiC baC eJG acF iGE'), + (12, 'Ada Gde CJI ADG gJA Cbb ccF iAB','eAE ajC FBB ccd Jgh fJg ieg hGE'), + (13, 'gBb fDG Jdd HdD fiJ Bed Cig iGg','heC FeI iaj gdg ebB giC HaD FIe'), + (14, 'FiI iDd Ffe igI bgB EJf FHG hDF','cjC AeI abf Fah cbJ ffH jEb aib'), + (15, 'jaF hBI jIH Gdh FEc Fij hgj jFh','dGA ADH feh AAI AfJ DbC gBi hGH'), + (16, 'gjH BGg iGj aFE CAH edI idf HEH','hIf DDg fjB hGi cHF BCH FjG Bgd'), + (17, 'iaI JGH hji gcj Dda eeG jDd CBi','cHg jeh caG gIc feF ihG hgJ Abj'), + (18, 'jHI iDB eFf AiH EFB CDb IAj GbC','Ghe dEI gdI jai gib dAG BIa djb'), + (19, 'abI fHG Ccf aAc FDa fiC agF bdB','afi hde IgE bGF cfg DHD diE aca'), + (20, 'IFh eDJ jfh cDg dde JGJ GAf fIJ','IBa EfH faE aeI FIF baJ FGj EIH'), + (21, 'Dee bFC bBA dEI CEj aJI ghA dCH','hBA ddA HJh dfj egI Dij dFE bGE'), + (22, 'JFE BCj FgA afc Jda FGD iHJ HDh','eAI jHe BHD Gah bbD Bgj gbh eGB'), + (23, 'edE CJE FjG aFI edA Cea FId iFe','ABG jcA ddj EEc Dcg hAI agA biA'), + (24, 'AgE cfc eef cGh aFB DcH efJ hcH','eGF HaB diG fgi bdc iGJ FGJ fFB'), + (25, 'aCa AgI GhC DDI hGJ Hgc Gcg bbG','iID Fga jHa jIj idj DFD bAC AFJ'), + (26, 'gjC JGh Fge faa eCA iGG gHE Gai','bDi hFE BbI DHD Adb Fgi hCa Hij'), + (27, 'Eji jEI jhF DFC afH cDh AGc dHA','IDe GcA ChF DIb Bif HfH agD DGh'), + (28, 'gDD AEE Dfg ICf Cbi JdE jgH eEi','eEb dBG FDE jgf cAI FaJ jaA cDd'), + (29, 'cbe Gec hgB Egi bca dHg bAJ jBf','EFB DgD GJc fDb EeE bBA GFC Hbe'), + (30, 'Adc eHB afI hDc Bhh baE hcJ BBd','JAH deg bcF Dab Bgj Gbb JHi FIB'), + (31, 'agF dIj AJJ Hfg cCG hED Igc fHC','JEf eia dHf Ggc Agj geD bEE Gei'), + (32, 'DAd cCe cbJ FjG gJe gba dJA GCf','eAf hFc bGE ABI hHA IcE abF CCE'), + (33, 'fFh jJe DhJ cDJ EBi AfD eFI IhG','fEG GCc Bjd EFF ggg CFe EHd ciB'), + (34, 'Ejb BjI eAF HaD eEJ FaG Eda AHC','Iah hgD EJG fdD cIE Daj IFf eJh'), + (35, 'aHG eCe FjA djJ dAJ jiJ IaE GGB','Acg iEF JfB FIC Eei ggj dic Iii'), + (36, 'Fdb EDF GaF JjB ehH IgC hgi DCG','cag DHI Fah hAJ bbh egG Hia hgJ'), + (37, 'HGg icC JEC AFJ Ddh dhi hfC Ich','fEg bED Bff hCJ EiA cIf bfG cGA'), + (38, 'aEJ jGI BCi FaA ebA BHj cIJ GcC','dCH ADd bGB cFE AgF geD cbG jIc'), + (39, 'JFB bBi heA BFA hgB Ahj EIE CgI','EIJ JFG FJE GeA Hdg HeH ACh GiA'), + (40, 'agB DDC CED igC Dfc DhI eiC fHi','dAB dcg iJF cej Fcc cAc AfB Fdd'), + (41, 'BdF DHj Ege hcG DEd eFa dCf gBb','FBG ChB cej iGd Hbh fCc Ibe Abh'), + 
(42, 'Bgc DjI cbC jGD bdb hHB IJA IJH','heg cii abb IGf eDe hJc dii fcE'), + (43, 'fhf ECa FiA aDh Jbf CiB Jhe ajD','GFE bIF aeD gDE BIE Jea DfC BEc'), + (44, 'GjE dBj DbJ ICF aDh EEH Ejb jFb','dJj aEc IBg bEG Faf fjA hjf FAF'), + (45, 'BfA efd IIJ AHG dDF eGg dIJ Gcb','Bfj jeb Ahc dAE ACH Dfb ieb dhC'), + (46, 'Ibj ege geC dJh CIi hbD EAG fGA','DEb BFe Bjg FId Fhg HeF JAc BbE'), + (47, 'dhB afC hgG bEJ aIe Cbe iEE JCD','bdg Ajc FGA jbh Jge iAj fIA jbE'), + (48, 'egH iDi bfH iiI hGC jFF Hfd AHB','bjE Beb iCc haB gIH Dea bga dfd'), + (49, 'jgf chc jGc Baj HBb jdE hgh heI','FFB aBd iEB EIG HGf Bbj EIi JbI'), + (50, 'jhe EGi ajA fbH geh EHe FdC bij','jDE bBC gbH HeE dcH iBH IFE AHi'), + (51, 'aCb JiD cgJ Bjj iAI Hbe IAF FhH','ijf bhE Jdf FED dCH bbG HcJ ebH'); + " +} + +foreach_detail_mode $testprefix { +foreach external {0 1 2} { + reset_db + + proc tokens {cmd} { + set ret [list] + for {set iTok 0} {$iTok < [$cmd xInstCount]} {incr iTok} { + set txt [$cmd xInstToken $iTok 0] + set txt [string map [list "\0" "."] $txt] + lappend ret $txt + } + set ret + } + sqlite3_fts5_create_function db tokens tokens + sqlite3_fts5_register_origintext db + + set E(0) internal + set E(1) external + set E(2) contentless + set e $E($external) + + db eval { CREATE TABLE ex(x, y) } + switch -- $external { + 0 { + do_execsql_test 1.$e.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, y, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL% + ); + } + } + + 1 { + do_execsql_test 1.$e.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, y, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL%, + content=ex + ); + } + } + + 2 { + do_execsql_test 1.$e.0 { + CREATE VIRTUAL TABLE ft USING fts5( + x, y, tokenize="origintext unicode61", tokendata=1, detail=%DETAIL%, + content= + ); + } + } + } + insert_data ex + insert_data ft + + proc prefixquery {prefix bInst bYOnly} { + set ret [list] + db eval { SELECT rowid, x, y FROM ex ORDER BY rowid } { + set row [list] + set bSeen 0 + + set T [concat $x $y] + if {$bYOnly} { set T $y } + + foreach w $T { + if {[string match -nocase $prefix $w]} { + set bSeen 1 + if {$bInst} { + set v [string tolower $w] + if {$w != $v} { append v ".$w" } + lappend row $v + } + } + } + + if {$bSeen} { + lappend ret $rowid + lappend ret $row + } + } + + set ret + } + + proc do_prefixquery_test {tn prefix} { + set bInst [expr {$::e!="contentless" || "%DETAIL%"=="full"}] + set expect [prefixquery $prefix $bInst 0] + set expect2 [prefixquery $prefix $bInst 1] + + uplevel [list do_execsql_test $tn.1 " + SELECT rowid, tokens(ft) FROM ft('$prefix') + " $expect] + uplevel [list do_execsql_test $tn.2 " + SELECT rowid, tokens(ft) FROM ft(fts5_insttoken('$prefix')) + " $expect] + db eval { INSERT INTO ft(ft, rank) VALUES('insttoken', 1) } + uplevel [list do_execsql_test $tn.3 " + SELECT rowid, tokens(ft) FROM ft('$prefix') + " $expect] + db eval { INSERT INTO ft(ft, rank) VALUES('insttoken', 0) } + + if {"%DETAIL%"!="none"} { + uplevel [list do_execsql_test $tn.4 " + SELECT rowid, tokens(ft) FROM ft('y: $prefix') + " $expect2] + uplevel [list do_execsql_test $tn.5 " + SELECT rowid, tokens(ft) FROM ft(fts5_insttoken('y: $prefix')) + " $expect2] + db eval { INSERT INTO ft(ft, rank) VALUES('insttoken', 1) } + uplevel [list do_execsql_test $tn.6 " + SELECT rowid, tokens(ft) FROM ft('y: $prefix') + " $expect2] + db eval { INSERT INTO ft(ft, rank) VALUES('insttoken', 0) } + } + } + + do_prefixquery_test 1.$e.1 a* + do_prefixquery_test 1.$e.2 b* + do_prefixquery_test 1.$e.3 c* + do_prefixquery_test 1.$e.4 
d* + do_prefixquery_test 1.$e.5 e* + do_prefixquery_test 1.$e.6 f* + do_prefixquery_test 1.$e.7 g* + do_prefixquery_test 1.$e.8 h* + do_prefixquery_test 1.$e.9 i* + do_prefixquery_test 1.$e.10 j* +}} + + + +finish_test + diff --git a/ext/fts5/test/fts5phrase.test b/ext/fts5/test/fts5phrase.test index 10598ccf43..708cdfd83e 100644 --- a/ext/fts5/test/fts5phrase.test +++ b/ext/fts5/test/fts5phrase.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5phrase -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -93,15 +93,21 @@ foreach {tn cols tokens} { 10 {b} "i e" 11 {a} "i e" } { - set fts "{$cols}:[join $tokens +]" set where [list] foreach c $cols { lappend where "pmatch($c, '$tokens')" } set where [join $where " OR "] - set res [db eval "SELECT rowid FROM t3 WHERE $where"] - do_execsql_test "1.$tn.$fts->([llength $res] rows)" { - SELECT rowid FROM t3($fts) - } $res + foreach fts [list \ + "{$cols}:[join $tokens +]" \ + "{$cols}:NEAR([join $tokens +])" \ + "{$cols}:NEAR([join $tokens +],1)" \ + "{$cols}:NEAR([join $tokens +],111)" \ + ] { + set res [db eval "SELECT rowid FROM t3 WHERE $where"] + do_execsql_test "1.$tn.$fts->([llength $res] rows)" { + SELECT rowid FROM t3($fts) + } $res + } } do_execsql_test 2.0 { diff --git a/ext/fts5/test/fts5plan.test b/ext/fts5/test/fts5plan.test index 6862acf179..57d5254a35 100644 --- a/ext/fts5/test/fts5plan.test +++ b/ext/fts5/test/fts5plan.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5plan -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5porter.test b/ext/fts5/test/fts5porter.test index c7b1ce6f3f..de1c3e15a3 100644 --- a/ext/fts5/test/fts5porter.test +++ b/ext/fts5/test/fts5porter.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5porter -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5porter2.test b/ext/fts5/test/fts5porter2.test index 6e81b2d310..556060baa3 100644 --- a/ext/fts5/test/fts5porter2.test +++ b/ext/fts5/test/fts5porter2.test @@ -18,7 +18,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5porter2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5prefix.test b/ext/fts5/test/fts5prefix.test index 279f312f22..7a29628ea1 100644 --- a/ext/fts5/test/fts5prefix.test +++ b/ext/fts5/test/fts5prefix.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5prefix -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5prefix2.test b/ext/fts5/test/fts5prefix2.test index bf16e81a73..0860b3cddd 100644 --- a/ext/fts5/test/fts5prefix2.test +++ b/ext/fts5/test/fts5prefix2.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5prefix2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -52,6 +52,36 @@ do_execsql_test 2.1 { SELECT * FROM t2('to*'); } {top to tommy} +#------------------------------------------------------------------------- + +foreach {tn newrowid} { + 1 122 + 2 123 + 3 124 +} { + reset_db + do_execsql_test 3.$tn.0 { + CREATE VIRTUAL TABLE t12 USING fts5(x); + INSERT INTO t12(rowid, x) VALUES(123, 'wwww'); + } + do_execsql_test 3.$tn.1 { + BEGIN; + DELETE FROM t12 WHERE rowid=123; + SELECT * FROM t12('wwww*'); + INSERT INTO t12(rowid, x) VALUES($newrowid, 'wwww'); + SELECT * FROM t12('wwww*'); + END; + } {wwww} + do_execsql_test 3.$tn.2 { + INSERT INTO t12(t12) VALUES('integrity-check'); + } + do_execsql_test 3.$tn.3 { + SELECT rowid FROM t12('wwww*'); + } $newrowid +} + +finish_test + finish_test diff --git a/ext/fts5/test/fts5query.test b/ext/fts5/test/fts5query.test index 5237e8e250..4e8bab8cf7 100644 --- a/ext/fts5/test/fts5query.test +++ b/ext/fts5/test/fts5query.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5query -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5rank.test b/ext/fts5/test/fts5rank.test index 22534e8e03..7a700cb97f 100644 --- a/ext/fts5/test/fts5rank.test +++ b/ext/fts5/test/fts5rank.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5rank -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -180,4 +180,28 @@ do_execsql_test 6.1 { {table table table} {the table names.} {rank on an fts5 table} } + +#------------------------------------------------------------------------- +# forum post: https://sqlite.org/forum/forumpost/a2dd636330 +# +reset_db +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t USING fts5 (a, b); + INSERT INTO t (a, b) VALUES ('data1', 'sentence1'), ('data2', 'sentence2'); + INSERT INTO t(t, rank) VALUES ('rank', 'bm25(10.0,1.0)'); +} + +sqlite3 db2 test.db +do_execsql_test -db db2 1.1 { + SELECT *, rank<0.0 FROM t('data*') ORDER BY RANK; +} {data1 sentence1 1 data2 sentence2 1} + +do_execsql_test 1.2 { + INSERT INTO t(t, rank) VALUES ('rank', 'bm25(10.0,1.0)'); +} +do_execsql_test -db db2 1.3 { + SELECT *, rank<0.0 FROM t('data*') ORDER BY RANK; +} {data1 sentence1 1 data2 sentence2 1} +db2 close + finish_test diff --git a/ext/fts5/test/fts5rebuild.test b/ext/fts5/test/fts5rebuild.test index ae881c02f0..065d16b910 100644 --- a/ext/fts5/test/fts5rebuild.test +++ b/ext/fts5/test/fts5rebuild.test @@ -14,7 +14,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5rebuild -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -46,7 +46,7 @@ do_execsql_test 1.5 { do_catchsql_test 1.6 { INSERT INTO f1(f1) VALUES('integrity-check'); -} {1 {database disk image is malformed}} +} {/.*fts5: corrupt.*/} do_execsql_test 1.7 { INSERT INTO f1(f1) VALUES('rebuild'); diff --git a/ext/fts5/test/fts5restart.test b/ext/fts5/test/fts5restart.test index db2c62b675..da58fe3aed 100644 --- a/ext/fts5/test/fts5restart.test +++ b/ext/fts5/test/fts5restart.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5restart -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -29,6 +29,7 @@ do_execsql_test 1.0 { # Run the 'optimize' command. Check that it does not disturb ongoing # full-text queries. # +unset -nocomplain lRowid do_test 1.1 { for {set i 1} {$i < 1000} {incr i} { execsql { INSERT INTO f1 VALUES('a b c d e') } diff --git a/ext/fts5/test/fts5rowid.test b/ext/fts5/test/fts5rowid.test index 8935ecfea7..e4e4f6b844 100644 --- a/ext/fts5/test/fts5rowid.test +++ b/ext/fts5/test/fts5rowid.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5rowid -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5savepoint.test b/ext/fts5/test/fts5savepoint.test index e431f9f5fd..bf66052282 100644 --- a/ext/fts5/test/fts5savepoint.test +++ b/ext/fts5/test/fts5savepoint.test @@ -13,7 +13,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5savepoint -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -48,7 +48,7 @@ do_catchsql_test 2.0 { SAVEPOINT two; INSERT INTO ft1 VALUES('b'); COMMIT; -} {1 {SQL logic error}} +} {1 {database disk image is malformed}} reset_db ifcapable fts3 { @@ -71,7 +71,7 @@ ifcapable fts3 { do_catchsql_test 3.2 { DROP TABLE vt1; - } {1 {SQL logic error}} + } {0 {}} do_execsql_test 3.3 { SAVEPOINT x; diff --git a/ext/fts5/test/fts5secure.test b/ext/fts5/test/fts5secure.test new file mode 100644 index 0000000000..7314946162 --- /dev/null +++ b/ext/fts5/test/fts5secure.test @@ -0,0 +1,348 @@ +# 2023 Feb 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +ifcapable !fts5 { finish_test ; return } +set ::testprefix fts5secure + +proc dump {tname} { + execsql_pp "SELECT * FROM ${tname}_idx" + execsql_pp "SELECT id, quote(block), fts5_decode(id,block) FROM ${tname}_data" +} + + +do_execsql_test 0.0 { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + CREATE VIRTUAL TABLE v1 USING fts5vocab('t1', 'instance'); + INSERT INTO t1(rowid, ab) VALUES + (0,'abc'), (1,'abc'), (2,'abc'), (3,'abc'), (4,'def'); +} + +do_execsql_test 0.1 { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} + +do_execsql_test 0.2 { + DELETE FROM t1 WHERE rowid=2; +} + +do_execsql_test 0.3 { + SELECT count(*) FROM t1_data +} 3 + +do_execsql_test 0.4 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +do_execsql_test 0.5 { + DELETE FROM t1 WHERE rowid=3; +} + +do_execsql_test 0.6 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +do_execsql_test 0.7 { + DELETE FROM t1 WHERE rowid=0; +} + +do_execsql_test 0.8 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +#---------------------------------- + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t2 USING fts5(ab); + INSERT INTO t2(rowid, ab) VALUES (5, 'key'), (6, 'value'); + INSERT INTO t2(t2, rank) VALUES('secure-delete', 1); +} + +#execsql_pp { SELECT id, quote(block) FROM t1_data } +#execsql_pp { SELECT segid, quote(term), pgno FROM t1_idx } + +do_execsql_test 1.1 { + DELETE FROM t2 WHERE rowid = 5; +} + +do_execsql_test 1.2 { + INSERT INTO t2(t2) VALUES('integrity-check'); +} + +do_execsql_test 1.3 { + DELETE FROM t2 WHERE rowid = 6; +} + +do_execsql_test 1.4 { + INSERT INTO t2(t2) VALUES('integrity-check'); +} + +do_execsql_test 1.5 { + SELECT * FROM t2('value'); + SELECT * FROM t2('v*'); +} + +do_execsql_test 1.6 { + SELECT * FROM t2('value') ORDER BY rowid DESC; + SELECT * FROM t2('v*') ORDER BY rowid DESC; +} +execsql_pp { + SELECT id, quote(block) FROM t2_data; +} + +#---------------------------------- + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft USING fts5(ab); + CREATE VIRTUAL TABLE vocab USING fts5vocab('ft', 'instance'); + INSERT INTO ft(rowid, ab) VALUES + (1, 'one'), + (2, 'two'), + (3, 'three'), + (4, 'four'), + (5, 'one one'), + (6, 'one two'), + (7, 'one three'), + (8, 'one four'), + (9, 'two one'), + (10, 'two two'), + (11, 'two three'), + (12, 'two four'), + (13, 'three one'), + (14, 'three two'), + (15, 'three three'), + (16, 'three four'); +} + +do_execsql_test 2.1 { + SELECT count(*) FROM ft_data; +} {3} + +do_execsql_test 2.2 { + INSERT INTO ft(ft, rank) VALUES('secure-delete', 1); +} + +do_execsql_test 2.3 { + DELETE FROM ft WHERE rowid=9; +} + +do_execsql_test 2.4 { + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +do_execsql_test 2.5 { + DELETE FROM ft WHERE ab LIKE '%two%' +} + +do_execsql_test 2.6 { + INSERT INTO ft(ft) VALUES('integrity-check'); +} + +do_execsql_test 2.7 { + SELECT count(*) FROM ft_data; +} {3} + +#---------------------------------- +reset_db + +set ::vocab { + one two three four five six seven eight nine ten + eleven twelve thirteen fourteen fifteen sixteen + seventeen eighteen nineteen twenty +} +proc rnddoc {} { + set nVocab [llength $::vocab] + set ret [list] + for {set ii 0} {$ii < 8} {incr ii} { + lappend ret [lindex $::vocab [expr int(abs(rand()) * $nVocab)]] + } + set ret +} + +proc contains {list val} { + expr {[lsearch $list $val]>=0} +} + +foreach {tn pgsz} { + 2 64 + 1 1000 +} { + reset_db + db function 
rnddoc rnddoc + db function contains contains + + expr srand(1) + + do_execsql_test 3.$tn.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('pgsz', $pgsz); + WITH s(i) AS ( + VALUES(1) UNION SELECT i+1 FROM s WHERE i<20 + ) + INSERT INTO t1 SELECT rnddoc() FROM s; + } + + do_execsql_test 3.$tn.1 { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } + + foreach {rowid} { + 6 16 3 4 9 14 13 7 20 15 19 10 11 2 5 18 17 1 12 8 + } { + + do_execsql_test 3.$tn.2.$rowid { + DELETE FROM t1 WHERE rowid=$rowid; + } + do_execsql_test 3.$tn.2.$rowid.ic { + INSERT INTO t1(t1) VALUES('integrity-check'); + } + + foreach v $::vocab { + do_execsql_test 3.$tn.2.$rowid.q.$v { + SELECT rowid FROM t1($v) + } [db eval {SELECT rowid FROM t1 WHERE contains(x, $v)}] + + do_execsql_test 3.$tn.2.$rowid.q.$v.DESC { + SELECT rowid FROM t1($v) ORDER BY 1 DESC + } [db eval {SELECT rowid FROM t1 WHERE contains(x, $v) ORDER BY 1 DESC}] + } + } +} + +do_execsql_test 3.3 { + INSERT INTO t1(x) VALUES('optimize'); + INSERT INTO t1(t1) VALUES('optimize'); + SELECT count(*) FROM t1_data; +} {3} + +#---------------------------------- +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('pgsz', 32); + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} + +set L1 [string repeat abcdefghij 10] +set L2 [string repeat 1234567890 10] + +do_execsql_test 4.1 { + INSERT INTO t1 VALUES('aa' || $L1 || ' ' || $L2); +} +do_execsql_test 4.2 { + DELETE FROM t1 WHERE rowid=1 +} +do_execsql_test 4.3 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +#---------------------------------- +reset_db +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('pgsz', 32); + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} + +set doc "aa [string repeat {abc } 60]" + +do_execsql_test 5.1 { + BEGIN; + INSERT INTO t1 VALUES($doc); + INSERT INTO t1 VALUES('aa abc'); + COMMIT; +} + +do_execsql_test 5.2 { + DELETE FROM t1 WHERE rowid = 1; +} + +do_execsql_test 5.3 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +do_execsql_test 5.4 { SELECT rowid FROM t1('abc'); } 2 +do_execsql_test 5.5 { SELECT rowid FROM t1('aa'); } 2 + +#------------------------------------------------------------------------- +# Tests for the bug fixed by https://sqlite.org/src/info/4b60a1c3 +# +reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE fts USING fts5(content); + INSERT INTO fts(fts, rank) VALUES ('secure-delete', 1); + INSERT INTO fts(rowid, content) VALUES + (3407, 'profile profile profile profile profile profile profile profile pull pulling pulling really'); + DELETE FROM fts WHERE rowid IS 3407; + INSERT INTO fts(fts) VALUES ('integrity-check'); +} + +foreach {tn detail} { + 1 full + 2 column + 3 none +} { + do_execsql_test 6.1.$detail " + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts5(x, detail=$detail); + " + + do_execsql_test 6.2.$detail { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } + + for {set ii 1} {$ii < 100} {incr ii} { + do_execsql_test 6.3.$detail.$ii.1 { + BEGIN; + INSERT INTO t1(rowid, x) VALUES(10, 'word1'); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i0} { + set idx [expr int(abs(rand()) * [llength $in])] + lappend out [lindex $in $idx] + set in [lreplace $in $idx $idx] + } + set out + } + + #dump fff + + set iTest 1 + foreach ii [lshuffle [db eval {SELECT rowid FROM fff}]] { + #if {$iTest==1} { dump fff } + #if 
{$iTest==1} { breakpoint } + do_execsql_test 3.$tn.1.$iTest.$ii { + DELETE FROM fff WHERE rowid=$ii; + } + #if {$iTest==1} { dump fff } + if {($iTest % 20)==0} { + do_execsql_test 3.$tn.1.$iTest.$ii.ic { + INSERT INTO fff(fff) VALUES('integrity-check'); + } + } + #if {$iTest==1} { break } + incr iTest + } +} + +#execsql_pp { SELECT rowid FROM fff('post') ORDER BY rowid ASC } +#breakpoint +#execsql_pp { +# SELECT rowid FROM fff('post') ORDER BY rowid DESC +#} +# +#dump fff + + +finish_test + diff --git a/ext/fts5/test/fts5secure4.test b/ext/fts5/test/fts5secure4.test new file mode 100644 index 0000000000..7588a34683 --- /dev/null +++ b/ext/fts5/test/fts5secure4.test @@ -0,0 +1,170 @@ +# 2023 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +return_if_no_fts5 +set ::testprefix fts5secure4 + +#------------------------------------------------------------------------- +# Test using the 'delete' command to attempt to delete a token that +# is not present in the index in secure-delete mode. +# +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, b, content=x1); + + CREATE TABLE x1(rowid INTEGER PRIMARY KEY, a, b); + INSERT INTO x1 VALUES + (1, 'hello world', 'today xyz'), + (2, 'not the day', 'crunch crumble and chomp'), + (3, 'one', 'two'); + INSERT INTO t1(t1) VALUES('rebuild'); +} + +do_execsql_test 1.1 { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} + +do_execsql_test 1.2 { + INSERT INTO t1(t1, rowid, a, b) VALUES('delete', 4, 'nosuchtoken', ''); +} + +do_execsql_test 1.3 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +do_execsql_test 1.4 { + INSERT INTO t1(t1, rowid, a, b) VALUES('delete', 1, 'crunch', ''); +} + +do_execsql_test 1.5 { + INSERT INTO t1(t1, rowid, a, b) VALUES('delete', 3, 'crunch', ''); +} + +do_execsql_test 1.6 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +do_execsql_test 1.7 { +CREATE VIRTUAL TABLE y1 USING fts5(xx, prefix='1,2'); +INSERT INTO y1(y1, rank) VALUES('pgsz', 64); +INSERT INTO y1(y1, rank) VALUES('secure-delete', 1); +} +do_execsql_test 1.8 { + BEGIN; + INSERT INTO y1(rowid, xx) VALUES(1, 'abc def'); + INSERT INTO y1(rowid, xx) VALUES(2, 'reallyreallylongtoken'); + COMMIT; +} +do_execsql_test 1.9 { + DELETE FROM y1 WHERE rowid=1; + INSERT INTO y1(y1) VALUES('integrity-check'); +} + +do_execsql_test 1.10 { + CREATE VIRTUAL TABLE w1 USING fts5(ww, content=""); + INSERT INTO w1(rowid, ww) VALUES(123, ''); +} +do_catchsql_test 1.11 { + INSERT INTO w1(w1, rowid, ww) VALUES('delete', 123, 'xyz'); +} {1 {database disk image is malformed}} +do_catchsql_test 1.12 { + DROP TABLE w1; + CREATE VIRTUAL TABLE w1 USING fts5(ww, content=""); + INSERT INTO w1(rowid, ww) VALUES(123, ''); + DELETE FROM w1_data WHERE id>10; + INSERT INTO w1(w1, rowid, ww) VALUES('delete', 123, 'xyz'); +} {1 {database disk image is malformed}} + +#------------------------------------------------------------------------- +# Test using secure-delete with detail=none or detail=col. 
+# +foreach {tn d} {1 full 2 none 3 column} { + reset_db + do_execsql_test 2.$tn.1 " + CREATE VIRTUAL TABLE x1 USING fts5(xx, yy, zz, detail=$d, prefix='10,20'); + INSERT INTO x1(x1, rank) VALUES('pgsz', 64); + INSERT INTO x1(x1, rank) VALUES('secure-delete', 1); + " + + do_execsql_test 2.$tn.2 { + BEGIN; + INSERT INTO x1(xx, yy, zz) VALUES('a b c', 'd e f', 'a b c'); + INSERT INTO x1(xx, yy, zz) VALUES('a b c', 'd e f', 'a b c'); + INSERT INTO x1(xx, yy, zz) VALUES('a b c', 'd e f', 'a b c'); + INSERT INTO x1(xx, yy, zz) VALUES('a b c', 'd e f', 'a b c'); + INSERT INTO x1(xx, yy, zz) VALUES('a b c', 'd e f', 'a b c'); + COMMIT; + INSERT INTO x1(x1) VALUES('integrity-check'); + } + + do_execsql_test 2.$tn.3 { + DELETE FROM x1 WHERE rowid IN (2, 4, 6); + INSERT INTO x1(x1) VALUES('integrity-check'); + } + + do_execsql_test 2.$tn.4 { + DELETE FROM x1 WHERE rowid IN (1, 3, 5); + INSERT INTO x1(x1) VALUES('integrity-check'); + } + + do_execsql_test 2.$tn.5 { + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<100 + ) + INSERT INTO x1 + SELECT 'seems to be', 'used brew to', 'everything is working' FROM s + UNION ALL + SELECT 'used brew to', 'everything is working', 'seems to be' FROM s + UNION ALL + SELECT 'everything is working', 'seems to be', 'used brew to' FROM s + UNION ALL + SELECT 'abc', 'zzz', 'a b c d' + UNION ALL + SELECT 'z', 'z', 'z' FROM s + } + + do_test 2.$tn.6 { + for {set i 300} {$i > 200} {incr i -1} { + execsql { + DELETE FROM x1 WHERE rowid=$i; + INSERT INTO x1(x1) VALUES('integrity-check'); + } + } + } {} + + do_test 2.$tn.7 { + for {set i 1} {$i < 100} {incr i} { + execsql { + DELETE FROM x1 WHERE rowid=$i; + INSERT INTO x1(x1) VALUES('integrity-check'); + } + } + } {} + + do_test 2.$tn.8 { + foreach i [db eval {SELECT rowid FROM x1}] { + execsql { + DELETE FROM x1 WHERE rowid=$i; + INSERT INTO x1(x1) VALUES('integrity-check'); + } + } + } {} + + do_execsql_test 2.$tn.9 { + SELECT * FROM x1 + } {} +} + + + +finish_test + diff --git a/ext/fts5/test/fts5secure5.test b/ext/fts5/test/fts5secure5.test new file mode 100644 index 0000000000..ca8570211c --- /dev/null +++ b/ext/fts5/test/fts5secure5.test @@ -0,0 +1,129 @@ +# 2023 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +return_if_no_fts5 +set ::testprefix fts5secure5 +return_if_no_fts5 + +proc dump {} { + execsql_pp { + SELECT id, quote(block), fts5_decode_none(id, block) FROM ft1_data + } +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, detail=none); + INSERT INTO ft1(ft1, rank) VALUES('secure-delete', 1); +} + +do_execsql_test 1.1 { + BEGIN; + INSERT INTO ft1(rowid, a) VALUES(1, 'abcd'); + INSERT INTO ft1(rowid, a) VALUES(2, 'abcd'); + INSERT INTO ft1(rowid, a) VALUES(3, 'abcd'); + COMMIT; +} +do_execsql_test 1.2 { + DELETE FROM ft1 WHERE rowid=1; +} +do_execsql_test 1.3 { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} +do_execsql_test 1.4 { + DELETE FROM ft1 WHERE rowid=3; +} +do_execsql_test 1.5 { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} +do_execsql_test 1.6 { + DELETE FROM ft1 WHERE rowid=3; +} +do_execsql_test 1.7 { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, detail=none); + INSERT INTO ft1(ft1, rank) VALUES('secure-delete', 1); +} + +do_execsql_test 2.1 { + BEGIN; + INSERT INTO ft1(rowid, a) VALUES(1, 'abcd one'); + INSERT INTO ft1(rowid, a) VALUES(2, 'abcd two'); + INSERT INTO ft1(rowid, a) VALUES(3, 'abcd two'); + INSERT INTO ft1(rowid, a) VALUES(4, 'abcd two'); + INSERT INTO ft1(rowid, a) VALUES(5, 'abcd three'); + COMMIT; +} + +do_execsql_test 2.2a { + DELETE FROM ft1 WHERE rowid=3; +} +do_execsql_test 2.2b { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} +do_execsql_test 2.3a { + DELETE FROM ft1 WHERE rowid=2; +} +do_execsql_test 2.3b { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} +do_execsql_test 2.4a { + DELETE FROM ft1 WHERE rowid=4; +} +do_execsql_test 2.4b { + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, detail=none, prefix=1); + INSERT INTO ft1(ft1, rank) VALUES('secure-delete', 1); + INSERT INTO ft1(ft1, rank) VALUES('pgsz', 64); +} +do_execsql_test 3.1 { + BEGIN; + INSERT INTO ft1(a) VALUES('c'); + COMMIT; +} +do_execsql_test 3.2 { + DELETE FROM ft1 WHERE rowid IN (1); + INSERT INTO ft1(ft1) VALUES('integrity-check'); +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, detail=none); + INSERT INTO ft1(ft1, rank) VALUES('secure-delete', 1); + INSERT INTO ft1(ft1, rank) VALUES('pgsz', 64); + + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<500 + ) + INSERT INTO ft1 SELECT 'abcdefg' FROM s; +} + +do_test 4.1 { + for {set i 500} {$i > 0} {incr i -1} { + execsql { DELETE FROM ft1 WHERE rowid=$i } + execsql { INSERT INTO ft1(ft1) VALUES('integrity-check') } + } +} {} + +finish_test + diff --git a/ext/fts5/test/fts5secure6.test b/ext/fts5/test/fts5secure6.test new file mode 100644 index 0000000000..e2f4ceabc8 --- /dev/null +++ b/ext/fts5/test/fts5secure6.test @@ -0,0 +1,141 @@ +# 2023 Feb 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +ifcapable !fts5 { finish_test ; return } +set ::testprefix fts5secure6 + +db progress 1 progress_handler +set ::PHC 0 +proc progress_handler {args} { + incr ::PHC + # if {($::PHC % 100000)==0} breakpoint + return 0 +} + +proc setup {} { + db eval { + DROP TABLE IF EXISTS t1; + CREATE VIRTUAL TABLE t1 USING fts5(x); + WITH s(i) AS ( + VALUES(1) UNION ALL SELECT i+1 FROM s WHERE i<1000 + ) + INSERT INTO t1 SELECT 'a b c d e f g h i j k' FROM s; + } +} + +foreach {tn sd} { + 1 0 + 2 1 +} { + setup + do_execsql_test 1.$tn.0 { + INSERT INTO t1(t1, rank) VALUES('secure-delete', $sd) + } + set PHC 0 + do_execsql_test 1.$tn.1 { DELETE FROM t1; } + set phc($tn) $PHC +} + +do_test 1.3 { + expr $phc(1)*5 < $phc(2) +} {1} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('secure-delete', $sd) +} + +do_execsql_test 2.1 { + BEGIN; + INSERT INTO t1(rowid, x) VALUES(-100000, 'abc def ghi'); + INSERT INTO t1(rowid, x) VALUES(-99999, 'abc def ghi'); + INSERT INTO t1(rowid, x) VALUES(9223372036854775800, 'abc def ghi'); + COMMIT; +} + +do_execsql_test 2.2 { + SELECT rowid FROM t1('def') +} {-100000 -99999 9223372036854775800} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(t1, rank) VALUES('secure-delete', $sd) +} + +do_execsql_test 3.1 { + BEGIN; + INSERT INTO t1(rowid, x) + VALUES(51869, 'when whenever where weress what turn'), + (51871, 'to were'); + COMMIT; +} + +do_execsql_test 3.2 { + DELETE FROM t1 WHERE rowid=51871; + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x); + INSERT INTO t1(rowid, x) VALUES(10, 'one two'); +} +do_execsql_test 4.1 { + UPDATE t1 SET x = 'one three' WHERE rowid=10; + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} +do_execsql_test 4.2 { + DELETE FROM t1 WHERE rowid=10; +} +do_execsql_test 4.3 { + INSERT INTO t1(t1) VALUES('integrity-check'); +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE t1 USING fts5(content); + + INSERT INTO t1(t1,rank) VALUES('secure-delete',1); + INSERT INTO t1 VALUES('active'),('boomer'),('atom'),('atomic'), + ('alpha channel backup abandon test aback boomer atom alpha active'); + DELETE FROM t1 WHERE t1 MATCH 'abandon'; +} + +do_execsql_test 5.1 { + INSERT INTO t1(t1) VALUES('rebuild'); +} + +do_execsql_test 5.2 { + DELETE FROM t1 WHERE rowid NOTNULL<5; +} + +db close +sqlite3 db test.db + +do_execsql_test 5.3 { + PRAGMA integrity_check; +} {ok} + + +finish_test + diff --git a/ext/fts5/test/fts5secure7.test b/ext/fts5/test/fts5secure7.test new file mode 100644 index 0000000000..16a044f538 --- /dev/null +++ b/ext/fts5/test/fts5secure7.test @@ -0,0 +1,116 @@ +# 2023 Feb 17 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# +# TESTRUNNER: slow +# + +source [file join [file dirname [info script]] fts5_common.tcl] +ifcapable !fts5 { finish_test ; return } +set ::testprefix fts5secure7 + + +set NVOCAB 500 +set NDOC [expr 1000] + +set NREP 100 +set nDeletePerRep [expr 5] + +set VOCAB [list] + +proc select_one {list} { + set n [llength $list] + lindex $list [expr {abs(int(rand()*$n))}] +} + +proc init_vocab {} { + set L [split "abcdefghijklmnopqrstuvwxyz" {}] + set nL [llength $L] + for {set i 0} {$i < $::NVOCAB} {incr i} { + set n [expr {6 + int(rand()*8)}] + set word "" + for {set j 0} {$j < $n} {incr j} { + append word [select_one $L] + } + lappend ::VOCAB $word + } +} + +proc get_word {} { + select_one $::VOCAB +} + +proc get_document {nWord} { + set ret [list] + for {set i 0} {$i < $nWord} {incr i} { + lappend ret [get_word] + } + return $ret +} + +init_vocab + +db func document [list get_document 12] + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(body); + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); +} +do_execsql_test 1.1 { + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<$NDOC + ) + INSERT INTO t1 SELECT document() FROM s; +} + +for {set iRep 0} {$iRep < $NREP} {incr iRep} { + set lRowid [db eval {SELECT rowid FROM t1}] + for {set iDel 0} {$iDel < $nDeletePerRep} {incr iDel} { + set idx [select_one $lRowid] + db eval { + DELETE FROM t1 WHERE rowid=$idx + } + } + db eval { + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<$nDeletePerRep + ) + INSERT INTO t1 SELECT document() FROM s; + } + do_execsql_test 1.2.$iRep { + INSERT INTO t1(t1) VALUES('integrity-check'); + } +} + +reset_db +db func document [list get_document 12] +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(body); + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + INSERT INTO t1(t1, rank) VALUES('pgsz', 128); +} +do_execsql_test 2.1 { + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<$NDOC + ) + INSERT INTO t1 SELECT document() FROM s; +} +for {set ii 0} {$ii < $NDOC} {incr ii} { + set lRowid [db eval {SELECT rowid FROM t1}] + set idx [select_one $lRowid] + db eval { DELETE FROM t1 WHERE rowid=$idx } + do_execsql_test 2.2.$ii { + INSERT INTO t1(t1) VALUES('integrity-check'); + } +} + +finish_test + + diff --git a/ext/fts5/test/fts5secure8.test b/ext/fts5/test/fts5secure8.test new file mode 100644 index 0000000000..8b65b7c59f --- /dev/null +++ b/ext/fts5/test/fts5secure8.test @@ -0,0 +1,71 @@ +# 2023 Nov 23 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. 
+# +#************************************************************************* +# + +source [file join [file dirname [info script]] fts5_common.tcl] +ifcapable !fts5 { finish_test ; return } +set ::testprefix fts5secure8 + +proc sql_repeat {txt n} { + string repeat $txt $n +} +db func repeat sql_repeat + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5(x); + + INSERT INTO ft(ft, rank) VALUES('pgsz', 64); + + INSERT INTO ft(rowid, x) VALUES(100, 'hello world'); + INSERT INTO ft(rowid, x) VALUES(200, 'one day'); + + BEGIN; + INSERT INTO ft(rowid, x) VALUES(45, 'one two three'); + UPDATE ft SET x = repeat('hello world ', 500) WHERE rowid=100; + COMMIT +} + +do_execsql_test 1.1 { + INSERT INTO ft(ft, rank) VALUES('secure-delete', 1); + DELETE FROM ft WHERE rowid=100; +} + +do_execsql_test 1.2 { + PRAGMA integrity_check; +} {ok} + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE xyz USING fts5 ( + name, + content='' + ); + + INSERT INTO xyz(xyz, rank) VALUES('secure-delete', 1); + INSERT INTO xyz (rowid, name) VALUES(1, 'A'); + INSERT INTO xyz (rowid, name) VALUES(2, 'A'); + INSERT INTO xyz(xyz, rowid, name) VALUES('delete', 2, 'A'); +} + +do_execsql_test 2.1 { + pragma quick_check; +} {ok} + +do_catchsql_test 2.2 { + INSERT INTO xyz(xyz, rank) VALUES('secure-delete', 'hello world'); +} {1 {SQL logic error}} + + + + + +finish_test + + diff --git a/ext/fts5/test/fts5securefault.test b/ext/fts5/test/fts5securefault.test new file mode 100644 index 0000000000..2959ab65ce --- /dev/null +++ b/ext/fts5/test/fts5securefault.test @@ -0,0 +1,225 @@ +# 2023 April 14 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS5 module. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +source $testdir/malloc_common.tcl +set testprefix fts5securefault + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+return_if_no_fts5 + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(rowid, ab) VALUES + (0, 'abc'), (1, 'abc'), (2, 'abc'), (3, 'abc'), (4, 'def'); +} +faultsim_save_and_close + +do_faultsim_test 1.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { DELETE FROM t1 WHERE rowid=2 } +} -test { + faultsim_test_result {0 {}} +} +do_faultsim_test 1.2 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { DELETE FROM t1 WHERE rowid IN(0, 1, 2, 3, 4) } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db +set big [string repeat abcdefghij 5] +set big2 [string repeat klmnopqrst 5] +set doc "$big $big2" + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<4 + ) + INSERT INTO t1(rowid, ab) SELECT i, $doc FROM s; +} +faultsim_save_and_close + +do_faultsim_test 2.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { DELETE FROM t1 WHERE rowid = 3 } + execsql { DELETE FROM t1 WHERE rowid = 4 } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db +set big [string repeat abcdefghij 5] +set big2 [string repeat klmnopqrst 5] +set doc "$big $big2" + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + WITH s(i) AS ( + SELECT 1 UNION ALL SELECT i+1 FROM s WHERE i<25 + ) + INSERT INTO t1(rowid, ab) SELECT i, $doc FROM s; + + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + DELETE FROM t1 WHERE rowid BETWEEN 3 AND 23; +} +faultsim_save_and_close + +do_faultsim_test 3.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { DELETE FROM t1 WHERE rowid = 24 } + execsql { DELETE FROM t1 WHERE rowid = 25 } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db +set doc [string repeat "tok " 400] + +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + INSERT INTO t1(rowid, ab) VALUES(1, $doc), (2, $doc), (3, $doc); +} +faultsim_save_and_close + +do_faultsim_test 4.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { DELETE FROM t1 WHERE rowid = 2 } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db + +set doc1 [string repeat "abc " 10] +set doc2 [string repeat "def " 10] + +do_test 5.0 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + BEGIN; + } + for {set i 0} {$i < 50} {incr i} { + execsql { + INSERT INTO t1(rowid, ab) VALUES($i, 'abcdefg'); + } + } + execsql { + INSERT INTO t1(rowid, ab) VALUES(105, 'def'); + COMMIT; + } +} {} +faultsim_save_and_close + +do_faultsim_test 5.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body 
{ + execsql { DELETE FROM t1 WHERE rowid = 105 } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db +do_test 6.0 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 64); + BEGIN; + INSERT INTO t1(rowid, ab) VALUES(1, 'abcdefg'); + INSERT INTO t1(rowid, ab) VALUES(2, 'abcdefg'); + INSERT INTO t1(rowid, ab) VALUES(3, 'abcdefg'); + COMMIT; + } +} {} +faultsim_save_and_close + +do_faultsim_test 6.1 -faults oom* -prep { + faultsim_restore_and_reopen + execsql { + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} -body { + execsql { + UPDATE t1 SET ab='abcdefg' WHERE rowid=2; + } +} -test { + faultsim_test_result {0 {}} +} + +#------------------------------------------------------------------------- +# +reset_db +do_test 7.0 { + execsql { + CREATE VIRTUAL TABLE t1 USING fts5(ab); + INSERT INTO t1(t1, rank) VALUES('pgsz', 32); + INSERT INTO t1(t1, rank) VALUES('secure-delete', 1); + } +} {} +faultsim_save_and_close + +do_faultsim_test 7.1 -faults oom* -prep { + faultsim_restore_and_reopen + set big1 "[string repeat x 50] [string repeat y 50] [string repeat z 50]" + execsql { + BEGIN; + INSERT INTO t1 VALUES($big1); + } +} -body { + execsql { COMMIT } +} -test { + faultsim_test_result {0 {}} +} + + +finish_test diff --git a/ext/fts5/test/fts5simple.test b/ext/fts5/test/fts5simple.test index 936bbb2549..ad59bf0d9e 100644 --- a/ext/fts5/test/fts5simple.test +++ b/ext/fts5/test/fts5simple.test @@ -13,7 +13,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5simple -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -350,7 +350,7 @@ do_execsql_test 14.3 { do_execsql_test 14.4 { SELECT rowid, x, x1 FROM x1 WHERE x1 MATCH '*reads' -} {0 {} 3} +} {0 {} 2} #------------------------------------------------------------------------- reset_db @@ -480,4 +480,33 @@ do_execsql_test 22.0 { do_catchsql_test 22.1 {SELECT * FROM x1('')} {1 {fts5: syntax error near ""}} do_catchsql_test 22.2 {SELECT * FROM x1(NULL)} {1 {fts5: syntax error near ""}} +#------------------------------------------------------------------------- +reset_db +do_execsql_test 23.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); + SELECT count(*) FROM x1_data; +} {2} + +do_execsql_test 23.1 { + BEGIN; + INSERT INTO x1 VALUES('a b c d'); + INSERT INTO x1 VALUES('a b c d'); + INSERT INTO x1 VALUES('a b c d'); +} + +do_execsql_test 23.2 { + SELECT count(*) FROM x1_data; +} {2} + +do_execsql_test 23.3 { + INSERT INTO x1(x1) VALUES('flush'); + SELECT count(*) FROM x1_data; +} {3} + +do_execsql_test 23.4 { + ROLLBACK; + SELECT count(*) FROM x1_data; +} {2} + + finish_test diff --git a/ext/fts5/test/fts5simple2.test b/ext/fts5/test/fts5simple2.test index e57cea70fa..01c590c9f7 100644 --- a/ext/fts5/test/fts5simple2.test +++ b/ext/fts5/test/fts5simple2.test @@ -13,7 +13,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5simple2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -343,7 +343,9 @@ do_execsql_test 17.0 { INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb'); COMMIT; } -do_execsql_test 17.1 { SELECT * FROM t2('y:a*') WHERE rowid BETWEEN 10 AND 20 } +do_execsql_test 17.1 { + SELECT * FROM t2('y:a*') WHERE rowid BETWEEN 10 AND 20 +} do_execsql_test 17.2 { BEGIN; INSERT INTO t2 VALUES('a aa aaa', 'b bb bbb'); diff --git a/ext/fts5/test/fts5simple3.test b/ext/fts5/test/fts5simple3.test index 0d4972b372..bc3ebfc7ca 100644 --- a/ext/fts5/test/fts5simple3.test +++ b/ext/fts5/test/fts5simple3.test @@ -13,7 +13,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5simple3 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -81,7 +81,7 @@ do_execsql_test 3.0 { } #------------------------------------------------------------------------- -# Test that a crash occuring when the second or subsequent tokens in a +# Test that a crash occurring when the second or subsequent tokens in a # phrase matched zero rows has been fixed. # do_execsql_test 4.0 { diff --git a/ext/fts5/test/fts5synonym.test b/ext/fts5/test/fts5synonym.test index 86610ee9eb..55e2f186a9 100644 --- a/ext/fts5/test/fts5synonym.test +++ b/ext/fts5/test/fts5synonym.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5synonym -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5synonym2.test b/ext/fts5/test/fts5synonym2.test index 8bbfb07566..ec8b750c57 100644 --- a/ext/fts5/test/fts5synonym2.test +++ b/ext/fts5/test/fts5synonym2.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5synonym2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -42,7 +42,7 @@ proc fts5_test_bothlist {cmd} { } sqlite3_fts5_create_function db fts5_test_bothlist fts5_test_bothlist -proc fts5_rowid {cmd} { expr [$cmd xColumnText -1] } +proc fts5_rowid {cmd} { expr [$cmd xRowid] } sqlite3_fts5_create_function db fts5_rowid fts5_rowid do_execsql_test 1.$tok.0.1 " @@ -122,6 +122,9 @@ foreach {tn expr} { 4.1 "NEAR(one two, 2)" 4.2 "NEAR(one two three, 2)" 4.3 "NEAR(eight nine, 1) OR NEAR(six seven, 1)" + + 5.1 "one + two" + 5.2 "1 + two" } { if {[fts5_expr_ok $expr ss]==0} { do_test 1.$tok.$tn.OMITTED { list } [list] diff --git a/ext/fts5/test/fts5tokendata.test b/ext/fts5/test/fts5tokendata.test new file mode 100644 index 0000000000..7f75f4fa8e --- /dev/null +++ b/ext/fts5/test/fts5tokendata.test @@ -0,0 +1,105 @@ +# 2014 Jan 08 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focused on phrase queries. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5tokendata + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + +foreach_detail_mode $testprefix { + + sqlite3_fts5_register_origintext db + fts5_aux_test_functions db + proc b {x} { string map [list "\0" "."] $x } + db func b b + + do_execsql_test 1.0 { + CREATE VIRTUAL TABLE ft USING fts5(a, b, tokendata=1, + tokenize="origintext unicode61", detail=%DETAIL% + ); + CREATE VIRTUAL TABLE vocab USING fts5vocab(ft, instance); + } + + do_execsql_test 1.1 { + INSERT INTO ft(rowid, a, b) VALUES + (1, 'Pedagog Pedal Pedant', 'Peculier Day Today'), + (2, 'Pedant pedantic pecked', 'Peck Penalize Pen'); + + INSERT INTO ft(rowid, a, b) VALUES + (3, 'Penalty Pence Penciled', 'One Two Three'), + (4, 'Pedant Pedal Pedant', 'Peculier Day Today'); + } + + do_execsql_test 1.2 { + SELECT DISTINCT b(term) FROM vocab + } { + day.Day one.One peck.Peck pecked peculier.Peculier pedagog.Pedagog + pedal.Pedal pedant.Pedant pedantic pen.Pen penalize.Penalize + penalty.Penalty pence.Pence penciled.Penciled three.Three + today.Today two.Two + } + + do_execsql_test 1.3.1 { + SELECT rowid FROM ft('pe*') + } { + 1 2 3 4 + } + + do_execsql_test 1.3.2 { + SELECT rowid FROM ft('pe*') ORDER BY rowid DESC + } { + 4 3 2 1 + } + + if {"%DETAIL%"!="none"} { + do_execsql_test 1.3.3 { + SELECT rowid FROM ft WHERE a MATCH 'pe*' ORDER BY rowid DESC + } { + 4 3 2 1 + } + } + + do_execsql_test 1.4 { + SELECT rowid, b( fts5_test_insttoken(ft, 0, 0) ) FROM ft('pedant') + } { + 1 pedant.Pedant + 2 pedant.Pedant + 4 pedant.Pedant + } + + do_execsql_test 1.5 { + SELECT rowid, b( fts5_test_insttoken(ft, 0, 0) ) FROM ft('pe*') + } { + 1 pedagog.Pedagog + 2 pedant.Pedant + 3 penalty.Penalty + 4 pedant.Pedant + } + + do_execsql_test 1.6 { + SELECT rowid, fts5_test_poslist(ft) FROM ft('pe*') + } { + 1 {0.0.0 0.0.1 0.0.2 0.1.0} + 2 {0.0.0 0.0.1 0.0.2 0.1.0 0.1.1 0.1.2} + 3 {0.0.0 0.0.1 0.0.2} + 4 {0.0.0 0.0.1 0.0.2 0.1.0} + } +} + +finish_test + diff --git a/ext/fts5/test/fts5tokenizer.test b/ext/fts5/test/fts5tokenizer.test index 27370657ed..a828e3a22b 100644 --- a/ext/fts5/test/fts5tokenizer.test +++ b/ext/fts5/test/fts5tokenizer.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5tokenizer -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
ifcapable !fts5 { finish_test return @@ -300,5 +300,75 @@ set ::flags [list] do_execsql_test 9.5.1 { SELECT * FROM t1('"abc xyz*"'); } {} do_test 9.5.2 { set ::flags } {query} +#------------------------------------------------------------------------- +# +reset_db +do_execsql_test 10.1 { + CREATE VIRTUAL TABLE x1 USING fts5(x, tokenize=unicode61); + PRAGMA writable_schema = 1; + UPDATE sqlite_schema + SET sql = 'CREATE VIRTUAL TABLE x1 USING fts5(x, tokenize="unicode61 error");' + WHERE name = 'x1'; +} + +db close +sqlite3 db test.db + +do_catchsql_test 10.2 { + SELECT * FROM x1('abc'); +} {1 {error in tokenizer constructor}} + +do_catchsql_test 10.3 { + INSERT INTO x1 VALUES('abc'); +} {1 {error in tokenizer constructor}} + +do_execsql_test 10.4 { + PRAGMA writable_schema = 1; + UPDATE sqlite_schema + SET sql = 'CREATE VIRTUAL TABLE x1 USING fts5(x, tokenize="nosuch error");' + WHERE name = 'x1'; +} + +db close +sqlite3 db test.db + +do_catchsql_test 10.5 { + SELECT * FROM x1('abc'); +} {1 {no such tokenizer: nosuch}} +do_catchsql_test 10.6 { + INSERT INTO x1 VALUES('abc'); +} {1 {no such tokenizer: nosuch}} + +do_execsql_test 10.7 { + DROP TABLE x1; + SELECT * FROM sqlite_schema; +} + +reset_db +do_execsql_test 10.8 { + CREATE VIRTUAL TABLE x1 USING fts5(x, tokenize=unicode61); + INSERT INTO x1 VALUES('a b c'), ('d e f'), ('a b c'); + CREATE VIRTUAL TABLE x1v USING fts5vocab(x1, row); + + PRAGMA writable_schema = 1; + UPDATE sqlite_schema + SET sql = 'CREATE VIRTUAL TABLE x1 USING fts5(x, tokenize=simplify);' + WHERE name = 'x1'; +} + +do_execsql_test 10.9 { + SELECT * FROM x1v +} { + a 2 2 b 2 2 c 2 2 d 1 1 e 1 1 f 1 1 +} + +db close +sqlite3 db test.db + +do_execsql_test 10.10 { + SELECT * FROM x1v +} { + a 2 2 b 2 2 c 2 2 d 1 1 e 1 1 f 1 1 +} finish_test diff --git a/ext/fts5/test/fts5tokenizer2.test b/ext/fts5/test/fts5tokenizer2.test new file mode 100644 index 0000000000..4fe31d22c4 --- /dev/null +++ b/ext/fts5/test/fts5tokenizer2.test @@ -0,0 +1,109 @@ +# 2023 Nov 03 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# Tests focusing on the built-in fts5 tokenizers. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5tokenizer2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 {
+  finish_test
+  return
+}
+
+sqlite3_fts5_create_tokenizer db tst get_tst_tokenizer
+proc get_tst_tokenizer {args} {
+  return "tst_tokenizer"
+}
+proc tst_tokenizer {flags txt} {
+  set token ""
+  set lTok [list]
+
+  foreach c [split $txt {}] {
+    if {$token==""} {
+      append token $c
+    } else {
+      set t1 [string is upper $token]
+      set t2 [string is upper $c]
+
+      if {$t1!=$t2} {
+        lappend lTok $token
+        set token ""
+      }
+      append token $c
+    }
+  }
+  if {$token!=""} { lappend lTok $token }
+
+  set iOff 0
+  foreach t $lTok {
+    set n [string length $t]
+    sqlite3_fts5_token $t $iOff [expr $iOff+$n]
+    incr iOff $n
+  }
+}
+
+do_execsql_test 1.0 {
+  CREATE VIRTUAL TABLE t1 USING fts5(t, tokenize=tst);
+}
+
+do_execsql_test 1.1 {
+  INSERT INTO t1 VALUES('AAdontBBmess');
+}
+
+do_execsql_test 1.2 {
+  SELECT snippet(t1, 0, '>', '<', '...', 4) FROM t1('BB');
+} {AAdont>BB<mess}
+
+do_execsql_test 1.3 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('BB');
+} {AAdont>BB<mess}
+
+do_execsql_test 1.4 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('AA');
+} {>AA<dontBBmess}
+
+do_execsql_test 1.5 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('dont');
+} {AA>dont<BBmess}
+
+do_execsql_test 1.6 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('mess');
+} {AAdontBB>mess<}
+
+do_execsql_test 1.7 {
+  SELECT highlight(t1, 0, '>', '<') FROM t1('BB mess');
+} {AAdont>BBmess<}
+
+# 2024-08-06 https://sqlite.org/forum/forumpost/171bcc2bcd
+# Error handling of tokenize= arguments.
+#
+foreach {n tkz} {
+  1 {ascii none}
+  2 {unicode61 none}
+  3 {porter none}
+  4 {trigram none}
+  5 {ascii none 0}
+  6 {unicode61 none 0}
+  7 {porter none 0}
+  8 {trigram none 0}
+} {
+  db eval {DROP TABLE IF EXISTS t2;}
+  do_catchsql_test 2.$n "
+    DROP TABLE IF EXISTS t2;
+    CREATE VIRTUAL TABLE t2 USING fts5(a,b,c,tokenize='$tkz');
+  " {1 {error in tokenizer constructor}}
+}
+
+
+finish_test
diff --git a/ext/fts5/test/fts5tokenizer3.test b/ext/fts5/test/fts5tokenizer3.test
new file mode 100644
index 0000000000..5cdab743c2
--- /dev/null
+++ b/ext/fts5/test/fts5tokenizer3.test
@@ -0,0 +1,77 @@
+# 2024 Aug 10
+#
+# The author disclaims copyright to this source code. In place of
+# a legal notice, here is a blessing:
+#
+# May you do good and not evil.
+# May you find forgiveness for yourself and forgive others.
+# May you share freely, never taking more than you give.
+#
+#***********************************************************************
+#
+# Tests focusing on the built-in fts5 tokenizers.
+#
+
+source [file join [file dirname [info script]] fts5_common.tcl]
+set testprefix fts5tokenizer3
+
+# If SQLITE_ENABLE_FTS5 is not defined, omit this file.
+ifcapable !fts5 { + finish_test + return +} + + +proc get_sod {args} { return "split_on_dot" } +proc get_lowercase {args} { return "lowercase" } + +proc lowercase {flags txt} { + set n [string length $txt] + sqlite3_fts5_token [string tolower $txt] 0 $n + return 0 +} + +proc split_on_dot {flags txt} { + set iOff 0 + foreach t [split $txt "."] { + set n [string length $txt] + sqlite3_fts5_token $t $iOff [expr $iOff+$n] + incr iOff [expr {$n+1}] + } + return "" +} + +foreach {tn script} { + 1 { + sqlite3_fts5_create_tokenizer db lowercase get_lowercase + sqlite3_fts5_create_tokenizer -parent lowercase db split_on_dot get_sod + } + 2 { + sqlite3_fts5_create_tokenizer -v2 db lowercase get_lowercase + sqlite3_fts5_create_tokenizer -parent lowercase db split_on_dot get_sod + } + 3 { + sqlite3_fts5_create_tokenizer db lowercase get_lowercase + sqlite3_fts5_create_tokenizer -v2 -parent lowercase db split_on_dot get_sod + } + 4 { + sqlite3_fts5_create_tokenizer -v2 db lowercase get_lowercase + sqlite3_fts5_create_tokenizer -v2 -parent lowercase db split_on_dot get_sod + } +} { + reset_db + eval $script + + do_execsql_test 1.$tn.0 { + CREATE VIRTUAL TABLE t1 USING fts5(x, tokenize=split_on_dot); + CREATE VIRTUAL TABLE t1vocab USING fts5vocab(t1, instance); + INSERT INTO t1 VALUES('ABC.Def.ghi'); + } + + do_execsql_test 1.$tn.1 { + SELECT term FROM t1vocab ORDER BY 1 + } {abc def ghi} +} + + +finish_test diff --git a/ext/fts5/test/fts5trigram.test b/ext/fts5/test/fts5trigram.test index fb66efed68..377e3f7813 100644 --- a/ext/fts5/test/fts5trigram.test +++ b/ext/fts5/test/fts5trigram.test @@ -55,6 +55,8 @@ foreach {tn like res} { 6 {abc%klm} 1 7 {ABCDEFG%} 1 8 {%รุงเ%} 2 + 9 {%งเ%} 2 + 10 {%"งเ"%} {} } { do_execsql_test 1.3.$tn { SELECT rowid FROM t1 WHERE y LIKE $like @@ -68,6 +70,9 @@ do_execsql_test 2.0 { INSERT INTO t1 VALUES('abcdefghijklm'); INSERT INTO t1 VALUES('กรุงเทพมหานคร'); } +do_catchsql_test 2.0.1 { + CREATE VIRTUAL TABLE t2 USING fts5(z, tokenize='trigram case_sensitive'); +} {1 {error in tokenizer constructor}} foreach {tn s res} { 1 abc "(abc)defghijklm" @@ -196,5 +201,166 @@ do_eqp_test 6.3 { do_eqp_test 6.4 { SELECT * FROM ci1 WHERE x GLOB ? } {VIRTUAL TABLE INDEX 0:G0} +do_eqp_test 6.5 { + SELECT * FROM ci1 WHERE x < ? +} {{SCAN ci1 VIRTUAL TABLE INDEX 0:}} +do_eqp_test 6.6 { + SELECT * FROM ci0 WHERE x < ? 
+} {{SCAN ci0 VIRTUAL TABLE INDEX 0:}} + +reset_db +do_execsql_test 7.0 { + CREATE VIRTUAL TABLE f USING FTS5(filename, tokenize="trigram"); + INSERT INTO f (rowid, filename) VALUES + (10, "giraffe.png"), + (20, "жираф.png"), + (30, "cat.png"), + (40, "кот.png"), + (50, "misic-🎵-.mp3"); +} +do_execsql_test 7.1 { + SELECT rowid FROM f WHERE +filename GLOB '*ир*'; +} {20} +do_execsql_test 7.2 { + SELECT rowid FROM f WHERE filename GLOB '*ир*'; +} {20} + + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 8.0 { + CREATE VIRTUAL TABLE t1 USING fts5(y, tokenize=trigram); + INSERT INTO t1 VALUES('abcdefghijklm'); +} + +foreach {tn match res} { + 1 "abc ghi" "(abc)def(ghi)jklm" + 2 "def ghi" "abc(defghi)jklm" + 3 "efg ghi" "abcd(efghi)jklm" + 4 "efghi" "abcd(efghi)jklm" + 5 "abcd jklm" "(abcd)efghi(jklm)" + 6 "ijkl jklm" "abcdefgh(ijklm)" + 7 "ijk ijkl hijk" "abcdefg(hijkl)m" + +} { + do_execsql_test 8.1.$tn { + SELECT highlight(t1, 0, '(', ')') FROM t1($match) + } $res +} + +do_execsql_test 8.2 { + CREATE VIRTUAL TABLE ft2 USING fts5(a, tokenize="trigram"); + INSERT INTO ft2 VALUES('abc x cde'); + INSERT INTO ft2 VALUES('abc cde'); + INSERT INTO ft2 VALUES('abcde'); +} + +do_execsql_test 8.3 { + SELECT highlight(ft2, 0, '[', ']') FROM ft2 WHERE ft2 MATCH 'abc AND cde'; +} { + {[abc] x [cde]} + {[abc] [cde]} + {[abcde]} +} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 9.0 { + CREATE VIRTUAL TABLE t1 USING fts5( + a0, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, + tokenize=trigram + ); + + INSERT INTO t1(rowid, a12) VALUES(111, 'thats a tricky case though'); + INSERT INTO t1(rowid, a12) VALUES(222, 'the query planner cannot do'); +} + +do_execsql_test 9.1 { + SELECT rowid FROM t1 WHERE a12 LIKE '%tricky%' +} {111} + +do_execsql_test 9.2 { + SELECT rowid FROM t1 WHERE a12 LIKE '%tricky%' AND a12 LIKE '%case%' +} {111} + +do_execsql_test 9.3 { + SELECT rowid FROM t1 WHERE a12 LIKE NULL +} {} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 10.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, tokenize=trigram); +} + +do_test 10.1 { + foreach {val} { + "abc \UFFjkl\UFF" + "abc \UFFFjkl\UFFF" + "abc \UFFFFjkl\UFFFF" + "abc \UFFFFFjkl\UFFFFF" + "\UFFjkl\UFF abc" + "\UFFFjkl\UFFF abc" + "\UFFFFjkl\UFFFF abc" + "\UFFFFFjkl\UFFFFF abc" + "\U10001jkl\U10001 abc" + } { + execsql { INSERT INTO t1 VALUES( $val ) } + } +} {} + +do_test 10.2 { + foreach {val} { + X'E18000626320646566' + X'61EDA0806320646566' + X'61EDA0806320646566' + X'61EFBFBE6320646566' + X'76686920E18000626320646566' + X'7668692061EDA0806320646566' + X'7668692061EDA0806320646566' + X'7668692061EFBFBE6320646566' + } { + execsql " INSERT INTO t1 VALUES( $val ) " + } +} {} + +do_test 10.3 { + set a [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0x62}] + set b [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0x62}] + set c [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0xBF 0x62}] + set d [binary format c* {0x61 0xF7 0xBF 0xBF 0xBF 0xBF 0xBF 0xBF 0x62}] + execsql { + INSERT INTO t1 VALUES($a); + INSERT INTO t1 VALUES($b); + INSERT INTO t1 VALUES($c); + INSERT INTO t1 VALUES($d); + + INSERT INTO t1 VALUES('abcd' || $a); + INSERT INTO t1 VALUES('abcd' || $b); + INSERT INTO t1 VALUES('abcd' || $c); + INSERT INTO t1 VALUES('abcd' || $d); + } +} {} + +do_execsql_test 11.0 { + CREATE VIRTUAL TABLE t4 USING fts5(y, tokenize=trigram); +} +sqlite3_fts5_register_str db +do_execsql_test 11.1 { 
+ INSERT INTO t4 VALUES( str('') ); +} + +do_test 12.0 { + sqlite3_fts5_tokenize db trigram "abcd" +} {abc 0 3 bcd 1 4} + +do_test 12.1 { + sqlite3_fts5_tokenize db trigram "a" +} {} + +do_test 12.2 { + sqlite3_fts5_tokenize db trigram "" +} {} finish_test + diff --git a/ext/fts5/test/fts5trigram2.test b/ext/fts5/test/fts5trigram2.test new file mode 100644 index 0000000000..c81684a22b --- /dev/null +++ b/ext/fts5/test/fts5trigram2.test @@ -0,0 +1,137 @@ +# 2023 October 24 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# +# Tests for the fts5 "trigram" tokenizer. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +ifcapable !fts5 { finish_test ; return } +set ::testprefix fts5trigram2 + +do_execsql_test 1.0 " + CREATE VIRTUAL TABLE t1 USING fts5(y, tokenize='trigram remove_diacritics 1'); + INSERT INTO t1 VALUES('abc\u0303defghijklm'); + INSERT INTO t1 VALUES('a\u0303b\u0303c\u0303defghijklm'); +" +do_catchsql_test 1.0.1 { + CREATE VIRTUAL TABLE t2 USING fts5(z, tokenize='trigram remove_diacritics'); +} {1 {error in tokenizer constructor}} + +do_execsql_test 1.1 { + SELECT highlight(t1, 0, '(', ')') FROM t1('abc'); +} [list \ + "(abc\u0303)defghijklm" \ + "(a\u0303b\u0303c\u0303)defghijklm" \ +] + +do_execsql_test 1.2 { + SELECT highlight(t1, 0, '(', ')') FROM t1('bcde'); +} [list \ + "a(bc\u0303de)fghijklm" \ + "a\u0303(b\u0303c\u0303de)fghijklm" \ +] + +do_execsql_test 1.3 { + SELECT highlight(t1, 0, '(', ')') FROM t1('cdef'); +} [list \ + "ab(c\u0303def)ghijklm" \ + "a\u0303b\u0303(c\u0303def)ghijklm" \ +] + +do_execsql_test 1.4 { + SELECT highlight(t1, 0, '(', ')') FROM t1('def'); +} [list \ + "abc\u0303(def)ghijklm" \ + "a\u0303b\u0303c\u0303(def)ghijklm" \ +] + + +#------------------------------------------------------------------------- +do_catchsql_test 2.0 { + CREATE VIRTUAL TABLE t2 USING fts5( + z, tokenize='trigram case_sensitive 1 remove_diacritics 1' + ); +} {1 {error in tokenizer constructor}} + +do_execsql_test 2.1 { + CREATE VIRTUAL TABLE t2 USING fts5( + z, tokenize='trigram case_sensitive 0 remove_diacritics 1' + ); +} +do_execsql_test 2.2 " + INSERT INTO t2 VALUES('\u00E3bcdef'); + INSERT INTO t2 VALUES('b\u00E3cdef'); + INSERT INTO t2 VALUES('bc\u00E3def'); + INSERT INTO t2 VALUES('bcd\u00E3ef'); +" + +do_execsql_test 2.3 { + SELECT highlight(t2, 0, '(', ')') FROM t2('abc'); +} "(\u00E3bc)def" +do_execsql_test 2.4 { + SELECT highlight(t2, 0, '(', ')') FROM t2('bac'); +} "(b\u00E3c)def" +do_execsql_test 2.5 { + SELECT highlight(t2, 0, '(', ')') FROM t2('bca'); +} "(bc\u00E3)def" +do_execsql_test 2.6 " + SELECT highlight(t2, 0, '(', ')') FROM t2('\u00E3bc'); +" "(\u00E3bc)def" + +#------------------------------------------------------------------------- +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE t3 USING fts5( + z, tokenize='trigram remove_diacritics 1' + ); +} {} +do_execsql_test 3.1 " + INSERT INTO t3 VALUES ('\u0303abc\u0303'); +" +do_execsql_test 3.2 { + SELECT highlight(t3, 0, '(', ')') FROM t3('abc'); +} "\u0303(abc\u0303)" + +#------------------------------------------------------------------------- +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE t4 USING fts5(z, tokenize=trigram); +} {} + +do_execsql_test 4.1 { + INSERT INTO t4 VALUES('ABCD'); + INSERT 
INTO t4 VALUES('DEFG'); +} {} + +db close +sqlite3 db test.db + +do_eqp_test 4.1 { + SELECT rowid FROM t4 WHERE z LIKE '%abc%' +} {VIRTUAL TABLE INDEX 0:L0} + +do_execsql_test 4.2 { + SELECT rowid FROM t4 WHERE z LIKE '%abc%' +} {1} + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE t5 USING fts5( + c1, tokenize='trigram', detail='none' + ); + INSERT INTO t5(rowid, c1) VALUES(1, 'abc_____xyx_yxz'); + INSERT INTO t5(rowid, c1) VALUES(2, 'abc_____xyxz'); + INSERT INTO t5(rowid, c1) VALUES(3, 'ac_____xyxz'); +} {} +do_execsql_test 5.1 { + SELECT rowid FROM t5 WHERE c1 LIKE 'abc%xyxz' +} {2} + +finish_test diff --git a/ext/fts5/test/fts5ubsan.test b/ext/fts5/test/fts5ubsan.test new file mode 100644 index 0000000000..76382a1e15 --- /dev/null +++ b/ext/fts5/test/fts5ubsan.test @@ -0,0 +1,60 @@ +# 2022 August 9 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# This test is focused on edge cases that cause ubsan errors. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5ubsan + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + +do_execsql_test 1.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); +} + +set BIG 9000000000000000000 +set SMALL -9000000000000000000 + +do_execsql_test 1.1 { + BEGIN; + INSERT INTO x1 (rowid, x) VALUES($BIG, 'aaa aba acc'); + INSERT INTO x1 (rowid, x) VALUES($SMALL, 'aaa abc acb'); + COMMIT; +} + +do_execsql_test 1.2 { + SELECT rowid, x FROM x1('ab*'); +} [list $SMALL {aaa abc acb} $BIG {aaa aba acc}] + +do_execsql_test 1.3 { + SELECT rowid, x FROM x1('ac*'); +} [list $SMALL {aaa abc acb} $BIG {aaa aba acc}] + +reset_db +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE x1 USING fts5(x); +} + +do_execsql_test 2.1 { + INSERT INTO x1 (rowid, x) VALUES($BIG, 'aaa aba acc'); + INSERT INTO x1 (rowid, x) VALUES($SMALL, 'aaa abc acb'); +} + +do_execsql_test 2.2 { + INSERT INTO x1 (x1) VALUES('optimize'); +} + +finish_test diff --git a/ext/fts5/test/fts5unicode.test b/ext/fts5/test/fts5unicode.test index e2d0f60124..f10e0d02d8 100644 --- a/ext/fts5/test/fts5unicode.test +++ b/ext/fts5/test/fts5unicode.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5unicode -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -60,6 +60,7 @@ do_execsql_test 2.1 " # require 17 or more bits to store). # +unset -nocomplain A B C D set A [db one {SELECT char(0x1F75E)}] ;# Type So set B [db one {SELECT char(0x1F5FD)}] ;# Type So set C [db one {SELECT char(0x2F802)}] ;# Type Lo diff --git a/ext/fts5/test/fts5unicode2.test b/ext/fts5/test/fts5unicode2.test index 662b9dd87b..7a49a1d83f 100644 --- a/ext/fts5/test/fts5unicode2.test +++ b/ext/fts5/test/fts5unicode2.test @@ -17,7 +17,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5unicode2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -116,6 +116,7 @@ set docs [list { connected by OR. 
}] +unset -nocomplain map set map(a) [list "\u00C4" "\u00E4"] ; # LATIN LETTER A WITH DIAERESIS set map(e) [list "\u00CB" "\u00EB"] ; # LATIN LETTER E WITH DIAERESIS set map(i) [list "\u00CF" "\u00EF"] ; # LATIN LETTER I WITH DIAERESIS @@ -470,119 +471,23 @@ do_execsql_test 8.2.3 { } {2 4} #------------------------------------------------------------------------- -# -if 0 { -foreach {tn sql} { - 1 { - CREATE VIRTUAL TABLE t5 USING fts4(tokenize=unicode61 [tokenchars= .]); - CREATE VIRTUAL TABLE t6 USING fts4( - tokenize=unicode61 [tokenchars=="] "tokenchars=[]"); - CREATE VIRTUAL TABLE t7 USING fts4(tokenize=unicode61 [separators=x\xC4]); - } - 2 { - CREATE VIRTUAL TABLE t5 USING fts4(tokenize=unicode61 "tokenchars= ."); - CREATE VIRTUAL TABLE t6 USING fts4(tokenize=unicode61 "tokenchars=[=""]"); - CREATE VIRTUAL TABLE t7 USING fts4(tokenize=unicode61 "separators=x\xC4"); - } - 3 { - CREATE VIRTUAL TABLE t5 USING fts4(tokenize=unicode61 'tokenchars= .'); - CREATE VIRTUAL TABLE t6 USING fts4(tokenize=unicode61 'tokenchars=="[]'); - CREATE VIRTUAL TABLE t7 USING fts4(tokenize=unicode61 'separators=x\xC4'); - } - 4 { - CREATE VIRTUAL TABLE t5 USING fts4(tokenize=unicode61 `tokenchars= .`); - CREATE VIRTUAL TABLE t6 USING fts4(tokenize=unicode61 `tokenchars=[="]`); - CREATE VIRTUAL TABLE t7 USING fts4(tokenize=unicode61 `separators=x\xC4`); - } -} { - do_execsql_test 9.$tn.0 { - DROP TABLE IF EXISTS t5; - DROP TABLE IF EXISTS t5aux; - DROP TABLE IF EXISTS t6; - DROP TABLE IF EXISTS t6aux; - DROP TABLE IF EXISTS t7; - DROP TABLE IF EXISTS t7aux; - } - do_execsql_test 9.$tn.1 $sql - - do_execsql_test 9.$tn.2 { - CREATE VIRTUAL TABLE t5aux USING fts4aux(t5); - INSERT INTO t5 VALUES('one two three/four.five.six'); - SELECT * FROM t5aux; - } { - four.five.six * 1 1 four.five.six 0 1 1 - {one two three} * 1 1 {one two three} 0 1 1 - } - - do_execsql_test 9.$tn.3 { - CREATE VIRTUAL TABLE t6aux USING fts4aux(t6); - INSERT INTO t6 VALUES('alpha=beta"gamma/delta[epsilon]zeta'); - SELECT * FROM t6aux; - } { - {alpha=beta"gamma} * 1 1 {alpha=beta"gamma} 0 1 1 - {delta[epsilon]zeta} * 1 1 {delta[epsilon]zeta} 0 1 1 - } - - do_execsql_test 9.$tn.4 { - CREATE VIRTUAL TABLE t7aux USING fts4aux(t7); - INSERT INTO t7 VALUES('alephxbeth\xC4gimel'); - SELECT * FROM t7aux; - } { - aleph * 1 1 aleph 0 1 1 - beth * 1 1 beth 0 1 1 - gimel * 1 1 gimel 0 1 1 - } -} - -# Check that multiple options are handled correctly. -# -do_execsql_test 10.1 { - DROP TABLE IF EXISTS t1; - CREATE VIRTUAL TABLE t1 USING fts4(tokenize=unicode61 - "tokenchars=xyz" "tokenchars=.=" "separators=.=" "separators=xy" - "separators=a" "separators=a" "tokenchars=a" "tokenchars=a" - ); - - INSERT INTO t1 VALUES('oneatwoxthreeyfour'); - INSERT INTO t1 VALUES('a.single=word'); - CREATE VIRTUAL TABLE t1aux USING fts4aux(t1); - SELECT * FROM t1aux; -} { - .single=word * 1 1 .single=word 0 1 1 - four * 1 1 four 0 1 1 - one * 1 1 one 0 1 1 - three * 1 1 three 0 1 1 - two * 1 1 two 0 1 1 -} - -# Test that case folding happens after tokenization, not before. -# -do_execsql_test 10.2 { - DROP TABLE IF EXISTS t2; - CREATE VIRTUAL TABLE t2 USING fts4(tokenize=unicode61 "separators=aB"); - INSERT INTO t2 VALUES('oneatwoBthree'); - INSERT INTO t2 VALUES('onebtwoAthree'); - CREATE VIRTUAL TABLE t2aux USING fts4aux(t2); - SELECT * FROM t2aux; -} { - one * 1 1 one 0 1 1 - onebtwoathree * 1 1 onebtwoathree 0 1 1 - three * 1 1 three 0 1 1 - two * 1 1 two 0 1 1 -} -# Test that the tokenchars and separators options work with the -# fts3tokenize table. 
-# -do_execsql_test 11.1 { - CREATE VIRTUAL TABLE ft1 USING fts3tokenize( - "unicode61", "tokenchars=@.", "separators=1234567890" - ); - SELECT token FROM ft1 WHERE input = 'berlin@street123sydney.road'; +foreach {tn val bErr} { + 1 0 0 + 2 1 0 + 3 2 0 + 4 3 1 + 5 11 1 } { - berlin@street sydney.road -} - + reset_db + set aRes(0) {0 {}} + set aRes(1) {1 {error in tokenizer constructor}} + set res $aRes($bErr) + do_catchsql_test 9.1.$tn " + CREATE VIRTUAL TABLE bl USING fts5( + s, tokenize='trigram remove_diacritics $val' + ); + " $res } finish_test diff --git a/ext/fts5/test/fts5unicode3.test b/ext/fts5/test/fts5unicode3.test index 30eb3c4166..ddb61a9997 100644 --- a/ext/fts5/test/fts5unicode3.test +++ b/ext/fts5/test/fts5unicode3.test @@ -14,7 +14,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5unicode4.test b/ext/fts5/test/fts5unicode4.test index dfd7f5a254..f006d6c0a6 100644 --- a/ext/fts5/test/fts5unicode4.test +++ b/ext/fts5/test/fts5unicode4.test @@ -14,7 +14,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5unicode4 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -28,4 +28,34 @@ do_execsql_test 1.1 { INSERT INTO sss VALUES('まりや'); } +foreach {tn enc tok} { + 1 utf-8 ascii + 2 utf-16 ascii + 3 utf-8 unicode61 + 4 utf-16 unicode61 +} { + reset_db + + do_execsql_test 1.$tn.0 " + PRAGMA encoding = '$enc'; + CREATE VIRTUAL TABLE vt2 USING fts5(c0, c1, tokenize=$tok); + " + + do_execsql_test 1.$tn.1 { + INSERT INTO vt2(c0, c1) VALUES ('bhal', x'17db'); + } + + do_execsql_test 1.$tn.2 { + UPDATE vt2 SET c0='bhal'; + } + + do_execsql_test 1.$tn.3 { + INSERT INTO vt2(vt2) VALUES('integrity-check') + } + + do_execsql_test 1.$tn.4 { + SELECT quote(c1) FROM vt2 + } {X'17DB'} +} + finish_test diff --git a/ext/fts5/test/fts5unindexed.test b/ext/fts5/test/fts5unindexed.test index 8b72c4c776..5099a89693 100644 --- a/ext/fts5/test/fts5unindexed.test +++ b/ext/fts5/test/fts5unindexed.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5unindexed -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return diff --git a/ext/fts5/test/fts5unindexed2.test b/ext/fts5/test/fts5unindexed2.test new file mode 100644 index 0000000000..c0abfc3980 --- /dev/null +++ b/ext/fts5/test/fts5unindexed2.test @@ -0,0 +1,297 @@ +# 2024 Sep 13 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#*********************************************************************** +# +# The tests in this file focus on "unindexed" columns in contentless +# tables. +# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5unindexed2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. 
+ifcapable !fts5 { + finish_test + return +} + + +do_execsql_test 1.1 { + CREATE VIRTUAL TABLE t1 USING fts5( + a, b UNINDEXED, content=, contentless_unindexed=1 + ); +} {} + +do_execsql_test 1.2 { + INSERT INTO t1 VALUES('abc def', 'ghi jkl'); +} + +do_execsql_test 1.3 { + SELECT rowid, a, b FROM t1 +} {1 {} {ghi jkl}} + +do_execsql_test 1.4 { + INSERT INTO t1(rowid, a, b) VALUES(11, 'hello world', 'one two three'); +} + +do_execsql_test 1.5 { + INSERT INTO t1(t1, rowid, a, b) VALUES('delete', 1, 'abc def', 'ghi jkl'); +} + +do_execsql_test 1.6 { + SELECT rowid, a, b FROM t1 +} { + 11 {} {one two three} +} + +do_execsql_test 1.7 { + PRAGMA integrity_check +} {ok} + +do_execsql_test 1.8 { + INSERT INTO t1(rowid, a, b) VALUES(12, 'abc def', 'ghi jkl'); +} + +do_execsql_test 1.9 { + SELECT rowid, a, b FROM t1('def') +} {12 {} {ghi jkl}} + +do_execsql_test 1.10 { + SELECT rowid, a, b FROM t1('def OR hello') ORDER BY rank +} {11 {} {one two three} 12 {} {ghi jkl}} + +do_execsql_test 1.11 { + SELECT rowid, a, b FROM t1 WHERE rowid=11 +} {11 {} {one two three}} + +do_execsql_test 1.12 { + SELECT rowid, a, b FROM t1 +} {11 {} {one two three} 12 {} {ghi jkl}} + + +fts5_aux_test_functions db +do_execsql_test 1.12.2 { + SELECT rowid, fts5_test_columntext(t1) FROM t1('def OR hello') +} {11 {{} {one two three}} 12 {{} {ghi jkl}}} + +do_execsql_test 1.13 { + INSERT INTO t1(t1) VALUES('delete-all'); +} + +do_execsql_test 1.14 { + SELECT rowid, a, b FROM t1 +} + +do_execsql_test 1.15 { + PRAGMA integrity_check +} {ok} + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE t4 USING fts5( + x, y UNINDEXED, z, columnsize=0, content='', contentless_unindexed=1 + ); +} + +do_execsql_test 2.1 { + INSERT INTO t4(rowid, x, y, z) VALUES(1, 'a a', 'b b b', 'c'); +} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 3.0 { + CREATE VIRTUAL TABLE x1 USING fts5( + a UNINDEXED, b, c UNINDEXED, d, content=, contentless_delete=1, + contentless_unindexed=1 + ); +} + +do_execsql_test 3.1 { + INSERT INTO x1(rowid, a, b, c, d) VALUES(131, 'aaa', 'bbb', 'ccc', 'ddd'); +} + +do_execsql_test 3.2 { + SELECT * FROM x1 +} {aaa {} ccc {}} + +do_execsql_test 3.3 { + INSERT INTO x1(rowid, a, b, c, d) VALUES(1000, 'AAA', 'BBB', 'CCC', 'DDD'); +} + +do_execsql_test 3.4 { + SELECT rowid, * FROM x1 +} { + 131 aaa {} ccc {} + 1000 AAA {} CCC {} +} + +do_execsql_test 3.5 { + DELETE FROM x1 WHERE rowid=131; + SELECT rowid, * FROM x1 +} { + 1000 AAA {} CCC {} +} + +do_execsql_test 3.6 { + INSERT INTO x1(rowid, a, b, c, d) VALUES(112, 'aaa', 'bbb', 'ccc', 'ddd'); + SELECT rowid, * FROM x1 +} { + 112 aaa {} ccc {} + 1000 AAA {} CCC {} +} + +do_execsql_test 3.7 { + UPDATE x1 SET b='hello', d='world', rowid=1120 WHERE rowid=112 +} + +do_execsql_test 3.8 { + SELECT rowid, * FROM x1 +} { + 1000 AAA {} CCC {} + 1120 aaa {} ccc {} +} + +do_execsql_test 3.9 { + SELECT rowid, * FROM x1('hello'); +} { + 1120 aaa {} ccc {} +} + +do_execsql_test 3.9 { + SELECT rowid, * FROM x1('bbb'); +} { + 1000 AAA {} CCC {} +} + +fts5_aux_test_functions db +do_execsql_test 3.10 { + SELECT rowid, fts5_test_columntext(x1) FROM x1('b*') +} {1000 {AAA {} CCC {}}} + +#------------------------------------------------------------------------- +# Check that if contentless_unindexed=1 is not specified, the values +# of UNINDEXED columns are not stored in the database. +# +# Also check that contentless_unindexed=1 is not allowed unless the table +# is actually contentless. 
+# +reset_db +do_execsql_test 4.0 { + CREATE VIRTUAL TABLE ft USING fts5(a, b, c UNINDEXED, content=''); + INSERT INTO ft VALUES('one', 'two', 'three'); + SELECT rowid, * FROM ft; +} {1 {} {} {}} + +do_execsql_test 4.1 { + SELECT name FROM sqlite_schema ORDER BY 1 +} { + ft ft_config ft_data ft_docsize ft_idx +} + +do_catchsql_test 4.2 { + CREATE VIRTUAL TABLE ft2 USING fts5( + a, b, c UNINDEXED, contentless_unindexed=1 + ); +} {1 {contentless_unindexed=1 requires a contentless table}} + +do_catchsql_test 4.3 { + DELETE FROM ft WHERE rowid=1 +} {1 {cannot DELETE from contentless fts5 table: ft}} + +#------------------------------------------------------------------------- +# Check that the usual restrictions on contentless tables apply to +# contentless_unindexed=1 tables. +# +reset_db +do_execsql_test 5.0 { + CREATE VIRTUAL TABLE ft USING fts5( + a, b UNINDEXED, c, content='', contentless_unindexed=1 + ); + INSERT INTO ft VALUES('one', 'two', 'three'); + INSERT INTO ft VALUES('four', 'five', 'six'); + INSERT INTO ft VALUES('seven', 'eight', 'nine'); + SELECT rowid, * FROM ft; +} { + 1 {} two {} + 2 {} five {} + 3 {} eight {} +} + +do_execsql_test 5.1 { + PRAGMA integrity_check +} {ok} + +do_catchsql_test 5.2 { + DELETE FROM ft WHERE rowid=2 +} {1 {cannot DELETE from contentless fts5 table: ft}} + +do_execsql_test 5.3 { + SELECT rowid, * FROM ft('six') +} { + 2 {} five {} +} + +do_catchsql_test 5.4 { + UPDATE ft SET a='x', b='y', c='z' WHERE rowid=3 +} {1 {cannot UPDATE contentless fts5 table: ft}} + +fts5_aux_test_functions db + +do_execsql_test 5.5 { + SELECT fts5_test_columntext(ft) FROM ft WHERE rowid=3 +} { + {{} eight {}} +} +do_execsql_test 5.6 { + SELECT fts5_test_columntext(ft) FROM ft('three'); +} { + {{} two {}} +} + +#------------------------------------------------------------------------- +# Check that it is possible to UPDATE a contentless_unindexed=1 table +# if the only columns being modified are UNINDEXED. +# +# If the contentless_unindexed=1 table is also contentless_delete=1, then +# it is also possible to update indexed columns - but only if *all* indexed +# columns are updated. +# +reset_db +do_execsql_test 6.0 { + CREATE VIRTUAL TABLE ft1 USING fts5(a, b UNINDEXED, c UNINDEXED, d, + contentless_unindexed=1, content='' + ); + + INSERT INTO ft1(rowid, a, b, c, d) VALUES + (100, 'x y', 'b1', 'c1', 'a b'), + (200, 'c d', 'b2', 'c2', 'a b'), + (300, 'e f', 'b3', 'c3', 'a b'); +} + +do_execsql_test 6.1 { + UPDATE ft1 SET b='b1.1', c='c1.1' WHERE rowid=100; +} +do_execsql_test 6.2 { + UPDATE ft1 SET b='b2.1' WHERE rowid=200; +} +do_execsql_test 6.3 { + UPDATE ft1 SET c='c3.1' WHERE rowid=300; +} + +do_execsql_test 6.4 { + SELECT rowid, a, b, c, d FROM ft1 +} { + 100 {} b1.1 c1.1 {} + 200 {} b2.1 c2 {} + 300 {} b3 c3.1 {} +} + +finish_test + diff --git a/ext/fts5/test/fts5update2.test b/ext/fts5/test/fts5update2.test new file mode 100644 index 0000000000..d04af4800d --- /dev/null +++ b/ext/fts5/test/fts5update2.test @@ -0,0 +1,177 @@ +# 2024 Sep 27 +# +# The author disclaims copyright to this source code. In place of +# a legal notice, here is a blessing: +# +# May you do good and not evil. +# May you find forgiveness for yourself and forgive others. +# May you share freely, never taking more than you give. +# +#************************************************************************* +# This file implements regression tests for SQLite library. The +# focus of this script is testing the FTS5 module. 
+# + +source [file join [file dirname [info script]] fts5_common.tcl] +set testprefix fts5update2 + +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. +ifcapable !fts5 { + finish_test + return +} + + +#------------------------------------------------------------------------- +# Test that the various types of UPDATE statement are handled correctly +# by different table types. +# +foreach_detail_mode $testprefix { +foreach {tn cu} { + 1 0 + 2 1 +} { + reset_db + do_execsql_test 1.$tn.1 " + CREATE VIRTUAL TABLE ft1 USING fts5(a, b UNINDEXED, c UNINDEXED, d, + content='', + contentless_unindexed=$cu, + detail=%DETAIL% + ); + CREATE VIRTUAL TABLE ft2 USING fts5(a, b UNINDEXED, c UNINDEXED, d, + content='', + contentless_unindexed=$cu, contentless_delete=1, + detail=%DETAIL% + ); + " + + do_execsql_test 1.$tn.2 { + INSERT INTO ft1(rowid, a, b, c, d) VALUES(1, 'a1', 'b1', 'c1', 'd1'); + INSERT INTO ft1(rowid, a, b, c, d) VALUES(2, 'a2', 'b2', 'c2', 'd2'); + INSERT INTO ft1(rowid, a, b, c, d) VALUES(3, 'a3', 'b3', 'c3', 'd3'); + + INSERT INTO ft2(rowid, a, b, c, d) VALUES(1, 'a1', 'b1', 'c1', 'd1'); + INSERT INTO ft2(rowid, a, b, c, d) VALUES(2, 'a2', 'b2', 'c2', 'd2'); + INSERT INTO ft2(rowid, a, b, c, d) VALUES(3, 'a3', 'b3', 'c3', 'd3'); + } + + # It should be possible to update a subset of the UNINDEXED columns of + # a contentless table. Regardless of whether or not contentless_unindexed=1 + # or contentless_delete=1 is set. + do_execsql_test 1.$tn.3 { + UPDATE ft1 SET b=b||'.1'; + UPDATE ft2 SET b=b||'.1'; + } + do_execsql_test 1.$tn.4 { + UPDATE ft1 SET b=b||'.2', c=c||'.2'; + UPDATE ft2 SET b=b||'.2', c=c||'.2'; + } + + set res(0) { + 1 {} {} {} {} + 2 {} {} {} {} + 3 {} {} {} {} + } + set res(1) { + 1 {} b1.1.2 c1.2 {} + 2 {} b2.1.2 c2.2 {} + 3 {} b3.1.2 c3.2 {} + } + + do_execsql_test 1.$tn.5 { + SELECT rowid, * FROM ft2 + } $res($cu) + + do_execsql_test 1.6.1 { SELECT rowid FROM ft1('a2') } {2} + do_execsql_test 1.6.2 { SELECT rowid FROM ft2('a2') } {2} + + # It should be possible to update all indexed columns (but no other subset) + # if the contentless_delete=1 option is set, as it is for "ft2". + do_execsql_test 1.$tn.7 { + UPDATE ft2 SET a='a22', d='d22' WHERE rowid=2; + } + do_execsql_test 1.$tn.8 { SELECT rowid FROM ft2('a22 AND d22') } {2} + + do_execsql_test 1.$tn.9 { + UPDATE ft2 SET a='a33', d='d33', b='b3' WHERE rowid=3; + } + + set res(1) { + 1 {} b1.1.2 c1.2 {} + 2 {} b2.1.2 c2.2 {} + 3 {} b3 c3.2 {} + } + do_execsql_test 1.$tn.10 { + SELECT rowid, * FROM ft2 + } $res($cu) + + do_catchsql_test 1.$tn.11 { + UPDATE ft2 SET a='a11' WHERE rowid=1 + } {1 {cannot UPDATE a subset of columns on fts5 contentless-delete table: ft2}} + do_catchsql_test 1.$tn.12 { + UPDATE ft2 SET d='d11' WHERE rowid=1 + } {1 {cannot UPDATE a subset of columns on fts5 contentless-delete table: ft2}} + + # It is not possible to update the values of indexed columns if + # contentless_delete=1 is not set. + do_catchsql_test 1.$tn.13 { + UPDATE ft1 SET a='a11' WHERE rowid=1 + } {1 {cannot UPDATE contentless fts5 table: ft1}} + do_catchsql_test 1.$tn.14 { + UPDATE ft1 SET d='d11' WHERE rowid=1 + } {1 {cannot UPDATE contentless fts5 table: ft1}} + + # It should be possible to update the rowid if contentless_delete=1 is + # set and all indexed columns are updated. 
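Editorial note: before the remaining Tcl cases, here is a minimal C sketch of the UPDATE rules this block exercises. It is illustrative only and assumes an SQLite build with FTS5 enabled; the table `ft` and its columns are modelled on the `ft1`/`ft2` tables above, not taken from the patch. On a `contentless_delete=1` table an UPDATE must either touch only UNINDEXED columns or assign a value to every indexed column; anything in between is rejected with the "cannot UPDATE a subset of columns" error checked by these tests. The rowid-update cases continue directly below.

```c
/* Editorial sketch (not part of the patch): expected outcomes of UPDATE
** statements against a contentless_delete=1, contentless_unindexed=1 table. */
#include <stdio.h>
#include <sqlite3.h>

static void run(sqlite3 *db, const char *zSql){
  char *zErr = 0;
  int rc = sqlite3_exec(db, zSql, 0, 0, &zErr);
  printf("%s -> %s\n", zSql, rc==SQLITE_OK ? "ok" : zErr);
  sqlite3_free(zErr);
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;

  run(db, "CREATE VIRTUAL TABLE ft USING fts5(a, b UNINDEXED, d,"
          " content='', contentless_delete=1, contentless_unindexed=1)");
  run(db, "INSERT INTO ft(rowid, a, b, d) VALUES(1, 'a1', 'b1', 'd1')");

  run(db, "UPDATE ft SET b='b2' WHERE rowid=1");         /* ok: UNINDEXED only     */
  run(db, "UPDATE ft SET a='a2', d='d2' WHERE rowid=1"); /* ok: all indexed cols   */
  run(db, "UPDATE ft SET a='a3' WHERE rowid=1");         /* error: indexed subset  */

  sqlite3_close(db);
  return 0;
}
```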
+ do_execsql_test 1.$tn.15 { + UPDATE ft2 SET a='aXone', d='dXone', rowid=11 WHERE rowid=1 + } + + set res(0) { + 2 {} {} {} {} + 3 {} {} {} {} + 11 {} {} {} {} + } + set res(1) { + 2 {} b2.1.2 c2.2 {} + 3 {} b3 c3.2 {} + 11 {} b1.1.2 c1.2 {} + } + do_execsql_test 1.$tn.16 { + SELECT rowid, * FROM ft2 + } $res($cu) + + # Should not be possible to update the rowid of a contentless_delete=1 + # table if no indexed columns are updated. + do_catchsql_test 1.$tn.17 { + UPDATE ft2 SET rowid=12 WHERE rowid=11 + } {1 {cannot UPDATE a subset of columns on fts5 contentless-delete table: ft2}} + do_catchsql_test 1.$tn.18 { + UPDATE ft1 SET rowid=12 WHERE rowid=1 + } {1 {cannot UPDATE contentless fts5 table: ft1}} + + do_execsql_test 1.$tn.19 { + UPDATE ft2 SET a='aXtwo', d='dXtwo', c='newval', rowid=12 WHERE rowid=2 + } {} + + set res(0) { + 3 {} {} {} {} + 11 {} {} {} {} + 12 {} {} {} {} + } + set res(1) { + 3 {} b3 c3.2 {} + 11 {} b1.1.2 c1.2 {} + 12 {} b2.1.2 newval {} + } + do_execsql_test 1.$tn.20 { + SELECT rowid, * FROM ft2 + } $res($cu) + + do_execsql_test 1.$tn.21 { + SELECT rowid, * FROM ft2('aXtwo AND dXtwo') + } [lrange $res($cu) 10 end] + +}} ;# end of [foreach_detail_mode] loop + +finish_test diff --git a/ext/fts5/test/fts5version.test b/ext/fts5/test/fts5version.test index 60ec81c03d..58dd9fe14e 100644 --- a/ext/fts5/test/fts5version.test +++ b/ext/fts5/test/fts5version.test @@ -16,7 +16,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5version -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -38,20 +38,20 @@ do_execsql_test 1.3 { sqlite3_db_config db DEFENSIVE 0 do_execsql_test 1.4 { - UPDATE t1_config set v=5 WHERE k='version'; + UPDATE t1_config set v=6 WHERE k='version'; } do_test 1.5 { db close sqlite3 db test.db catchsql { SELECT * FROM t1 WHERE t1 MATCH 'a' } -} {1 {invalid fts5 file format (found 5, expected 4) - run 'rebuild'}} +} {1 {invalid fts5 file format (found 6, expected 4 or 5) - run 'rebuild'}} do_test 1.6 { db close sqlite3 db test.db catchsql { INSERT INTO t1 VALUES('x y z') } -} {1 {invalid fts5 file format (found 5, expected 4) - run 'rebuild'}} +} {1 {invalid fts5 file format (found 6, expected 4 or 5) - run 'rebuild'}} do_test 1.7 { sqlite3_db_config db DEFENSIVE 0 @@ -59,7 +59,75 @@ do_test 1.7 { db close sqlite3 db test.db catchsql { SELECT * FROM t1 WHERE t1 MATCH 'a' } -} {1 {invalid fts5 file format (found 0, expected 4) - run 'rebuild'}} +} {1 {invalid fts5 file format (found 0, expected 4 or 5) - run 'rebuild'}} + +do_test 1.8 { + sqlite3_db_config db DEFENSIVE 0 + execsql { INSERT INTO t1_config VALUES('version', 4) } + execsql { INSERT INTO t1(t1, rank) VALUES('secure-delete', 1) } +} {} + +do_execsql_test 1.10 { + SELECT * FROM t1_config +} {secure-delete 1 version 4} + +do_execsql_test 1.11 { + INSERT INTO t1(rowid, one) VALUES(123, 'one two three'); + DELETE FROM t1 WHERE rowid=123; + SELECT * FROM t1_config +} {secure-delete 1 version 5} + +do_execsql_test 1.11 { + INSERT INTO t1(t1) VALUES('rebuild'); + SELECT * FROM t1_config +} {secure-delete 1 version 4} + +do_execsql_test 1.12 { + SELECT * FROM t1_config +} {secure-delete 1 version 4} + +#------------------------------------------------------------------------- +reset_db + +do_execsql_test 2.0 { + CREATE VIRTUAL TABLE xyz USING fts5(x); + INSERT INTO xyz(rowid, x) VALUES + (1, 'one document'), + (2, 'two document'), + (3, 'three document'), + (4, 'four 
document'), + (5, 'five document'), + (6, 'six document'); + + INSERT INTO xyz(xyz, rank) VALUES('secure-delete', 1); + SELECT v FROM xyz_config WHERE k='version'; +} {4} + +do_execsql_test 2.1 { + BEGIN; + INSERT INTO xyz(rowid, x) VALUES(7, 'seven document'); + SAVEPOINT one; + DELETE FROM xyz WHERE rowid = 4; +} + +do_execsql_test 2.2 { + SELECT v FROM xyz_config WHERE k='version'; +} {4} + +do_execsql_test 2.3 { + ROLLBACK TO one; + SELECT v FROM xyz_config WHERE k='version'; +} {4} + + +do_execsql_test 2.4 { + DELETE FROM xyz WHERE rowid = 3; + COMMIT; + SELECT v FROM xyz_config WHERE k='version'; +} {5} + + finish_test + diff --git a/ext/fts5/test/fts5vocab.test b/ext/fts5/test/fts5vocab.test index c457c5c210..b1644527ea 100644 --- a/ext/fts5/test/fts5vocab.test +++ b/ext/fts5/test/fts5vocab.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5vocab -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -513,6 +513,7 @@ do_execsql_test 10.5 { INSERT INTO ft(a) VALUES('4 5 6'); } +unset -nocomplain x res do_test 10.6 { set res [list] db eval { SELECT rowid FROM ft('4') } x { diff --git a/ext/fts5/test/fts5vocab2.test b/ext/fts5/test/fts5vocab2.test index 6f7aad329c..58416a7e90 100644 --- a/ext/fts5/test/fts5vocab2.test +++ b/ext/fts5/test/fts5vocab2.test @@ -15,7 +15,7 @@ source [file join [file dirname [info script]] fts5_common.tcl] set testprefix fts5vocab2 -# If SQLITE_ENABLE_FTS5 is defined, omit this file. +# If SQLITE_ENABLE_FTS5 is not defined, omit this file. ifcapable !fts5 { finish_test return @@ -280,6 +280,52 @@ do_catchsql_test 5.2 { INSERT INTO t1 SELECT randomblob(3000) FROM v1 } {1 {query aborted}} +#------------------------------------------------------------------------- +reset_db +sqlite3_fts5_may_be_corrupt 1 + +do_execsql_test 6.0 { + BEGIN TRANSACTION; + CREATE VIRTUAL TABLE t1 USING fts5(a,b unindexed,c,tokenize="porter ascii",tokendata=1); + REPLACE INTO t1_data VALUES(1,X'03090009'); + REPLACE INTO t1_data VALUES(10,X'000000000103030003010101020101030101'); + REPLACE INTO t1_data VALUES(137438953473,X'0000002e023061010202010162010203010163010204010167010601020201016801060102030101690106010204040606060808'); + REPLACE INTO t1_data VALUES(274877906945,X'0000001f013067020802010202010168020803010203010169020804010204040909'); + REPLACE INTO t1_data VALUES(412316860417,X'0000002e023061030202010162030203010163030204010167030601020201016803060102030101690306010204040606060808'); + COMMIT; +} + +do_execsql_test 6.1 { + CREATE VIRTUAL TABLE t3 USING fts5vocab('t1', 'row'); +} + +do_catchsql_test 6.2 { + SELECT * FROM t3; +} {1 {database disk image is malformed}} + +sqlite3_fts5_may_be_corrupt 0 + +#------------------------------------------------------------------------- +reset_db +do_execsql_test 7.0 { + CREATE VIRTUAL TABLE t1 USING fts5(a, b); + CREATE VIRTUAL TABLE v1 USING fts5vocab(t1, col); + + INSERT INTO t1 VALUES('xx', 'xx'); + + CREATE TABLE x1(t); + INSERT INTO x1 VALUES('xx'); + INSERT INTO x1 VALUES('xx'); + + SELECT term, col FROM v1; +} { + xx a xx b +} + +do_execsql_test 7.1 { + SELECT * FROM x1 WHERE 'a'=(SELECT col FROM v1 WHERE term=t) +} {xx xx} + finish_test diff --git a/ext/fts5/tool/mkfts5c.tcl b/ext/fts5/tool/mkfts5c.tcl index b1a55fa4ae..6f20a0cd73 100644 --- a/ext/fts5/tool/mkfts5c.tcl +++ b/ext/fts5/tool/mkfts5c.tcl @@ -2,7 +2,7 @@ # restart with tclsh \ exec tclsh "$0" "$@" -set srcdir [file 
dirname [file dirname [info script]]] +set srcdir [file dirname [file dirname [file normalize [info script]]]] set G(src) [string map [list %dir% $srcdir] { %dir%/fts5.h %dir%/fts5Int.h @@ -23,7 +23,27 @@ set G(src) [string map [list %dir% $srcdir] { }] set G(hdr) { - +/* +** This, the "fts5.c" source file, is a composite file that is itself +** assembled from the following files: +** +** fts5.h +** fts5Int.h +** fts5parse.h <--- Generated from fts5parse.y by Lemon +** fts5parse.c <--- Generated from fts5parse.y by Lemon +** fts5_aux.c +** fts5_buffer.c +** fts5_config.c +** fts5_expr.c +** fts5_hash.c +** fts5_index.c +** fts5_main.c +** fts5_storage.c +** fts5_tokenize.c +** fts5_unicode2.c +** fts5_varint.c +** fts5_vocab.c +*/ #if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) #if !defined(NDEBUG) && !defined(SQLITE_DEBUG) @@ -33,10 +53,16 @@ set G(hdr) { # undef NDEBUG #endif +#ifdef HAVE_STDINT_H +#include +#endif +#ifdef HAVE_INTTYPES_H +#include +#endif } set G(footer) { - +/* Here ends the fts5.c composite file. */ #endif /* !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_FTS5) */ } diff --git a/ext/icu/README.txt b/ext/icu/README.txt index be443f54ef..40def24662 100644 --- a/ext/icu/README.txt +++ b/ext/icu/README.txt @@ -1,19 +1,18 @@ - This directory contains source code for the SQLite "ICU" extension, an integration of the "International Components for Unicode" library with SQLite. Documentation follows. 1. Features - + 1.1 SQL Scalars upper() and lower() 1.2 Unicode Aware LIKE Operator 1.3 ICU Collation Sequences 1.4 SQL REGEXP Operator - + 2. Compilation and Usage - + 3. Bugs, Problems and Security Issues - + 3.1 The "case_sensitive_like" Pragma 3.2 The SQLITE_MAX_LIKE_PATTERN_LENGTH Macro 3.3 Collation Sequence Security Issue @@ -23,10 +22,10 @@ SQLite. Documentation follows. 1.1 SQL Scalars upper() and lower() - SQLite's built-in implementations of these two functions only + SQLite's built-in implementations of these two functions only provide case mapping for the 26 letters used in the English language. The ICU based functions provided by this extension - provide case mapping, where defined, for the full range of + provide case mapping, where defined, for the full range of unicode characters. ICU provides two types of case mapping, "general" case mapping and @@ -36,7 +35,7 @@ SQLite. Documentation follows. http://www.icu-project.org/userguide/caseMappings.html http://www.icu-project.org/userguide/posix.html#case_mappings - To utilise "general" case mapping, the upper() or lower() scalar + To utilise "general" case mapping, the upper() or lower() scalar functions are invoked with one argument: upper('abc') -> 'ABC' @@ -57,7 +56,7 @@ SQLite. Documentation follows. operator understands case equivalence for the 26 letters of the English language alphabet. The implementation of LIKE included in this extension uses the ICU function u_foldCase() to provide case - independent comparisons for the full range of unicode characters. + independent comparisons for the full range of unicode characters. The U_FOLD_CASE_DEFAULT flag is passed to u_foldCase(), meaning the dotless 'I' character used in the Turkish language is considered @@ -66,9 +65,9 @@ SQLite. Documentation follows. 
1.3 ICU Collation Sequences - A special SQL scalar function, icu_load_collation() is provided that + A special SQL scalar function, icu_load_collation() is provided that may be used to register ICU collation sequences with SQLite. It - is always called with exactly two arguments, the ICU locale + is always called with exactly two arguments, the ICU locale identifying the collation sequence to ICU, and the name of the SQLite collation sequence to create. For example, to create an SQLite collation sequence named "turkish" using Turkish language @@ -87,7 +86,7 @@ SQLite. Documentation follows. australian_penpal_name TEXT COLLATE australian, turkish_penpal_name TEXT COLLATE turkish ); - + 1.4 SQL REGEXP Operator This extension provides an implementation of the SQL binary @@ -116,7 +115,7 @@ SQLite. Documentation follows. and use it as a dynamically loadable SQLite extension. To do this using gcc on *nix: - gcc -fPIC -shared icu.c `pkg-config --libs --cflags icu-uc icu-io` \ + gcc -fPIC -shared icu.c `pkg-config --libs --cflags icu-io` \ -o libSqliteIcu.so You may need to add "-I" flags so that gcc can find sqlite3ext.h @@ -124,6 +123,11 @@ SQLite. Documentation follows. loaded into sqlite in the same way as any other dynamically loadable extension. + As of version 3.48, it can be enabled in the canonical build process + by passing one of --with-icu-config or --with-icu-ldflags to the + configure script, optionally together with --enable-icu-collations. + See the configure --help for more details. + 3 BUGS, PROBLEMS AND SECURITY ISSUES @@ -144,27 +148,21 @@ SQLite. Documentation follows. SQLITE_MAX_LIKE_PATTERN_LENGTH macro as the maximum length of a pattern in bytes (irrespective of encoding). The default value is defined in internal header file "limits.h". - - The ICU extension LIKE implementation suffers from the same + + The ICU extension LIKE implementation suffers from the same problem and uses the same solution. However, since the ICU extension code does not include the SQLite file "limits.h", modifying the default value therein does not affect the ICU extension. The default value of SQLITE_MAX_LIKE_PATTERN_LENGTH used by - the ICU extension LIKE operator is 50000, defined in source + the ICU extension LIKE operator is 50000, defined in source file "icu.c". - 3.3 Collation Sequence Security Issue + 3.3 Collation Sequence Security Internally, SQLite assumes that indices stored in database files are sorted according to the collation sequence indicated by the SQL schema. Changing the definition of a collation sequence after an index has been built is therefore equivalent to database - corruption. The SQLite library is not very well tested under - these conditions, and may contain potential buffer overruns - or other programming errors that could be exploited by a malicious - programmer. - - If the ICU extension is used in an environment where potentially - malicious users may execute arbitrary SQL (i.e. gears), they - should be prevented from invoking the icu_load_collation() function, - possibly using the authorisation callback. + corruption. The SQLite library is well tested for robustness in + the fact of database corruption. Database corruption may well + lead to incorrect answers, but should not cause memory errors. 
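Editorial note: the README fragments above show `icu_load_collation()` from SQL only. As a complement, here is a hedged C sketch of the full flow when the extension is built as the loadable module described in section 2. It uses only core SQLite APIs (`sqlite3_enable_load_extension`, `sqlite3_load_extension`, `sqlite3_exec`); the shared-library path `./libSqliteIcu.so` follows the gcc command shown above, `sqlite3_icu_init` is the extension's entry point, and the locale, table, and column names are examples echoing the README.

```c
/* Illustrative sketch: load the ICU extension at runtime and register a
** Turkish collation sequence named "turkish". */
#include <stdio.h>
#include <sqlite3.h>

int main(void){
  sqlite3 *db;
  char *zErr = 0;

  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_enable_load_extension(db, 1);
  if( sqlite3_load_extension(db, "./libSqliteIcu.so", "sqlite3_icu_init", &zErr) ){
    fprintf(stderr, "cannot load ICU extension: %s\n", zErr ? zErr : "unknown");
    sqlite3_free(zErr);
    sqlite3_close(db);
    return 1;
  }

  /* Register the collation, then use it in a schema and a query. */
  sqlite3_exec(db,
    "SELECT icu_load_collation('tr_TR', 'turkish');"
    "CREATE TABLE penpals(turkish_penpal_name TEXT COLLATE turkish);"
    "INSERT INTO penpals VALUES('Ilknur'), ('istanbul');"
    "SELECT turkish_penpal_name FROM penpals ORDER BY turkish_penpal_name;",
    0, 0, &zErr);
  if( zErr ){ fprintf(stderr, "%s\n", zErr); sqlite3_free(zErr); }

  sqlite3_close(db);
  return 0;
}
```

If ICU support is instead compiled into SQLite itself (e.g. with SQLITE_ENABLE_ICU), the extension-loading step is not needed; only the `icu_load_collation()` call and the schema remain.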
diff --git a/ext/icu/icu.c b/ext/icu/icu.c index 92d7c5438e..50110072b5 100644 --- a/ext/icu/icu.c +++ b/ext/icu/icu.c @@ -299,8 +299,9 @@ static void icuRegexpFunc(sqlite3_context *p, int nArg, sqlite3_value **apArg){ if( U_SUCCESS(status) ){ sqlite3_set_auxdata(p, 0, pExpr, icuRegexpDelete); - }else{ - assert(!pExpr); + pExpr = sqlite3_get_auxdata(p, 0); + } + if( !pExpr ){ icuFunctionError(p, "uregex_open", status); return; } @@ -470,7 +471,7 @@ static void icuLoadCollation( UCollator *pUCollator; /* ICU library collation object */ int rc; /* Return code from sqlite3_create_collation_x() */ - assert(nArg==2); + assert(nArg==2 || nArg==3); (void)nArg; /* Unused parameter */ zLocale = (const char *)sqlite3_value_text(apArg[0]); zName = (const char *)sqlite3_value_text(apArg[1]); @@ -485,7 +486,39 @@ static void icuLoadCollation( return; } assert(p); - + if(nArg==3){ + const char *zOption = (const char*)sqlite3_value_text(apArg[2]); + static const struct { + const char *zName; + UColAttributeValue val; + } aStrength[] = { + { "PRIMARY", UCOL_PRIMARY }, + { "SECONDARY", UCOL_SECONDARY }, + { "TERTIARY", UCOL_TERTIARY }, + { "DEFAULT", UCOL_DEFAULT_STRENGTH }, + { "QUARTERNARY", UCOL_QUATERNARY }, + { "IDENTICAL", UCOL_IDENTICAL }, + }; + unsigned int i; + for(i=0; i=sizeof(aStrength)/sizeof(aStrength[0]) ){ + sqlite3_str *pStr = sqlite3_str_new(sqlite3_context_db_handle(p)); + sqlite3_str_appendf(pStr, + "unknown collation strength \"%s\" - should be one of:", + zOption); + for(i=0; i +#include + +#include +#include + +/* +** nKeyVal: +** The number of values that make up the 'key' for the current pCheck +** statement. +** +** rc: +** Error code returned by most recent sqlite3_intck_step() or +** sqlite3_intck_unlock() call. This is set to SQLITE_DONE when +** the integrity-check operation is finished. +** +** zErr: +** If the object has entered the error state, this is the error message. +** Is freed using sqlite3_free() when the object is deleted. +** +** zTestSql: +** The value returned by the most recent call to sqlite3_intck_testsql(). +** Each call to testsql() frees the previous zTestSql value (using +** sqlite3_free()) and replaces it with the new value it will return. +*/ +struct sqlite3_intck { + sqlite3 *db; + const char *zDb; /* Copy of zDb parameter to _open() */ + char *zObj; /* Current object. Or NULL. */ + + sqlite3_stmt *pCheck; /* Current check statement */ + char *zKey; + int nKeyVal; + + char *zMessage; + int bCorruptSchema; + + int rc; /* Error code */ + char *zErr; /* Error message */ + char *zTestSql; /* Returned by sqlite3_intck_test_sql() */ +}; + + +/* +** Some error has occurred while using database p->db. Save the error message +** and error code currently held by the database handle in p->rc and p->zErr. +*/ +static void intckSaveErrmsg(sqlite3_intck *p){ + p->rc = sqlite3_errcode(p->db); + sqlite3_free(p->zErr); + p->zErr = sqlite3_mprintf("%s", sqlite3_errmsg(p->db)); +} + +/* +** If the handle passed as the first argument is already in the error state, +** then this function is a no-op (returns NULL immediately). Otherwise, if an +** error occurs within this function, it leaves an error in said handle. +** +** Otherwise, this function attempts to prepare SQL statement zSql and +** return the resulting statement handle to the user. 
+*/ +static sqlite3_stmt *intckPrepare(sqlite3_intck *p, const char *zSql){ + sqlite3_stmt *pRet = 0; + if( p->rc==SQLITE_OK ){ + p->rc = sqlite3_prepare_v2(p->db, zSql, -1, &pRet, 0); + if( p->rc!=SQLITE_OK ){ + intckSaveErrmsg(p); + assert( pRet==0 ); + } + } + return pRet; +} + +/* +** If the handle passed as the first argument is already in the error state, +** then this function is a no-op (returns NULL immediately). Otherwise, if an +** error occurs within this function, it leaves an error in said handle. +** +** Otherwise, this function treats argument zFmt as a printf() style format +** string. It formats it according to the trailing arguments and then +** attempts to prepare the results and return the resulting prepared +** statement. +*/ +static sqlite3_stmt *intckPrepareFmt(sqlite3_intck *p, const char *zFmt, ...){ + sqlite3_stmt *pRet = 0; + va_list ap; + char *zSql = 0; + va_start(ap, zFmt); + zSql = sqlite3_vmprintf(zFmt, ap); + if( p->rc==SQLITE_OK && zSql==0 ){ + p->rc = SQLITE_NOMEM; + } + pRet = intckPrepare(p, zSql); + sqlite3_free(zSql); + va_end(ap); + return pRet; +} + +/* +** Finalize SQL statement pStmt. If an error occurs and the handle passed +** as the first argument does not already contain an error, store the +** error in the handle. +*/ +static void intckFinalize(sqlite3_intck *p, sqlite3_stmt *pStmt){ + int rc = sqlite3_finalize(pStmt); + if( p->rc==SQLITE_OK && rc!=SQLITE_OK ){ + intckSaveErrmsg(p); + } +} + +/* +** If there is already an error in handle p, return it. Otherwise, call +** sqlite3_step() on the statement handle and return that value. +*/ +static int intckStep(sqlite3_intck *p, sqlite3_stmt *pStmt){ + if( p->rc ) return p->rc; + return sqlite3_step(pStmt); +} + +/* +** Execute SQL statement zSql. There is no way to obtain any results +** returned by the statement. This function uses the sqlite3_intck error +** code convention. +*/ +static void intckExec(sqlite3_intck *p, const char *zSql){ + sqlite3_stmt *pStmt = 0; + pStmt = intckPrepare(p, zSql); + intckStep(p, pStmt); + intckFinalize(p, pStmt); +} + +/* +** A wrapper around sqlite3_mprintf() that uses the sqlite3_intck error +** code convention. +*/ +static char *intckMprintf(sqlite3_intck *p, const char *zFmt, ...){ + va_list ap; + char *zRet = 0; + va_start(ap, zFmt); + zRet = sqlite3_vmprintf(zFmt, ap); + if( p->rc==SQLITE_OK ){ + if( zRet==0 ){ + p->rc = SQLITE_NOMEM; + } + }else{ + sqlite3_free(zRet); + zRet = 0; + } + va_end(ap); + return zRet; +} + +/* +** This is used by sqlite3_intck_unlock() to save the vector key value +** required to restart the current pCheck query as a nul-terminated string +** in p->zKey. +*/ +static void intckSaveKey(sqlite3_intck *p){ + int ii; + char *zSql = 0; + sqlite3_stmt *pStmt = 0; + sqlite3_stmt *pXinfo = 0; + const char *zDir = 0; + + assert( p->pCheck ); + assert( p->zKey==0 ); + + pXinfo = intckPrepareFmt(p, + "SELECT group_concat(desc, '') FROM %Q.sqlite_schema s, " + "pragma_index_xinfo(%Q, %Q) " + "WHERE s.type='index' AND s.name=%Q", + p->zDb, p->zObj, p->zDb, p->zObj + ); + if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pXinfo) ){ + zDir = (const char*)sqlite3_column_text(pXinfo, 0); + } + + if( zDir==0 ){ + /* Object is a table, not an index. This is the easy case,as there are + ** no DESC columns or NULL values in a primary key. 
*/ + const char *zSep = "SELECT '(' || "; + for(ii=0; iinKeyVal; ii++){ + zSql = intckMprintf(p, "%z%squote(?)", zSql, zSep); + zSep = " || ', ' || "; + } + zSql = intckMprintf(p, "%z || ')'", zSql); + }else{ + + /* Object is an index. */ + assert( p->nKeyVal>1 ); + for(ii=p->nKeyVal; ii>0; ii--){ + int bLastIsDesc = zDir[ii-1]=='1'; + int bLastIsNull = sqlite3_column_type(p->pCheck, ii)==SQLITE_NULL; + const char *zLast = sqlite3_column_name(p->pCheck, ii); + char *zLhs = 0; + char *zRhs = 0; + char *zWhere = 0; + + if( bLastIsNull ){ + if( bLastIsDesc ) continue; + zWhere = intckMprintf(p, "'%s IS NOT NULL'", zLast); + }else{ + const char *zOp = bLastIsDesc ? "<" : ">"; + zWhere = intckMprintf(p, "'%s %s ' || quote(?%d)", zLast, zOp, ii); + } + + if( ii>1 ){ + const char *zLhsSep = ""; + const char *zRhsSep = ""; + int jj; + for(jj=0; jjpCheck,jj+1); + zLhs = intckMprintf(p, "%z%s%s", zLhs, zLhsSep, zAlias); + zRhs = intckMprintf(p, "%z%squote(?%d)", zRhs, zRhsSep, jj+1); + zLhsSep = ","; + zRhsSep = " || ',' || "; + } + + zWhere = intckMprintf(p, + "'(%z) IS (' || %z || ') AND ' || %z", + zLhs, zRhs, zWhere); + } + zWhere = intckMprintf(p, "'WHERE ' || %z", zWhere); + + zSql = intckMprintf(p, "%z%s(quote( %z ) )", + zSql, + (zSql==0 ? "VALUES" : ",\n "), + zWhere + ); + } + zSql = intckMprintf(p, + "WITH wc(q) AS (\n%z\n)" + "SELECT 'VALUES' || group_concat('(' || q || ')', ',\n ') FROM wc" + , zSql + ); + } + + pStmt = intckPrepare(p, zSql); + if( p->rc==SQLITE_OK ){ + for(ii=0; iinKeyVal; ii++){ + sqlite3_bind_value(pStmt, ii+1, sqlite3_column_value(p->pCheck, ii+1)); + } + if( SQLITE_ROW==sqlite3_step(pStmt) ){ + p->zKey = intckMprintf(p,"%s",(const char*)sqlite3_column_text(pStmt, 0)); + } + intckFinalize(p, pStmt); + } + + sqlite3_free(zSql); + intckFinalize(p, pXinfo); +} + +/* +** Find the next database object (table or index) to check. If successful, +** set sqlite3_intck.zObj to point to a nul-terminated buffer containing +** the object's name before returning. +*/ +static void intckFindObject(sqlite3_intck *p){ + sqlite3_stmt *pStmt = 0; + char *zPrev = p->zObj; + p->zObj = 0; + + assert( p->rc==SQLITE_OK ); + assert( p->pCheck==0 ); + + pStmt = intckPrepareFmt(p, + "WITH tables(table_name) AS (" + " SELECT name" + " FROM %Q.sqlite_schema WHERE (type='table' OR type='index') AND rootpage" + " UNION ALL " + " SELECT 'sqlite_schema'" + ")" + "SELECT table_name FROM tables " + "WHERE ?1 IS NULL OR table_name%s?1 " + "ORDER BY 1" + , p->zDb, (p->zKey ? ">=" : ">") + ); + + if( p->rc==SQLITE_OK ){ + sqlite3_bind_text(pStmt, 1, zPrev, -1, SQLITE_TRANSIENT); + if( sqlite3_step(pStmt)==SQLITE_ROW ){ + p->zObj = intckMprintf(p,"%s",(const char*)sqlite3_column_text(pStmt, 0)); + } + } + intckFinalize(p, pStmt); + + /* If this is a new object, ensure the previous key value is cleared. */ + if( sqlite3_stricmp(p->zObj, zPrev) ){ + sqlite3_free(p->zKey); + p->zKey = 0; + } + + sqlite3_free(zPrev); +} + +/* +** Return the size in bytes of the first token in nul-terminated buffer z. +** For the purposes of this call, a token is either: +** +** * a quoted SQL string, +* * a contiguous series of ascii alphabet characters, or +* * any other single byte. 
+*/ +static int intckGetToken(const char *z){ + char c = z[0]; + int iRet = 1; + if( c=='\'' || c=='"' || c=='`' ){ + while( 1 ){ + if( z[iRet]==c ){ + iRet++; + if( z[iRet]!=c ) break; + } + iRet++; + } + } + else if( c=='[' ){ + while( z[iRet++]!=']' && z[iRet] ); + } + else if( (c>='A' && c<='Z') || (c>='a' && c<='z') ){ + while( (z[iRet]>='A' && z[iRet]<='Z') || (z[iRet]>='a' && z[iRet]<='z') ){ + iRet++; + } + } + + return iRet; +} + +/* +** Return true if argument c is an ascii whitespace character. +*/ +static int intckIsSpace(char c){ + return (c==' ' || c=='\t' || c=='\n' || c=='\r'); +} + +/* +** Argument z points to the text of a CREATE INDEX statement. This function +** identifies the part of the text that contains either the index WHERE +** clause (if iCol<0) or the iCol'th column of the index. +** +** If (iCol<0), the identified fragment does not include the "WHERE" keyword, +** only the expression that follows it. If (iCol>=0) then the identified +** fragment does not include any trailing sort-order keywords - "ASC" or +** "DESC". +** +** If the CREATE INDEX statement does not contain the requested field or +** clause, NULL is returned and (*pnByte) is set to 0. Otherwise, a pointer to +** the identified fragment is returned and output parameter (*pnByte) set +** to its size in bytes. +*/ +static const char *intckParseCreateIndex(const char *z, int iCol, int *pnByte){ + int iOff = 0; + int iThisCol = 0; + int iStart = 0; + int nOpen = 0; + + const char *zRet = 0; + int nRet = 0; + + int iEndOfCol = 0; + + /* Skip forward until the first "(" token */ + while( z[iOff]!='(' ){ + iOff += intckGetToken(&z[iOff]); + if( z[iOff]=='\0' ) return 0; + } + assert( z[iOff]=='(' ); + + nOpen = 1; + iOff++; + iStart = iOff; + while( z[iOff] ){ + const char *zToken = &z[iOff]; + int nToken = 0; + + /* Check if this is the end of the current column - either a "," or ")" + ** when nOpen==1. */ + if( nOpen==1 ){ + if( z[iOff]==',' || z[iOff]==')' ){ + if( iCol==iThisCol ){ + int iEnd = iEndOfCol ? iEndOfCol : iOff; + nRet = (iEnd - iStart); + zRet = &z[iStart]; + break; + } + iStart = iOff+1; + while( intckIsSpace(z[iStart]) ) iStart++; + iThisCol++; + } + if( z[iOff]==')' ) break; + } + if( z[iOff]=='(' ) nOpen++; + if( z[iOff]==')' ) nOpen--; + nToken = intckGetToken(zToken); + + if( (nToken==3 && 0==sqlite3_strnicmp(zToken, "ASC", nToken)) + || (nToken==4 && 0==sqlite3_strnicmp(zToken, "DESC", nToken)) + ){ + iEndOfCol = iOff; + }else if( 0==intckIsSpace(zToken[0]) ){ + iEndOfCol = 0; + } + + iOff += nToken; + } + + /* iStart is now the byte offset of 1 byte passed the final ')' in the + ** CREATE INDEX statement. Try to find a WHERE clause to return. */ + while( zRet==0 && z[iOff] ){ + int n = intckGetToken(&z[iOff]); + if( n==5 && 0==sqlite3_strnicmp(&z[iOff], "where", 5) ){ + zRet = &z[iOff+5]; + nRet = (int)strlen(zRet); + } + iOff += n; + } + + /* Trim any whitespace from the start and end of the returned string. 
*/ + if( zRet ){ + while( intckIsSpace(zRet[0]) ){ + nRet--; + zRet++; + } + while( nRet>0 && intckIsSpace(zRet[nRet-1]) ) nRet--; + } + + *pnByte = nRet; + return zRet; +} + +/* +** User-defined SQL function wrapper for intckParseCreateIndex(): +** +** SELECT parse_create_index(, ); +*/ +static void intckParseCreateIndexFunc( + sqlite3_context *pCtx, + int nVal, + sqlite3_value **apVal +){ + const char *zSql = (const char*)sqlite3_value_text(apVal[0]); + int idx = sqlite3_value_int(apVal[1]); + const char *zRes = 0; + int nRes = 0; + + assert( nVal==2 ); + if( zSql ){ + zRes = intckParseCreateIndex(zSql, idx, &nRes); + } + sqlite3_result_text(pCtx, zRes, nRes, SQLITE_TRANSIENT); +} + +/* +** Return true if sqlite3_intck.db has automatic indexes enabled, false +** otherwise. +*/ +static int intckGetAutoIndex(sqlite3_intck *p){ + int bRet = 0; + sqlite3_stmt *pStmt = 0; + pStmt = intckPrepare(p, "PRAGMA automatic_index"); + if( SQLITE_ROW==intckStep(p, pStmt) ){ + bRet = sqlite3_column_int(pStmt, 0); + } + intckFinalize(p, pStmt); + return bRet; +} + +/* +** Return true if zObj is an index, or false otherwise. +*/ +static int intckIsIndex(sqlite3_intck *p, const char *zObj){ + int bRet = 0; + sqlite3_stmt *pStmt = 0; + pStmt = intckPrepareFmt(p, + "SELECT 1 FROM %Q.sqlite_schema WHERE name=%Q AND type='index'", + p->zDb, zObj + ); + if( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + bRet = 1; + } + intckFinalize(p, pStmt); + return bRet; +} + +/* +** Return a pointer to a nul-terminated buffer containing the SQL statement +** used to check database object zObj (a table or index) for corruption. +** If parameter zPrev is not NULL, then it must be a string containing the +** vector key required to restart the check where it left off last time. +** If pnKeyVal is not NULL, then (*pnKeyVal) is set to the number of +** columns in the vector key value for the specified object. +** +** This function uses the sqlite3_intck error code convention. +*/ +static char *intckCheckObjectSql( + sqlite3_intck *p, /* Integrity check object */ + const char *zObj, /* Object (table or index) to scan */ + const char *zPrev, /* Restart key vector, if any */ + int *pnKeyVal /* OUT: Number of key-values for this scan */ +){ + char *zRet = 0; + sqlite3_stmt *pStmt = 0; + int bAutoIndex = 0; + int bIsIndex = 0; + + const char *zCommon = + /* Relation without_rowid also contains just one row. Column "b" is + ** set to true if the table being examined is a WITHOUT ROWID table, + ** or false otherwise. */ + ", without_rowid(b) AS (" + " SELECT EXISTS (" + " SELECT 1 FROM tabname, pragma_index_list(tab, db) AS l" + " WHERE origin='pk' " + " AND NOT EXISTS (SELECT 1 FROM sqlite_schema WHERE name=l.name)" + " )" + ")" + "" + /* Table idx_cols contains 1 row for each column in each index on the + ** table being checked. Columns are: + ** + ** idx_name: Name of the index. + ** idx_ispk: True if this index is the PK of a WITHOUT ROWID table. + ** col_name: Name of indexed column, or NULL for index on expression. + ** col_expr: Indexed expression, including COLLATE clause. + ** col_alias: Alias used for column in 'intck_wrapper' table. 
+ */ + ", idx_cols(idx_name, idx_ispk, col_name, col_expr, col_alias) AS (" + " SELECT l.name, (l.origin=='pk' AND w.b), i.name, COALESCE((" + " SELECT parse_create_index(sql, i.seqno) FROM " + " sqlite_schema WHERE name = l.name" + " ), format('\"%w\"', i.name) || ' COLLATE ' || quote(i.coll))," + " 'c' || row_number() OVER ()" + " FROM " + " tabname t," + " without_rowid w," + " pragma_index_list(t.tab, t.db) l," + " pragma_index_xinfo(l.name) i" + " WHERE i.key" + " UNION ALL" + " SELECT '', 1, '_rowid_', '_rowid_', 'r1' FROM without_rowid WHERE b=0" + ")" + "" + "" + /* + ** For a PK declared as "PRIMARY KEY(a, b) ... WITHOUT ROWID", where + ** the intck_wrapper aliases of "a" and "b" are "c1" and "c2": + ** + ** o_pk: "o.c1, o.c2" + ** i_pk: "i.'a', i.'b'" + ** ... + ** n_pk: 2 + */ + ", tabpk(db, tab, idx, o_pk, i_pk, q_pk, eq_pk, ps_pk, pk_pk, n_pk) AS (" + " WITH pkfields(f, a) AS (" + " SELECT i.col_name, i.col_alias FROM idx_cols i WHERE i.idx_ispk" + " )" + " SELECT t.db, t.tab, t.idx, " + " group_concat(a, ', '), " + " group_concat('i.'||quote(f), ', '), " + " group_concat('quote(o.'||a||')', ' || '','' || '), " + " format('(%s)==(%s)'," + " group_concat('o.'||a, ', '), " + " group_concat(format('\"%w\"', f), ', ')" + " )," + " group_concat('%s', ',')," + " group_concat('quote('||a||')', ', '), " + " count(*)" + " FROM tabname t, pkfields" + ")" + "" + ", idx(name, match_expr, partial, partial_alias, idx_ps, idx_idx) AS (" + " SELECT idx_name," + " format('(%s,%s) IS (%s,%s)', " + " group_concat(i.col_expr, ', '), i_pk," + " group_concat('o.'||i.col_alias, ', '), o_pk" + " ), " + " parse_create_index(" + " (SELECT sql FROM sqlite_schema WHERE name=idx_name), -1" + " )," + " 'cond' || row_number() OVER ()" + " , group_concat('%s', ',')" + " , group_concat('quote('||i.col_alias||')', ', ')" + " FROM tabpk t, " + " without_rowid w," + " idx_cols i" + " WHERE i.idx_ispk==0 " + " GROUP BY idx_name" + ")" + "" + ", wrapper_with(s) AS (" + " SELECT 'intck_wrapper AS (\n SELECT\n ' || (" + " WITH f(a, b) AS (" + " SELECT col_expr, col_alias FROM idx_cols" + " UNION ALL " + " SELECT partial, partial_alias FROM idx WHERE partial IS NOT NULL" + " )" + " SELECT group_concat(format('%s AS %s', a, b), ',\n ') FROM f" + " )" + " || format('\n FROM %Q.%Q ', t.db, t.tab)" + /* If the object being checked is a table, append "NOT INDEXED". + ** Otherwise, append "INDEXED BY ", and then, if the index + ** is a partial index " WHERE ". */ + " || CASE WHEN t.idx IS NULL THEN " + " 'NOT INDEXED'" + " ELSE" + " format('INDEXED BY %Q%s', t.idx, ' WHERE '||i.partial)" + " END" + " || '\n)'" + " FROM tabname t LEFT JOIN idx i ON (i.name=t.idx)" + ")" + "" + ; + + bAutoIndex = intckGetAutoIndex(p); + if( bAutoIndex ) intckExec(p, "PRAGMA automatic_index = 0"); + + bIsIndex = intckIsIndex(p, zObj); + if( bIsIndex ){ + pStmt = intckPrepareFmt(p, + /* Table idxname contains a single row. The first column, "db", contains + ** the name of the db containing the table (e.g. "main") and the second, + ** "tab", the name of the table itself. 
*/ + "WITH tabname(db, tab, idx) AS (" + " SELECT %Q, (SELECT tbl_name FROM %Q.sqlite_schema WHERE name=%Q), %Q " + ")" + "" + ", whereclause(w_c) AS (%s)" + "" + "%s" /* zCommon */ + "" + ", case_statement(c) AS (" + " SELECT " + " 'CASE WHEN (' || group_concat(col_alias, ', ') || ', 1) IS (\n' " + " || ' SELECT ' || group_concat(col_expr, ', ') || ', 1 FROM '" + " || format('%%Q.%%Q NOT INDEXED WHERE %%s\n', t.db, t.tab, p.eq_pk)" + " || ' )\n THEN NULL\n '" + " || 'ELSE format(''surplus entry ('" + " || group_concat('%%s', ',') || ',' || p.ps_pk" + " || ') in index ' || t.idx || ''', ' " + " || group_concat('quote('||i.col_alias||')', ', ') || ', ' || p.pk_pk" + " || ')'" + " || '\n END AS error_message'" + " FROM tabname t, tabpk p, idx_cols i WHERE i.idx_name=t.idx" + ")" + "" + ", thiskey(k, n) AS (" + " SELECT group_concat(i.col_alias, ', ') || ', ' || p.o_pk, " + " count(*) + p.n_pk " + " FROM tabpk p, idx_cols i WHERE i.idx_name=p.idx" + ")" + "" + ", main_select(m, n) AS (" + " SELECT format(" + " 'WITH %%s\n' ||" + " ', idx_checker AS (\n' ||" + " ' SELECT %%s,\n' ||" + " ' %%s\n' || " + " ' FROM intck_wrapper AS o\n' ||" + " ')\n'," + " ww.s, c, t.k" + " ), t.n" + " FROM case_statement, wrapper_with ww, thiskey t" + ")" + + "SELECT m || " + " group_concat('SELECT * FROM idx_checker ' || w_c, ' UNION ALL '), n" + " FROM " + "main_select, whereclause " + , p->zDb, p->zDb, zObj, zObj + , zPrev ? zPrev : "VALUES('')", zCommon + ); + }else{ + pStmt = intckPrepareFmt(p, + /* Table tabname contains a single row. The first column, "db", contains + ** the name of the db containing the table (e.g. "main") and the second, + ** "tab", the name of the table itself. */ + "WITH tabname(db, tab, idx, prev) AS (SELECT %Q, %Q, NULL, %Q)" + "" + "%s" /* zCommon */ + + /* expr(e) contains one row for each index on table zObj. Value e + ** is set to an expression that evaluates to NULL if the required + ** entry is present in the index, or an error message otherwise. */ + ", expr(e, p) AS (" + " SELECT format('CASE WHEN EXISTS \n" + " (SELECT 1 FROM %%Q.%%Q AS i INDEXED BY %%Q WHERE %%s%%s)\n" + " THEN NULL\n" + " ELSE format(''entry (%%s,%%s) missing from index %%s'', %%s, %%s)\n" + " END\n'" + " , t.db, t.tab, i.name, i.match_expr, ' AND (' || partial || ')'," + " i.idx_ps, t.ps_pk, i.name, i.idx_idx, t.pk_pk)," + " CASE WHEN partial IS NULL THEN NULL ELSE i.partial_alias END" + " FROM tabpk t, idx i" + ")" + + ", numbered(ii, cond, e) AS (" + " SELECT 0, 'n.ii=0', 'NULL'" + " UNION ALL " + " SELECT row_number() OVER ()," + " '(n.ii='||row_number() OVER ()||COALESCE(' AND '||p||')', ')'), e" + " FROM expr" + ")" + + ", counter_with(w) AS (" + " SELECT 'WITH intck_counter(ii) AS (\n ' || " + " group_concat('SELECT '||ii, ' UNION ALL\n ') " + " || '\n)' FROM numbered" + ")" + "" + ", case_statement(c) AS (" + " SELECT 'CASE ' || " + " group_concat(format('\n WHEN %%s THEN (%%s)', cond, e), '') ||" + " '\nEND AS error_message'" + " FROM numbered" + ")" + "" + + /* This table contains a single row consisting of a single value - + ** the text of an SQL expression that may be used by the main SQL + ** statement to output an SQL literal that can be used to resume + ** the scan if it is suspended. e.g. 
for a rowid table, an expression + ** like: + ** + ** format('(%d,%d)', _rowid_, n.ii) + */ + ", thiskey(k, n) AS (" + " SELECT o_pk || ', ii', n_pk+1 FROM tabpk" + ")" + "" + ", whereclause(w_c) AS (" + " SELECT CASE WHEN prev!='' THEN " + " '\nWHERE (' || o_pk ||', n.ii) > ' || prev" + " ELSE ''" + " END" + " FROM tabpk, tabname" + ")" + "" + ", main_select(m, n) AS (" + " SELECT format(" + " '%%s, %%s\nSELECT %%s,\n%%s\nFROM intck_wrapper AS o" + ", intck_counter AS n%%s\nORDER BY %%s', " + " w, ww.s, c, thiskey.k, whereclause.w_c, t.o_pk" + " ), thiskey.n" + " FROM case_statement, tabpk t, counter_with, " + " wrapper_with ww, thiskey, whereclause" + ")" + + "SELECT m, n FROM main_select", + p->zDb, zObj, zPrev, zCommon + ); + } + + while( p->rc==SQLITE_OK && SQLITE_ROW==sqlite3_step(pStmt) ){ + zRet = intckMprintf(p, "%s", (const char*)sqlite3_column_text(pStmt, 0)); + if( pnKeyVal ){ + *pnKeyVal = sqlite3_column_int(pStmt, 1); + } + } + intckFinalize(p, pStmt); + + if( bAutoIndex ) intckExec(p, "PRAGMA automatic_index = 1"); + return zRet; +} + +/* +** Open a new integrity-check object. +*/ +int sqlite3_intck_open( + sqlite3 *db, /* Database handle to operate on */ + const char *zDbArg, /* "main", "temp" etc. */ + sqlite3_intck **ppOut /* OUT: New integrity-check handle */ +){ + sqlite3_intck *pNew = 0; + int rc = SQLITE_OK; + const char *zDb = zDbArg ? zDbArg : "main"; + int nDb = (int)strlen(zDb); + + pNew = (sqlite3_intck*)sqlite3_malloc(sizeof(*pNew) + nDb + 1); + if( pNew==0 ){ + rc = SQLITE_NOMEM; + }else{ + memset(pNew, 0, sizeof(*pNew)); + pNew->db = db; + pNew->zDb = (const char*)&pNew[1]; + memcpy(&pNew[1], zDb, nDb+1); + rc = sqlite3_create_function(db, "parse_create_index", + 2, SQLITE_UTF8, 0, intckParseCreateIndexFunc, 0, 0 + ); + if( rc!=SQLITE_OK ){ + sqlite3_intck_close(pNew); + pNew = 0; + } + } + + *ppOut = pNew; + return rc; +} + +/* +** Free the integrity-check object. +*/ +void sqlite3_intck_close(sqlite3_intck *p){ + if( p ){ + sqlite3_finalize(p->pCheck); + sqlite3_create_function( + p->db, "parse_create_index", 1, SQLITE_UTF8, 0, 0, 0, 0 + ); + sqlite3_free(p->zObj); + sqlite3_free(p->zKey); + sqlite3_free(p->zTestSql); + sqlite3_free(p->zErr); + sqlite3_free(p->zMessage); + sqlite3_free(p); + } +} + +/* +** Step the integrity-check object. +*/ +int sqlite3_intck_step(sqlite3_intck *p){ + if( p->rc==SQLITE_OK ){ + + if( p->zMessage ){ + sqlite3_free(p->zMessage); + p->zMessage = 0; + } + + if( p->bCorruptSchema ){ + p->rc = SQLITE_DONE; + }else + if( p->pCheck==0 ){ + intckFindObject(p); + if( p->rc==SQLITE_OK ){ + if( p->zObj ){ + char *zSql = 0; + zSql = intckCheckObjectSql(p, p->zObj, p->zKey, &p->nKeyVal); + p->pCheck = intckPrepare(p, zSql); + sqlite3_free(zSql); + sqlite3_free(p->zKey); + p->zKey = 0; + }else{ + p->rc = SQLITE_DONE; + } + }else if( p->rc==SQLITE_CORRUPT ){ + p->rc = SQLITE_OK; + p->zMessage = intckMprintf(p, "%s", + "corruption found while reading database schema" + ); + p->bCorruptSchema = 1; + } + } + + if( p->pCheck ){ + assert( p->rc==SQLITE_OK ); + if( sqlite3_step(p->pCheck)==SQLITE_ROW ){ + /* Normal case, do nothing. 
*/ + }else{ + intckFinalize(p, p->pCheck); + p->pCheck = 0; + p->nKeyVal = 0; + if( p->rc==SQLITE_CORRUPT ){ + p->rc = SQLITE_OK; + p->zMessage = intckMprintf(p, + "corruption found while scanning database object %s", p->zObj + ); + } + } + } + } + + return p->rc; +} + +/* +** Return a message describing the corruption encountered by the most recent +** call to sqlite3_intck_step(), or NULL if no corruption was encountered. +*/ +const char *sqlite3_intck_message(sqlite3_intck *p){ + assert( p->pCheck==0 || p->zMessage==0 ); + if( p->zMessage ){ + return p->zMessage; + } + if( p->pCheck ){ + return (const char*)sqlite3_column_text(p->pCheck, 0); + } + return 0; +} + +/* +** Return the error code and message. +*/ +int sqlite3_intck_error(sqlite3_intck *p, const char **pzErr){ + if( pzErr ) *pzErr = p->zErr; + return (p->rc==SQLITE_DONE ? SQLITE_OK : p->rc); +} + +/* +** Close any read transaction the integrity-check object is holding open +** on the database. +*/ +int sqlite3_intck_unlock(sqlite3_intck *p){ + if( p->rc==SQLITE_OK && p->pCheck ){ + assert( p->zKey==0 && p->nKeyVal>0 ); + intckSaveKey(p); + intckFinalize(p, p->pCheck); + p->pCheck = 0; + } + return p->rc; +} + +/* +** Return the SQL statement used to check object zObj. Or, if zObj is +** NULL, the current SQL statement. +*/ +const char *sqlite3_intck_test_sql(sqlite3_intck *p, const char *zObj){ + sqlite3_free(p->zTestSql); + if( zObj ){ + p->zTestSql = intckCheckObjectSql(p, zObj, 0, 0); + }else{ + if( p->zObj ){ + p->zTestSql = intckCheckObjectSql(p, p->zObj, p->zKey, 0); + }else{ + sqlite3_free(p->zTestSql); + p->zTestSql = 0; + } + } + return p->zTestSql; +} diff --git a/ext/intck/sqlite3intck.h b/ext/intck/sqlite3intck.h new file mode 100644 index 0000000000..e08a86f289 --- /dev/null +++ b/ext/intck/sqlite3intck.h @@ -0,0 +1,171 @@ +/* +** 2024-02-08 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +*/ + +/* +** Incremental Integrity-Check Extension +** ------------------------------------- +** +** This module contains code to check whether or not an SQLite database +** is well-formed or corrupt. This is the same task as performed by SQLite's +** built-in "PRAGMA integrity_check" command. This module differs from +** "PRAGMA integrity_check" in that: +** +** + It is less thorough - this module does not detect certain types +** of corruption that are detected by the PRAGMA command. However, +** it does detect all kinds of corruption that are likely to cause +** errors in SQLite applications. +** +** + It is slower. Sometimes up to three times slower. +** +** + It allows integrity-check operations to be split into multiple +** transactions, so that the database does not need to be read-locked +** for the duration of the integrity-check. 
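+**
+** (Editor's note, not part of the original header.) To take advantage of
+** the multi-transaction behaviour described in the last point above, an
+** application may call sqlite3_intck_unlock() between batches of
+** sqlite3_intck_step() calls. A hedged sketch, in which nStep,
+** log_corruption() and do_other_work() are hypothetical:
+**
+**   int nStep = 0;
+**   while( SQLITE_OK==sqlite3_intck_step(p) ){
+**     const char *zMsg = sqlite3_intck_message(p);
+**     if( zMsg ) log_corruption(zMsg);
+**     if( (++nStep % 1000)==0 ){
+**       sqlite3_intck_unlock(p);  /* release the read lock between batches */
+**       do_other_work();          /* e.g. let writers proceed */
+**     }
+**   }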
+** +** One way to use the API to run integrity-check on the "main" database +** of handle db is: +** +** int rc = SQLITE_OK; +** sqlite3_intck *p = 0; +** +** sqlite3_intck_open(db, "main", &p); +** while( SQLITE_OK==sqlite3_intck_step(p) ){ +** const char *zMsg = sqlite3_intck_message(p); +** if( zMsg ) printf("corruption: %s\n", zMsg); +** } +** rc = sqlite3_intck_error(p, &zErr); +** if( rc!=SQLITE_OK ){ +** printf("error occured (rc=%d), (errmsg=%s)\n", rc, zErr); +** } +** sqlite3_intck_close(p); +** +** Usually, the sqlite3_intck object opens a read transaction within the +** first call to sqlite3_intck_step() and holds it open until the +** integrity-check is complete. However, if sqlite3_intck_unlock() is +** called, the read transaction is ended and a new read transaction opened +** by the subsequent call to sqlite3_intck_step(). +*/ + +#ifndef _SQLITE_INTCK_H +#define _SQLITE_INTCK_H + +#include "sqlite3.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/* +** An ongoing incremental integrity-check operation is represented by an +** opaque pointer of the following type. +*/ +typedef struct sqlite3_intck sqlite3_intck; + +/* +** Open a new incremental integrity-check object. If successful, populate +** output variable (*ppOut) with the new object handle and return SQLITE_OK. +** Or, if an error occurs, set (*ppOut) to NULL and return an SQLite error +** code (e.g. SQLITE_NOMEM). +** +** The integrity-check will be conducted on database zDb (which must be "main", +** "temp", or the name of an attached database) of database handle db. Once +** this function has been called successfully, the caller should not use +** database handle db until the integrity-check object has been destroyed +** using sqlite3_intck_close(). +*/ +int sqlite3_intck_open( + sqlite3 *db, /* Database handle */ + const char *zDb, /* Database name ("main", "temp" etc.) */ + sqlite3_intck **ppOut /* OUT: New sqlite3_intck handle */ +); + +/* +** Close and release all resources associated with a handle opened by an +** earlier call to sqlite3_intck_open(). The results of using an +** integrity-check handle after it has been passed to this function are +** undefined. +*/ +void sqlite3_intck_close(sqlite3_intck *pCk); + +/* +** Do the next step of the integrity-check operation specified by the handle +** passed as the only argument. This function returns SQLITE_DONE if the +** integrity-check operation is finished, or an SQLite error code if +** an error occurs, or SQLITE_OK if no error occurs but the integrity-check +** is not finished. It is not considered an error if database corruption +** is encountered. +** +** Following a successful call to sqlite3_intck_step() (one that returns +** SQLITE_OK), sqlite3_intck_message() returns a non-NULL value if +** corruption was detected in the db. +** +** If an error occurs and a value other than SQLITE_OK or SQLITE_DONE is +** returned, then the integrity-check handle is placed in an error state. +** In this state all subsequent calls to sqlite3_intck_step() or +** sqlite3_intck_unlock() will immediately return the same error. The +** sqlite3_intck_error() method may be used to obtain an English language +** error message in this case. +*/ +int sqlite3_intck_step(sqlite3_intck *pCk); + +/* +** If the previous call to sqlite3_intck_step() encountered corruption +** within the database, then this function returns a pointer to a buffer +** containing a nul-terminated string describing the corruption in +** English. 
If the previous call to sqlite3_intck_step() did not encounter +** corruption, or if there was no previous call, this function returns +** NULL. +*/ +const char *sqlite3_intck_message(sqlite3_intck *pCk); + +/* +** Close any read-transaction opened by an earlier call to +** sqlite3_intck_step(). Any subsequent call to sqlite3_intck_step() will +** open a new transaction. Return SQLITE_OK if successful, or an SQLite error +** code otherwise. +** +** If an error occurs, then the integrity-check handle is placed in an error +** state. In this state all subsequent calls to sqlite3_intck_step() or +** sqlite3_intck_unlock() will immediately return the same error. The +** sqlite3_intck_error() method may be used to obtain an English language +** error message in this case. +*/ +int sqlite3_intck_unlock(sqlite3_intck *pCk); + +/* +** If an error has occurred in an earlier call to sqlite3_intck_step() +** or sqlite3_intck_unlock(), then this method returns the associated +** SQLite error code. Additionally, if pzErr is not NULL, then (*pzErr) +** may be set to point to a nul-terminated string containing an English +** language error message. Or, if no error message is available, to +** NULL. +** +** If no error has occurred within sqlite3_intck_step() or +** sqlite_intck_unlock() calls on the handle passed as the first argument, +** then SQLITE_OK is returned and (*pzErr) set to NULL. +*/ +int sqlite3_intck_error(sqlite3_intck *pCk, const char **pzErr); + +/* +** This API is used for testing only. It returns the full-text of an SQL +** statement used to test object zObj, which may be a table or index. +** The returned buffer is valid until the next call to either this function +** or sqlite3_intck_close() on the same sqlite3_intck handle. +*/ +const char *sqlite3_intck_test_sql(sqlite3_intck *pCk, const char *zObj); + + +#ifdef __cplusplus +} /* end of the 'extern "C"' block */ +#endif + +#endif /* ifndef _SQLITE_INTCK_H */ diff --git a/ext/intck/test_intck.c b/ext/intck/test_intck.c new file mode 100644 index 0000000000..d3a619b503 --- /dev/null +++ b/ext/intck/test_intck.c @@ -0,0 +1,233 @@ +/* +** 2010 August 28 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** Code for testing all sorts of SQLite interfaces. This code +** is not included in the SQLite library. 
+*/ + +#include "sqlite3.h" +#include "sqlite3intck.h" +#include "tclsqlite.h" +#include +#include + +/* In test1.c */ +int getDbPointer(Tcl_Interp *interp, const char *zA, sqlite3 **ppDb); +const char *sqlite3ErrName(int); + +typedef struct TestIntck TestIntck; +struct TestIntck { + sqlite3_intck *intck; +}; + +static int testIntckCmd( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + struct Subcmd { + const char *zName; + int nArg; + const char *zExpect; + } aCmd[] = { + {"close", 0, ""}, /* 0 */ + {"step", 0, ""}, /* 1 */ + {"message", 0, ""}, /* 2 */ + {"error", 0, ""}, /* 3 */ + {"unlock", 0, ""}, /* 4 */ + {"test_sql", 1, ""}, /* 5 */ + {0 , 0} + }; + int rc = TCL_OK; + int iIdx = -1; + TestIntck *p = (TestIntck*)clientData; + + if( objc<2 ){ + Tcl_WrongNumArgs(interp, 1, objv, "SUB-COMMAND ..."); + return TCL_ERROR; + } + + rc = Tcl_GetIndexFromObjStruct( + interp, objv[1], aCmd, sizeof(aCmd[0]), "SUB-COMMAND", 0, &iIdx + ); + if( rc ) return rc; + + if( objc!=2+aCmd[iIdx].nArg ){ + Tcl_WrongNumArgs(interp, 2, objv, aCmd[iIdx].zExpect); + return TCL_ERROR; + } + + switch( iIdx ){ + case 0: assert( 0==strcmp("close", aCmd[iIdx].zName) ); { + Tcl_DeleteCommand(interp, Tcl_GetStringFromObj(objv[0], 0)); + break; + } + + case 1: assert( 0==strcmp("step", aCmd[iIdx].zName) ); { + rc = sqlite3_intck_step(p->intck); + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); + break; + } + + case 2: assert( 0==strcmp("message", aCmd[iIdx].zName) ); { + const char *z = sqlite3_intck_message(p->intck); + Tcl_SetObjResult(interp, Tcl_NewStringObj(z ? z : "", -1)); + break; + } + + case 3: assert( 0==strcmp("error", aCmd[iIdx].zName) ); { + const char *zErr = 0; + Tcl_Obj *pRes; + rc = sqlite3_intck_error(p->intck, 0); + pRes = Tcl_NewObj(); + Tcl_ListObjAppendElement( + interp, pRes, Tcl_NewStringObj(sqlite3ErrName(rc), -1) + ); + sqlite3_intck_error(p->intck, &zErr); + Tcl_ListObjAppendElement( + interp, pRes, Tcl_NewStringObj(zErr ? zErr : 0, -1) + ); + Tcl_SetObjResult(interp, pRes); + break; + } + + case 4: assert( 0==strcmp("unlock", aCmd[iIdx].zName) ); { + rc = sqlite3_intck_unlock(p->intck); + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); + break; + } + + case 5: assert( 0==strcmp("test_sql", aCmd[iIdx].zName) ); { + const char *zObj = Tcl_GetString(objv[2]); + const char *zSql = sqlite3_intck_test_sql(p->intck, zObj[0] ? zObj : 0); + Tcl_SetObjResult(interp, Tcl_NewStringObj(zSql, -1)); + break; + } + } + + return TCL_OK; +} + +/* +** Destructor for commands created by test_sqlite3_intck(). 
+*/ +static void testIntckFree(void *clientData){ + TestIntck *p = (TestIntck*)clientData; + sqlite3_intck_close(p->intck); + ckfree(p); +} + +/* +** tclcmd: sqlite3_intck DB DBNAME +*/ +static int test_sqlite3_intck( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + char zName[64]; + int iName = 0; + Tcl_CmdInfo info; + TestIntck *p = 0; + sqlite3 *db = 0; + const char *zDb = 0; + int rc = SQLITE_OK; + + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB DBNAME"); + return TCL_ERROR; + } + + p = (TestIntck*)ckalloc(sizeof(TestIntck)); + memset(p, 0, sizeof(TestIntck)); + + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ){ + return TCL_ERROR; + } + zDb = Tcl_GetString(objv[2]); + if( zDb[0]=='\0' ) zDb = 0; + + rc = sqlite3_intck_open(db, zDb, &p->intck); + if( rc!=SQLITE_OK ){ + ckfree(p); + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3_errstr(rc), -1)); + return TCL_ERROR; + } + + do { + sprintf(zName, "intck%d", iName++); + }while( Tcl_GetCommandInfo(interp, zName, &info)!=0 ); + Tcl_CreateObjCommand(interp, zName, testIntckCmd, (void*)p, testIntckFree); + Tcl_SetObjResult(interp, Tcl_NewStringObj(zName, -1)); + + return TCL_OK; +} + +/* +** tclcmd: test_do_intck DB DBNAME +*/ +static int test_do_intck( + void * clientData, + Tcl_Interp *interp, + int objc, + Tcl_Obj *CONST objv[] +){ + sqlite3 *db = 0; + const char *zDb = 0; + int rc = SQLITE_OK; + sqlite3_intck *pCk = 0; + Tcl_Obj *pRet = 0; + const char *zErr = 0; + + if( objc!=3 ){ + Tcl_WrongNumArgs(interp, 1, objv, "DB DBNAME"); + return TCL_ERROR; + } + if( getDbPointer(interp, Tcl_GetString(objv[1]), &db) ){ + return TCL_ERROR; + } + zDb = Tcl_GetString(objv[2]); + + pRet = Tcl_NewObj(); + Tcl_IncrRefCount(pRet); + + rc = sqlite3_intck_open(db, zDb, &pCk); + if( rc==SQLITE_OK ){ + while( sqlite3_intck_step(pCk)==SQLITE_OK ){ + const char *zMsg = sqlite3_intck_message(pCk); + if( zMsg ){ + Tcl_ListObjAppendElement(interp, pRet, Tcl_NewStringObj(zMsg, -1)); + } + } + rc = sqlite3_intck_error(pCk, &zErr); + } + if( rc!=SQLITE_OK ){ + if( zErr ){ + Tcl_SetObjResult(interp, Tcl_NewStringObj(zErr, -1)); + }else{ + Tcl_SetObjResult(interp, Tcl_NewStringObj(sqlite3ErrName(rc), -1)); + } + }else{ + Tcl_SetObjResult(interp, pRet); + } + Tcl_DecrRefCount(pRet); + sqlite3_intck_close(pCk); + sqlite3_intck_close(0); + return rc ? TCL_ERROR : TCL_OK; +} + +int Sqlitetestintck_Init(Tcl_Interp *interp){ + Tcl_CreateObjCommand(interp, "sqlite3_intck", test_sqlite3_intck, 0, 0); + Tcl_CreateObjCommand(interp, "test_do_intck", test_do_intck, 0, 0); + return TCL_OK; +} diff --git a/ext/jni/GNUmakefile b/ext/jni/GNUmakefile new file mode 100644 index 0000000000..f2fc7fb7bd --- /dev/null +++ b/ext/jni/GNUmakefile @@ -0,0 +1,505 @@ +# Quick-and-dirty makefile to bootstrap the sqlite3-jni project. This +# build assumes a Linux-like system. +default: all + +JAVA_HOME ?= $(HOME)/jdk/current +# e.g. /usr/lib/jvm/default-javajava-19-openjdk-amd64 +JDK_HOME ?= $(JAVA_HOME) +# ^^^ JDK_HOME is not as widely used as JAVA_HOME +bin.jar := $(JDK_HOME)/bin/jar +bin.java := $(JDK_HOME)/bin/java +bin.javac := $(JDK_HOME)/bin/javac +bin.javadoc := $(JDK_HOME)/bin/javadoc +ifeq (,$(wildcard $(JDK_HOME))) +$(error set JDK_HOME to the top-most dir of your JDK installation.) +endif +MAKEFILE := $(lastword $(MAKEFILE_LIST)) +$(MAKEFILE): + +package.jar := sqlite3-jni.jar + +dir.top := ../.. 
+dir.tool := ../../tool +dir.jni := $(patsubst %/,%,$(dir $(MAKEFILE))) +dir.src := $(dir.jni)/src +dir.src.c := $(dir.src)/c +dir.bld := $(dir.jni)/bld +dir.bld.c := $(dir.bld) +dir.src.jni := $(dir.src)/org/sqlite/jni +dir.src.capi := $(dir.src.jni)/capi +dir.src.fts5 := $(dir.src.jni)/fts5 +dir.tests := $(dir.src)/tests +mkdir ?= mkdir -p +$(dir.bld.c): + $(mkdir) $@ + +javac.flags ?= -Xlint:unchecked -Xlint:deprecation +java.flags ?= +javac.flags += -encoding utf8 +# -------------^^^^^^^^^^^^^^ required for Windows builds +jnicheck ?= 1 +ifeq (1,$(jnicheck)) + java.flags += -Xcheck:jni +endif + +classpath := $(dir.src) +CLEAN_FILES := $(package.jar) +DISTCLEAN_FILES := $(dir.jni)/*~ $(dir.src.c)/*~ $(dir.src.jni)/*~ + +sqlite3-jni.h := $(dir.src.c)/sqlite3-jni.h +.NOTPARALLEL: $(sqlite3-jni.h) +CApi.java := $(dir.src.capi)/CApi.java +SQLTester.java := $(dir.src.capi)/SQLTester.java +CApi.class := $(CApi.java:.java=.class) +SQLTester.class := $(SQLTester.java:.java=.class) + +######################################################################## +# The future of FTS5 customization in this API is as yet unclear. +# The pieces are all in place, and are all thin proxies so not much +# complexity, but some semantic changes were required in porting +# which are largely untested. +# +# Reminder: this flag influences the contents of $(sqlite3-jni.h), +# which is checked in. Please do not check in changes to that file in +# which the fts5 APIs have been stripped unless that feature is +# intended to be stripped for good. +enable.fts5 ?= 1 + +ifeq (,$(wildcard $(dir.tests)/*)) + enable.tester := 0 +else + enable.tester := 1 +endif + +# bin.version-info = binary to output various sqlite3 version info +# building the distribution zip file. +bin.version-info := $(dir.top)/version-info +.NOTPARALLEL: $(bin.version-info) +$(bin.version-info): $(dir.tool)/version-info.c $(sqlite3.h) $(dir.top)/Makefile + $(MAKE) -C $(dir.top) version-info + +# Be explicit about which Java files to compile so that we can work on +# in-progress files without requiring them to be in a compilable state. 
+JAVA_FILES.main := $(patsubst %,$(dir.src.jni)/annotation/%,\ + Experimental.java \ + NotNull.java \ + Nullable.java \ +) $(patsubst %,$(dir.src.capi)/%,\ + AbstractCollationCallback.java \ + AggregateFunction.java \ + AuthorizerCallback.java \ + AutoExtensionCallback.java \ + BusyHandlerCallback.java \ + CollationCallback.java \ + CollationNeededCallback.java \ + CommitHookCallback.java \ + ConfigLogCallback.java \ + ConfigSqlLogCallback.java \ + NativePointerHolder.java \ + OutputPointer.java \ + PrepareMultiCallback.java \ + PreupdateHookCallback.java \ + ProgressHandlerCallback.java \ + ResultCode.java \ + RollbackHookCallback.java \ + ScalarFunction.java \ + SQLFunction.java \ + CallbackProxy.java \ + CApi.java \ + TableColumnMetadata.java \ + TraceV2Callback.java \ + UpdateHookCallback.java \ + ValueHolder.java \ + WindowFunction.java \ + XDestroyCallback.java \ + sqlite3.java \ + sqlite3_blob.java \ + sqlite3_context.java \ + sqlite3_stmt.java \ + sqlite3_value.java \ +) $(patsubst %,$(dir.src.jni)/wrapper1/%,\ + AggregateFunction.java \ + ScalarFunction.java \ + SqlFunction.java \ + Sqlite.java \ + SqliteException.java \ + ValueHolder.java \ + WindowFunction.java \ +) + +JAVA_FILES.unittest := $(patsubst %,$(dir.src.jni)/%,\ + capi/Tester1.java \ + wrapper1/Tester2.java \ +) +ifeq (1,$(enable.fts5)) + JAVA_FILES.unittest += $(patsubst %,$(dir.src.fts5)/%,\ + TesterFts5.java \ + ) + JAVA_FILES.main += $(patsubst %,$(dir.src.fts5)/%,\ + fts5_api.java \ + fts5_extension_function.java \ + fts5_tokenizer.java \ + Fts5.java \ + Fts5Context.java \ + Fts5ExtensionApi.java \ + Fts5PhraseIter.java \ + Fts5Tokenizer.java \ + XTokenizeCallback.java \ + ) +endif +JAVA_FILES.tester := $(SQLTester.java) +JAVA_FILES.package.info := \ + $(dir.src.jni)/package-info.java \ + $(dir.src.jni)/annotation/package-info.java + +CLASS_FILES.main := $(JAVA_FILES.main:.java=.class) +CLASS_FILES.unittest := $(JAVA_FILES.unittest:.java=.class) +CLASS_FILES.tester := $(JAVA_FILES.tester:.java=.class) + +JAVA_FILES += $(JAVA_FILES.main) $(JAVA_FILES.unittest) +ifeq (1,$(enable.tester)) + JAVA_FILES += $(JAVA_FILES.tester) +endif + +CLASS_FILES := +define CLASSFILE_DEPS +all: $(1).class +$(1).class: $(1).java +CLASS_FILES += $(1).class +endef +$(foreach B,$(basename \ + $(JAVA_FILES.main) $(JAVA_FILES.unittest) $(JAVA_FILES.tester)),\ + $(eval $(call CLASSFILE_DEPS,$(B)))) +$(CLASS_FILES): $(MAKEFILE) + $(bin.javac) $(javac.flags) -h $(dir.bld.c) -cp $(classpath) $(JAVA_FILES) + +#.PHONY: classfiles + +######################################################################## +# Set up sqlite3.c and sqlite3.h... +# +# To build with SEE (https://sqlite.org/see), either put sqlite3-see.c +# in the top of this build tree or pass +# sqlite3.c=PATH_TO_sqlite3-see.c to the build. Note that only +# encryption modules with no 3rd-party dependencies will currently +# work here: AES256-OFB, AES128-OFB, and AES128-CCM. Not +# coincidentally, those 3 modules are included in the sqlite3-see.c +# bundle. +# +# A custom sqlite3.c must not have any spaces in its name. +# $(sqlite3.canonical.c) must point to the sqlite3.c in +# the sqlite3 canonical source tree, as that source file +# is required for certain utility and test code. 
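+# For example (per the SEE note above), a custom amalgamation can be
+# selected with: make sqlite3.c=/path/to/sqlite3-see.c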
+sqlite3.canonical.c := $(firstword $(wildcard $(dir.src.c)/sqlite3.c) $(dir.top)/sqlite3.c) +sqlite3.canonical.h := $(firstword $(wildcard $(dir.src.c)/sqlite3.h) $(dir.top)/sqlite3.h) +sqlite3.c := $(sqlite3.canonical.c) +sqlite3.h := $(sqlite3.canonical.h) +#ifeq (,$(shell grep sqlite3_activate_see $(sqlite3.c) 2>/dev/null)) +# SQLITE_C_IS_SEE := 0 +#else +# SQLITE_C_IS_SEE := 1 +# $(info This is an SEE build.) +#endif + +.NOTPARALLEL: $(sqlite3.h) +$(sqlite3.h): + $(MAKE) -C $(dir.top) sqlite3.c +$(sqlite3.c): $(sqlite3.h) + +opt.threadsafe ?= 1 +opt.fatal-oom ?= 1 +opt.debug ?= 1 +opt.metrics ?= 1 +SQLITE_OPT = \ + -DSQLITE_THREADSAFE=$(opt.threadsafe) \ + -DSQLITE_TEMP_STORE=2 \ + -DSQLITE_USE_URI=1 \ + -DSQLITE_OMIT_LOAD_EXTENSION \ + -DSQLITE_OMIT_DEPRECATED \ + -DSQLITE_OMIT_SHARED_CACHE \ + -DSQLITE_C=$(sqlite3.c) \ + -DSQLITE_JNI_FATAL_OOM=$(opt.fatal-oom) \ + -DSQLITE_JNI_ENABLE_METRICS=$(opt.metrics) + +opt.extras ?= 1 +ifeq (1,$(opt.extras)) +SQLITE_OPT += -DSQLITE_ENABLE_RTREE \ + -DSQLITE_ENABLE_EXPLAIN_COMMENTS \ + -DSQLITE_ENABLE_STMTVTAB \ + -DSQLITE_ENABLE_DBPAGE_VTAB \ + -DSQLITE_ENABLE_DBSTAT_VTAB \ + -DSQLITE_ENABLE_BYTECODE_VTAB \ + -DSQLITE_ENABLE_OFFSET_SQL_FUNC \ + -DSQLITE_ENABLE_PREUPDATE_HOOK \ + -DSQLITE_ENABLE_NORMALIZE \ + -DSQLITE_ENABLE_SQLLOG \ + -DSQLITE_ENABLE_COLUMN_METADATA +endif + +ifeq (1,$(opt.debug)) + SQLITE_OPT += -DSQLITE_DEBUG -g -DDEBUG -UNDEBUG +else + SQLITE_OPT += -Os +endif + +ifeq (1,$(enable.fts5)) + SQLITE_OPT += -DSQLITE_ENABLE_FTS5 +endif + +sqlite3-jni.c := $(dir.src.c)/sqlite3-jni.c +sqlite3-jni.o := $(dir.bld.c)/sqlite3-jni.o +sqlite3-jni.h := $(dir.src.c)/sqlite3-jni.h +package.dll := $(dir.bld.c)/libsqlite3-jni.so +# All javac-generated .h files must be listed in $(sqlite3-jni.h.in): +sqlite3-jni.h.in := +# $(java.with.jni) lists all Java files which contain JNI decls: +java.with.jni := +define ADD_JNI_H +sqlite3-jni.h.in += $$(dir.bld.c)/org_sqlite_jni$(3)_$(2).h +java.with.jni += $(1)/$(2).java +$$(dir.bld.c)/org_sqlite_jni$(3)_$(2).h: $(1)/$(2).java +endef +# Invoke ADD_JNI_H once for each Java file which includes JNI +# declarations: +$(eval $(call ADD_JNI_H,$(dir.src.capi),CApi,_capi)) +$(eval $(call ADD_JNI_H,$(dir.src.capi),SQLTester,_capi)) +ifeq (1,$(enable.fts5)) + $(eval $(call ADD_JNI_H,$(dir.src.fts5),Fts5ExtensionApi,_fts5)) + $(eval $(call ADD_JNI_H,$(dir.src.fts5),fts5_api,_fts5)) + $(eval $(call ADD_JNI_H,$(dir.src.fts5),fts5_tokenizer,_fts5)) +endif +$(sqlite3-jni.h.in): $(dir.bld.c) + +#package.dll.cfiles := +package.dll.cflags = \ + -std=c99 \ + -fPIC \ + -I. \ + -I$(dir $(sqlite3.h)) \ + -I$(dir.src.c) \ + -I$(JDK_HOME)/include \ + $(patsubst %,-I%,$(patsubst %.h,,$(wildcard $(JDK_HOME)/include/*))) \ + -Wall +# The gross $(patsubst...) above is to include the platform-specific +# subdir which lives under $(JDK_HOME)/include and is a required +# include path for client-level code. +# +# Using (-Wall -Wextra) triggers an untennable number of +# gcc warnings from sqlite3.c for mundane things like +# unused parameters. 
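+# For example, on a Linux JDK the $(patsubst...) expansion above is
+# expected to add something like -I$(JDK_HOME)/include/linux (compare
+# the hard-coded -I flags in jar-dist.make).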
+######################################################################## +ifeq (1,$(enable.tester)) + package.dll.cflags += -DSQLITE_JNI_ENABLE_SQLTester +endif + +$(sqlite3-jni.h): $(sqlite3-jni.h.in) $(MAKEFILE) + @cat $(sqlite3-jni.h.in) > $@.tmp + @if cmp $@ $@.tmp >/dev/null; then \ + rm -f $@.tmp; \ + echo "$@ not modified"; \ + else \ + mv $@.tmp $@; \ + echo "Updated $@"; \ + fi + @if [ x1 != x$(enable.fts5) ]; then \ + echo "*** REMINDER:"; \ + echo "*** enable.fts5=0, so please do not check in changes to $@."; \ + fi + +$(package.dll): $(sqlite3-jni.h) $(sqlite3.c) $(sqlite3.h) +$(package.dll): $(sqlite3-jni.c) $(MAKEFILE) + $(CC) $(package.dll.cflags) $(SQLITE_OPT) \ + $(sqlite3-jni.c) -shared -o $@ +all: $(package.dll) + +.PHONY: test test-one +Tester1.flags ?= +Tester2.flags ?= +test.flags.jvm = -ea -Djava.library.path=$(dir.bld.c) \ + $(java.flags) -cp $(classpath) +test.deps := $(CLASS_FILES) $(package.dll) +test-one: $(test.deps) + $(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 $(Tester1.flags) + $(bin.java) $(test.flags.jvm) org.sqlite.jni.wrapper1.Tester2 $(Tester2.flags) +test-sqllog: $(test.deps) + @echo "Testing with -sqllog..." + $(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 $(Tester1.flags) -sqllog +test-mt: $(test.deps) + @echo "Testing in multi-threaded mode:"; + $(bin.java) $(test.flags.jvm) org.sqlite.jni.capi.Tester1 \ + -t 7 -r 50 -shuffle $(Tester1.flags) + $(bin.java) $(test.flags.jvm) org.sqlite.jni.wrapper1.Tester2 \ + -t 7 -r 50 -shuffle $(Tester2.flags) + +test: test-one test-mt +tests: test test-sqllog + +tester.scripts := $(sort $(wildcard $(dir.src)/tests/*.test)) +tester.flags ?= # --verbose +.PHONY: tester tester-local tester-ext +ifeq (1,$(enable.tester)) +tester-local: $(CLASS_FILES.tester) $(package.dll) + $(bin.java) -ea -Djava.library.path=$(dir.bld.c) \ + $(java.flags) -cp $(classpath) \ + org.sqlite.jni.capi.SQLTester $(tester.flags) $(tester.scripts) +tester: tester-local +else +tester: + @echo "SQLTester support is disabled." +endif + +tester.extdir.default := $(dir.tests)/ext +tester.extdir ?= $(tester.extdir.default) +tester.extern-scripts := $(wildcard $(tester.extdir)/*.test) +ifneq (,$(tester.extern-scripts)) +tester-ext: + $(bin.java) -ea -Djava.library.path=$(dir.bld.c) \ + $(java.flags) -cp $(classpath) \ + org.sqlite.jni.capi.SQLTester $(tester.flags) $(tester.extern-scripts) +else +tester-ext: + @echo "******************************************************"; \ + echo "*** Include the out-of-tree test suite in the 'tester'"; \ + echo "*** target by either symlinking its directory to"; \ + echo "*** $(tester.extdir.default) or passing it to make"; \ + echo "*** as tester.extdir=/path/to/that/dir."; \ + echo "******************************************************"; +endif + +tester-ext: tester-local +tester: tester-ext +tests: tester +######################################################################## +# Build each SQLITE_THREADMODE variant and run all tests against them. 
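+# (Each multitest-NN target below re-invokes $(MAKE) with a different
+# threadsafe/oom combination and runs the "tests" target against it.)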
+multitest: clean +define MULTIOPT +multitest: multitest-$(1) +multitest-$(1): + $$(MAKE) opt.debug=$$(opt.debug) $(patsubst %,opt.%,$(2)) \ + tests clean enable.fts5=1 +endef + +$(eval $(call MULTIOPT,01,threadsafe=0 oom=1)) +$(eval $(call MULTIOPT,00,threadsafe=0 oom=0)) +$(eval $(call MULTIOPT,11,threadsafe=1 oom=1)) +$(eval $(call MULTIOPT,10,threadsafe=1 oom=0)) +$(eval $(call MULTIOPT,21,threadsafe=2 oom=1)) +$(eval $(call MULTIOPT,20,threadsafe=2 oom=0)) + + +######################################################################## +# jar bundle... +package.jar.in := $(abspath $(dir.src)/jar.in) +CLEAN_FILES += $(package.jar.in) +JAVA_FILES.jar := $(JAVA_FILES.main) $(JAVA_FILES.unittest) $(JAVA_FILES.package.info) +CLASS_FILES.jar := $(filter-out %/package-info.class,$(JAVA_FILES.jar:.java=.class)) +$(package.jar.in): $(package.dll) $(MAKEFILE) + ls -1 \ + $(dir.src.jni)/*/*.java $(dir.src.jni)/*/*.class \ + | sed -e 's,^$(dir.src)/,,' | sort > $@ + +$(package.jar): $(CLASS_FILES.jar) $(MAKEFILE) $(package.jar.in) + @rm -f $(dir.src)/c/*~ $(dir.src.jni)/*~ + cd $(dir.src); $(bin.jar) -cfe ../$@ org.sqlite.jni.capi.Tester1 @$(package.jar.in) + @ls -la $@ + @echo "To use this jar you will need the -Djava.library.path=DIR/CONTAINING/libsqlite3-jni.so flag." + @echo "e.g. java -Djava.library.path=bld -jar $@" + +jar: $(package.jar) +run-jar: $(package.jar) $(package.dll) + $(bin.java) -Djava.library.path=$(dir.bld) -jar $(package.jar) $(run-jar.flags) + +######################################################################## +# javadoc... +dir.doc := $(dir.jni)/javadoc +doc.index := $(dir.doc)/index.html +javadoc.exclude := -exclude org.sqlite.jni.fts5 +# ^^^^ 2023-09-13: elide the fts5 parts from the public docs for +# the time being, as it's not clear where the Java bindings for +# those bits are going. +# javadoc.exclude += -exclude org.sqlite.jni.capi +# ^^^^ exclude the capi API only for certain builds (TBD) +$(doc.index): $(JAVA_FILES.main) $(MAKEFILE) + @if [ -d $(dir.doc) ]; then rm -fr $(dir.doc)/*; fi + $(bin.javadoc) -cp $(classpath) -d $(dir.doc) -quiet \ + -subpackages org.sqlite.jni $(javadoc.exclude) + @echo "javadoc output is in $@" + +.PHONY: doc javadoc docserve +.FORCE: doc +doc: $(doc.index) +javadoc: $(doc.index) +# Force rebuild of docs +redoc: + @rm -f $(doc.index) + @$(MAKE) doc +docserve: $(doc.index) + cd $(dir.doc) && althttpd -max-age 1 -page index.html +######################################################################## +# Clean up... +CLEAN_FILES += $(dir.bld.c)/* \ + $(dir.src.jni)/*.class \ + $(dir.src.jni)/*/*.class \ + $(package.dll) \ + hs_err_pid*.log + +.PHONY: clean distclean +clean: + -rm -f $(CLEAN_FILES) +distclean: clean + -rm -f $(DISTCLEAN_FILES) + -rm -fr $(dir.bld.c) $(dir.doc) + +######################################################################## +# distribution bundle rules... + +ifeq (,$(filter snapshot,$(MAKECMDGOALS))) +dist-name-prefix := sqlite-jni +else +dist-name-prefix := sqlite-jni-snapshot-$(shell /usr/bin/date +%Y%m%d) +endif +dist-name := $(dist-name-prefix)-TEMP + + +dist-dir.top := $(dist-name) +dist-dir.src := $(dist-dir.top)/src +dist.top.extras := \ + README.md + +.PHONY: dist snapshot + +dist: \ + $(bin.version-info) $(sqlite3.canonical.c) \ + $(package.jar) $(MAKEFILE) + @echo "Making end-user deliverables..." 
+ @echo "****************************************************************************"; \ + echo "*** WARNING: be sure to build this with JDK8 (javac 1.8) for compatibility."; \ + echo "*** reasons!"; $$($(bin.javac) -version); \ + echo "****************************************************************************" + @rm -fr $(dist-dir.top) + @mkdir -p $(dist-dir.src) + @cp -p $(dist.top.extras) $(dist-dir.top)/. + @cp -p jar-dist.make $(dist-dir.top)/Makefile + @cp -p $(dir.src.c)/*.[ch] $(dist-dir.src)/. + @cp -p $(sqlite3.canonical.c) $(sqlite3.canonical.h) $(dist-dir.src)/. + @set -e; \ + vnum=$$($(bin.version-info) --download-version); \ + vjar=$$($(bin.version-info) --version); \ + vdir=$(dist-name-prefix)-$$vnum; \ + arczip=$$vdir.zip; \ + cp -p $(package.jar) $(dist-dir.top)/sqlite3-jni-$${vjar}.jar; \ + echo "Making $$arczip ..."; \ + rm -fr $$arczip $$vdir; \ + mv $(dist-dir.top) $$vdir; \ + zip -qr $$arczip $$vdir; \ + rm -fr $$vdir; \ + ls -la $$arczip; \ + set +e; \ + unzip -lv $$arczip || echo "Missing unzip app? Not fatal." + +snapshot: dist + +.PHONY: dist-clean +clean: dist-clean +dist-clean: + rm -fr $(dist-name) $(wildcard sqlite-jni-*.zip) diff --git a/ext/jni/README.md b/ext/jni/README.md new file mode 100644 index 0000000000..5ad79fce9e --- /dev/null +++ b/ext/jni/README.md @@ -0,0 +1,316 @@ +SQLite3 via JNI +======================================================================== + +This directory houses a Java Native Interface (JNI) binding for the +sqlite3 API. If you are reading this from the distribution ZIP file, +links to resources in the canonical source tree will note work. The +canonical copy of this file can be browsed at: + + + +Technical support is available in the forum: + + + + +> **FOREWARNING:** this subproject is very much in development and + subject to any number of changes. Please do not rely on any + information about its API until this disclaimer is removed. The JNI + bindings released with version 3.43 are a "tech preview." Once + finalized, strong backward compatibility guarantees will apply. + +Project goals/requirements: + +- A [1-to-1(-ish) mapping of the C API](#1to1ish) to Java via JNI, + insofar as cross-language semantics allow for. A closely-related + goal is that [the C documentation](https://sqlite.org/c3ref/intro.html) + should be usable as-is, insofar as possible, for the JNI binding. + +- Support Java as far back as version 8 (2014). + +- Environment-independent. Should work everywhere both Java + and SQLite3 do. + +- No 3rd-party dependencies beyond the JDK. That includes no + build-level dependencies for specific IDEs and toolchains. We + welcome the addition of build files for arbitrary environments + insofar as they neither interfere with each other nor become + a maintenance burden for the sqlite developers. + +Non-goals: + +- Creation of high-level OO wrapper APIs. Clients are free to create + them off of the C-style API. + +- Virtual tables are unlikely to be supported due to the amount of + glue code needed to fit them into Java. + +- Support for mixed-mode operation, where client code accesses SQLite + both via the Java-side API and the C API via their own native + code. Such cases would be a minefield of potential mis-interactions + between this project's JNI bindings and mixed-mode client code. + + +Hello World +----------------------------------------------------------------------- + +```java +import org.sqlite.jni.*; +import static org.sqlite.jni.CApi.*; + +... 
+ +final sqlite3 db = sqlite3_open(":memory:"); +try { + final int rc = sqlite3_errcode(db); + if( 0 != rc ){ + if( null != db ){ + System.out.print("Error opening db: "+sqlite3_errmsg(db)); + }else{ + System.out.print("Error opening db: rc="+rc); + } + ... handle error ... + } + // ... else use the db ... +}finally{ + // ALWAYS close databases using sqlite3_close() or sqlite3_close_v2() + // when done with them. All of their active statement handles must + // first have been passed to sqlite3_finalize(). + sqlite3_close_v2(db); +} +``` + + +Building +======================================================================== + +The canonical builds assumes a Linux-like environment and requires: + +- GNU Make +- A JDK supporting Java 8 or higher +- A modern C compiler. gcc and clang should both work. + +Put simply: + +```console +$ export JAVA_HOME=/path/to/jdk/root +$ make +$ make test +$ make clean +``` + +The jar distribution can be created with `make jar`, but note that it +does not contain the binary DLL file. A different DLL is needed for +each target platform. + + + +One-to-One(-ish) Mapping to C +======================================================================== + +This JNI binding aims to provide as close to a 1-to-1 experience with +the C API as cross-language semantics allow. Interface changes are +necessarily made where cross-language semantics do not allow a 1-to-1, +and judiciously made where a 1-to-1 mapping would be unduly cumbersome +to use in Java. In all cases, this binding makes every effort to +provide semantics compatible with the C API documentation even if the +interface to those semantics is slightly different. Any cases which +deviate from those semantics (either removing or adding semantics) are +clearly documented. + +Where it makes sense to do so for usability, Java-side overloads are +provided which accept or return data in alternative forms or provide +sensible default argument values. In all such cases they are thin +proxies around the corresponding C APIs and do not introduce new +semantics. + +In a few cases, Java-specific capabilities have been added in +new APIs, all of which have "_java" somewhere in their names. +Examples include: + +- `sqlite3_result_java_object()` +- `sqlite3_column_java_object()` +- `sqlite3_value_java_object()` + +which, as one might surmise, collectively enable the passing of +arbitrary Java objects from user-defined SQL functions through to the +caller. + + +Golden Rule: Garbage Collection Cannot Free SQLite Resources +------------------------------------------------------------------------ + +It is important that all databases and prepared statement handles get +cleaned up by client code. A database cannot be closed if it has open +statement handles. `sqlite3_close()` fails if the db cannot be closed +whereas `sqlite3_close_v2()` recognizes that case and marks the db as +a "zombie," pending finalization when the library detects that all +pending statements have been closed. Be aware that Java garbage +collection _cannot_ close a database or finalize a prepared statement. +Those things require explicit API calls. + +Classes for which it is sensible support Java's `AutoCloseable` +interface so can be used with try-with-resources constructs. + + +Golden Rule #2: _Never_ Throw from Callbacks (Unless...) +------------------------------------------------------------------------ + +All routines in this API, barring explicitly documented exceptions, +retain C-like semantics. 
For example, they are not permitted to throw +or propagate exceptions and must return error information (if any) via +result codes or `null`. The only cases where the C-style APIs may +throw is through client-side misuse, e.g. passing in a null where it +may cause a `NullPointerException`. The APIs clearly mark function +parameters which should not be null, but does not generally actively +defend itself against such misuse. Some C-style APIs explicitly accept +`null` as a no-op for usability's sake, and some of the JNI APIs +deliberately return an error code, instead of segfaulting, when passed +a `null`. + +Client-defined callbacks _must never throw exceptions_ unless _very +explicitly documented_ as being throw-safe. Exceptions are generally +reserved for higher-level bindings which are constructed to +specifically deal with them and ensure that they do not leak C-level +resources. In some cases, callback handlers are permitted to throw, in +which cases they get translated to C-level result codes and/or +messages. If a callback which is not permitted to throw throws, its +exception may trigger debug output but will otherwise be suppressed. + +The reason some callbacks are permitted to throw and others not is +because all such callbacks act as proxies for C function callback +interfaces and some of those interfaces have no error-reporting +mechanism. Those which are capable of propagating errors back through +the library convert exceptions from callbacks into corresponding +C-level error information. Those which cannot propagate errors +necessarily suppress any exceptions in order to maintain the C-style +semantics of the APIs. + + +Unwieldy Constructs are Re-mapped +------------------------------------------------------------------------ + +Some constructs, when modelled 1-to-1 from C to Java, are unduly +clumsy to work with in Java because they try to shoehorn C's way of +doing certain things into Java's wildly different ways. The following +subsections cover those, starting with a verbose explanation and +demonstration of where such changes are "really necessary"... + +### Custom Collations + +A prime example of where interface changes for Java are necessary for +usability is [registration of a custom +collation](https://sqlite.org/c3ref/create_collation.html): + +```c +// C: +int sqlite3_create_collation(sqlite3 * db, const char * name, int eTextRep, + void *pUserData, + int (*xCompare)(void*,int,void const *,int,void const *)); + +int sqlite3_create_collation_v2(sqlite3 * db, const char * name, int eTextRep, + void *pUserData, + int (*xCompare)(void*,int,void const *,int,void const *), + void (*xDestroy)(void*)); +``` + +The `pUserData` object is optional client-defined state for the +`xCompare()` and/or `xDestroy()` callback functions, both of which are +passed that object as their first argument. That data is passed around +"externally" in C because that's how C models the world. If we were to +bind that part as-is to Java, the result would be awkward to use (^Yes, +we tried this.): + +```java +// Java: +int sqlite3_create_collation(sqlite3 db, String name, int eTextRep, + Object pUserData, xCompareType xCompare); + +int sqlite3_create_collation_v2(sqlite3 db, String name, int eTextRep, + Object pUserData, + xCompareType xCompare, xDestroyType xDestroy); +``` + +The awkwardness comes from (A) having two distinctly different objects +for callbacks and (B) having their internal state provided separately, +which is ill-fitting in Java. 
For the sake of usability, C APIs which +follow that pattern use a slightly different Java interface: + +```java +int sqlite3_create_collation(sqlite3 db, String name, int eTextRep, + SomeCallbackType collation); +``` + +Where the `Collation` class has an abstract `call()` method and +no-op `xDestroy()` method which can be overridden if needed, leading to +a much more Java-esque usage: + +```java +int rc = sqlite3_create_collation(db, "mycollation", SQLITE_UTF8, new SomeCallbackType(){ + + // Required comparison function: + @Override public int call(byte[] lhs, byte[] rhs){ ... } + + // Optional finalizer function: + @Override public void xDestroy(){ ... } + + // Optional local state: + private String localState1 = + "This is local state. There are many like it, but this one is mine."; + private MyStateType localState2 = new MyStateType(); + ... +}); +``` + +Noting that: + +- It is possible to bind in call-scope-local state via closures, if + desired, as opposed to packing it into the Collation object. + +- No capabilities of the C API are lost or unduly obscured via the + above API reshaping, so power users need not make any compromises. + +- In the specific example above, `sqlite3_create_collation_v2()` + becomes superfluous because the provided interface effectively + provides both the v1 and v2 interfaces, the difference being that + overriding the `xDestroy()` method effectively gives it v2 + semantics. + + +### User-defined SQL Functions (a.k.a. UDFs) + +The [`sqlite3_create_function()`](https://sqlite.org/c3ref/create_function.html) +family of APIs make heavy use of function pointers to provide +client-defined callbacks, necessitating interface changes in the JNI +binding. The Java API has only one core function-registration function: + +```java +int sqlite3_create_function(sqlite3 db, String funcName, int nArgs, + int encoding, SQLFunction func); +``` + +> Design question: does the encoding argument serve any purpose in + Java? That's as-yet undetermined. If not, it will be removed. + +`SQLFunction` is not used directly, but is instead instantiated via +one of its three subclasses: + +- `ScalarFunction` implements simple scalar functions using but a + single callback. +- `AggregateFunction` implements aggregate functions using two + callbacks. +- `WindowFunction` implements window functions using four + callbacks. + +Search [`Tester1.java`](/file/ext/jni/src/org/sqlite/jni/capi/Tester1.java) for +`SQLFunction` for how it's used. + +Reminder: see the disclaimer at the top of this document regarding the +in-flux nature of this API. + +### And so on... + +Various APIs which accept callbacks, e.g. `sqlite3_trace_v2()` and +`sqlite3_update_hook()`, use interfaces similar to those shown above. +Despite the changes in signature, the JNI layer makes every effort to +provide the same semantics as the C API documentation suggests. diff --git a/ext/jni/jar-dist.make b/ext/jni/jar-dist.make new file mode 100644 index 0000000000..7596c99f3f --- /dev/null +++ b/ext/jni/jar-dist.make @@ -0,0 +1,60 @@ +#!/this/is/make +#^^^^ help emacs out +# +# This is a POSIX-make-compatible makefile for building the sqlite3 +# JNI library from "dist" zip file. It must be edited to set the +# proper top-level JDK directory and, depending on the platform, add a +# platform-specific -I directory. It should build as-is with any +# 2020s-era version of gcc or clang. 
It requires JDK version 8 or +# higher and that JAVA_HOME points to the top-most installation +# directory of that JDK. On Ubuntu-style systems the JDK is typically +# installed under /usr/lib/jvm/java-VERSION-PLATFORM. + +default: all + +JAVA_HOME = /usr/lib/jvm/java-1.8.0-openjdk-amd64 +CFLAGS = \ + -fPIC \ + -Isrc \ + -I$(JAVA_HOME)/include \ + -I$(JAVA_HOME)/include/linux \ + -I$(JAVA_HOME)/include/apple \ + -I$(JAVA_HOME)/include/bsd \ + -Wall + +SQLITE_OPT = \ + -DSQLITE_ENABLE_RTREE \ + -DSQLITE_ENABLE_EXPLAIN_COMMENTS \ + -DSQLITE_ENABLE_STMTVTAB \ + -DSQLITE_ENABLE_DBPAGE_VTAB \ + -DSQLITE_ENABLE_DBSTAT_VTAB \ + -DSQLITE_ENABLE_BYTECODE_VTAB \ + -DSQLITE_ENABLE_OFFSET_SQL_FUNC \ + -DSQLITE_OMIT_LOAD_EXTENSION \ + -DSQLITE_OMIT_DEPRECATED \ + -DSQLITE_OMIT_SHARED_CACHE \ + -DSQLITE_THREADSAFE=1 \ + -DSQLITE_TEMP_STORE=2 \ + -DSQLITE_USE_URI=1 \ + -DSQLITE_ENABLE_FTS5 \ + -DSQLITE_DEBUG + +sqlite3-jni.dll = libsqlite3-jni.so +$(sqlite3-jni.dll): + @echo "************************************************************************"; \ + echo "*** If this fails to build, be sure to edit this makefile ***"; \ + echo "*** to configure it for your system. ***"; \ + echo "************************************************************************" + $(CC) $(CFLAGS) $(SQLITE_OPT) \ + src/sqlite3-jni.c -shared -o $@ + @echo "Now try running it with: make test" + +test.flags = -Djava.library.path=. sqlite3-jni-*.jar +test: $(sqlite3-jni.dll) + java -jar $(test.flags) + java -jar $(test.flags) -t 7 -r 10 -shuffle + +clean: + -rm -f $(sqlite3-jni.dll) + +all: $(sqlite3-jni.dll) diff --git a/ext/jni/src/c/sqlite3-jni.c b/ext/jni/src/c/sqlite3-jni.c new file mode 100644 index 0000000000..f130eff042 --- /dev/null +++ b/ext/jni/src/c/sqlite3-jni.c @@ -0,0 +1,6360 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file implements the JNI bindings declared in +** org.sqlite.jni.capi.CApi (from which sqlite3-jni.h is generated). +*/ + +/* +** If you found this comment by searching the code for +** CallStaticObjectMethod because it appears in console output then +** you're probably the victim of an OpenJDK bug: +** +** https://bugs.openjdk.org/browse/JDK-8130659 +** +** It's known to happen with OpenJDK v8 but not with v19. It was +** triggered by this code long before it made any use of +** CallStaticObjectMethod(). +*/ + +/* +** Define any SQLITE_... config defaults we want if they aren't +** overridden by the builder. Please keep these alphabetized. +*/ + +/**********************************************************************/ +/* SQLITE_D... */ +#ifndef SQLITE_DEFAULT_CACHE_SIZE +# define SQLITE_DEFAULT_CACHE_SIZE -16384 +#endif +#if !defined(SQLITE_DEFAULT_PAGE_SIZE) +# define SQLITE_DEFAULT_PAGE_SIZE 8192 +#endif +#ifndef SQLITE_DQS +# define SQLITE_DQS 0 +#endif + +/**********************************************************************/ +/* SQLITE_ENABLE_... */ +/* +** Unconditionally enable API_ARMOR in the JNI build. It ensures that +** public APIs behave predictable in the face of passing illegal NULLs +** or ranges which might otherwise invoke undefined behavior. 
+*/ +#undef SQLITE_ENABLE_API_ARMOR +#define SQLITE_ENABLE_API_ARMOR 1 + +#ifndef SQLITE_ENABLE_BYTECODE_VTAB +# define SQLITE_ENABLE_BYTECODE_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_DBPAGE_VTAB +# define SQLITE_ENABLE_DBPAGE_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_DBSTAT_VTAB +# define SQLITE_ENABLE_DBSTAT_VTAB 1 +#endif +#ifndef SQLITE_ENABLE_EXPLAIN_COMMENTS +# define SQLITE_ENABLE_EXPLAIN_COMMENTS 1 +#endif +#ifndef SQLITE_ENABLE_MATH_FUNCTIONS +# define SQLITE_ENABLE_MATH_FUNCTIONS 1 +#endif +#ifndef SQLITE_ENABLE_OFFSET_SQL_FUNC +# define SQLITE_ENABLE_OFFSET_SQL_FUNC 1 +#endif +#ifndef SQLITE_ENABLE_RTREE +# define SQLITE_ENABLE_RTREE 1 +#endif +//#ifndef SQLITE_ENABLE_SESSION +//# define SQLITE_ENABLE_SESSION 1 +//#endif +#ifndef SQLITE_ENABLE_STMTVTAB +# define SQLITE_ENABLE_STMTVTAB 1 +#endif +//#ifndef SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +//# define SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION +//#endif + +/**********************************************************************/ +/* SQLITE_J... */ +#ifdef SQLITE_JNI_FATAL_OOM +#if !SQLITE_JNI_FATAL_OOM +#undef SQLITE_JNI_FATAL_OOM +#endif +#endif + +/**********************************************************************/ +/* SQLITE_O... */ +#ifndef SQLITE_OMIT_DEPRECATED +# define SQLITE_OMIT_DEPRECATED 1 +#endif +#ifndef SQLITE_OMIT_LOAD_EXTENSION +# define SQLITE_OMIT_LOAD_EXTENSION 1 +#endif +#ifndef SQLITE_OMIT_SHARED_CACHE +# define SQLITE_OMIT_SHARED_CACHE 1 +#endif +#ifdef SQLITE_OMIT_UTF16 +/* UTF16 is required for java */ +# undef SQLITE_OMIT_UTF16 1 +#endif + +/**********************************************************************/ +/* SQLITE_S... */ +#ifndef SQLITE_STRICT_SUBTYPE +# define SQLITE_STRICT_SUBTYPE 1 +#endif + +/**********************************************************************/ +/* SQLITE_T... */ +#ifndef SQLITE_TEMP_STORE +# define SQLITE_TEMP_STORE 2 +#endif +#ifndef SQLITE_THREADSAFE +# define SQLITE_THREADSAFE 1 +#endif + +/**********************************************************************/ +/* SQLITE_USE_... */ +#ifndef SQLITE_USE_URI +# define SQLITE_USE_URI 1 +#endif + + +/* +** Which sqlite3.c we're using needs to be configurable to enable +** building against a custom copy, e.g. the SEE variant. We have to +** include sqlite3.c, as opposed to sqlite3.h, in order to get access +** to some internal details like SQLITE_MAX_... and friends. This +** increases the rebuild time considerably but we need this in order +** to access some internal functionality and keep the to-Java-exported +** values of SQLITE_MAX_... and SQLITE_LIMIT_... in sync with the C +** build. +*/ +#ifndef SQLITE_C +# define SQLITE_C sqlite3.c +#endif +#define INC__STRINGIFY_(f) #f +#define INC__STRINGIFY(f) INC__STRINGIFY_(f) +#include INC__STRINGIFY(SQLITE_C) +#undef INC__STRINGIFY_ +#undef INC__STRINGIFY +#undef SQLITE_C + +/* +** End of the sqlite3 lib setup. What follows is JNI-specific. +*/ + +#include "sqlite3-jni.h" +#include +#include /* only for testing/debugging */ +#include /* intptr_t for 32-bit builds */ + +/* Only for debugging */ +#define MARKER(pfexp) \ + do{ printf("MARKER: %s:%d:%s():\t",__FILE__,__LINE__,__func__); \ + printf pfexp; \ + } while(0) + +/* +** Creates a verbose JNI function name. Suffix must be +** the JNI-mangled form of the function's name, minus the +** prefix seen in this macro. +** +** If you get java.lang.UnsatisfiedLinkError when calling newly-added +** native bindings, be sure that the mangled name is correct. It can +** be found in the generated sqlite3-jni.h. 
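+**
+** For example, a binding for sqlite3_open() would be expected to use
+** the suffix "1open", giving the full symbol
+** Java_org_sqlite_jni_capi_CApi_sqlite3_1open, because JNI mangles
+** the "_" in the Java-side method name to "_1".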
+*/ +#define JniFuncName(Suffix) \ + Java_org_sqlite_jni_capi_CApi_sqlite3_ ## Suffix + +/* Prologue for JNI function declarations and definitions. */ +#define JniDecl(ReturnType,Suffix) \ + JNIEXPORT ReturnType JNICALL JniFuncName(Suffix) + +/* +** S3JniApi's intent is that CFunc be the name(s) of the C API func(s) +** the being-declared JNI function is wrapping, making it easier to +** find those bindings' JNI-side entry points. The other args are for +** JniDecl. See the many examples in this file. +*/ +#define S3JniApi(CFunc,ReturnType,Suffix) JniDecl(ReturnType,Suffix) + +/* +** S3JniCast_L2P and P2L cast jlong (64-bit) to/from pointers. This is +** required for casting warning-free on 32-bit builds, where we +** otherwise get complaints that we're casting between different-sized +** int types. +** +** This use of intptr_t is the _only_ reason we require +** which, in turn, requires building with -std=c99 (or later). +** +** See also: the notes for LongPtrGet_T. +*/ +#define S3JniCast_L2P(JLongAsPtr) (void*)((intptr_t)(JLongAsPtr)) +#define S3JniCast_P2L(PTR) (jlong)((intptr_t)(PTR)) + +/* +** Shortcuts for the first 2 parameters to all JNI bindings. +** +** The type of the jSelf arg differs, but no docs seem to mention +** this: for static methods it's of type jclass and for non-static +** it's jobject. jobject actually works for all funcs, in the sense +** that it compiles and runs so long as we don't use jSelf (which is +** only rarely needed in this code), but to be pedantically correct we +** need the proper type in the signature. +** +** https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/design.html#jni_interface_functions_and_pointers +*/ +#define JniArgsEnvObj JNIEnv * env, jobject jSelf +#define JniArgsEnvClass JNIEnv * env, jclass jKlazz +/* +** Helpers to account for -Xcheck:jni warnings about not having +** checked for exceptions. +*/ +#define S3JniIfThrew if( (*env)->ExceptionCheck(env) ) +#define S3JniExceptionClear (*env)->ExceptionClear(env) +#define S3JniExceptionReport (*env)->ExceptionDescribe(env) +#define S3JniExceptionIgnore S3JniIfThrew S3JniExceptionClear +#define S3JniExceptionWarnIgnore \ + S3JniIfThrew {S3JniExceptionReport; S3JniExceptionClear;}(void)0 +#define S3JniExceptionWarnCallbackThrew(STR) \ + MARKER(("WARNING: " STR " MUST NOT THROW.\n")); \ + (*env)->ExceptionDescribe(env) + +/** To be used for cases where we're _really_ not expecting an + exception, e.g. looking up well-defined Java class members. */ +#define S3JniExceptionIsFatal(MSG) S3JniIfThrew {\ + S3JniExceptionReport; S3JniExceptionClear; \ + (*env)->FatalError(env, MSG); \ + } + +/* +** Declares local var env = s3jni_env(). All JNI calls involve a +** JNIEnv somewhere, always named env, and many of our macros assume +** env is in scope. Where it's not, but should be, use this to make it +** so. +*/ +#define S3JniDeclLocal_env JNIEnv * const env = s3jni_env() + +/* Fail fatally with an OOM message. */ +static inline void s3jni_oom(JNIEnv * const env){ + (*env)->FatalError(env, "SQLite3 JNI is out of memory.") /* does not return */; +} + +/* +** sqlite3_malloc() proxy which fails fatally on OOM. This should +** only be used for routines which manage global state and have no +** recovery strategy for OOM. For sqlite3 API which can reasonably +** return SQLITE_NOMEM, s3jni_malloc() should be used instead. 
+*/ +static void * s3jni_malloc_or_die(JNIEnv * const env, size_t n){ + void * const rv = sqlite3_malloc(n); + if( n && !rv ) s3jni_oom(env); + return rv; +} + +/* +** Works like sqlite3_malloc() unless built with SQLITE_JNI_FATAL_OOM, +** in which case it calls s3jni_oom() on OOM. +*/ +#ifdef SQLITE_JNI_FATAL_OOM +#define s3jni_malloc(SIZE) s3jni_malloc_or_die(env, SIZE) +#else +#define s3jni_malloc(SIZE) sqlite3_malloc(((void)env,(SIZE))) +/* the ((void)env) trickery here is to avoid ^^^^^^ an otherwise + unused arg in at least one place. */ +#endif + +/* +** Works like sqlite3_realloc() unless built with SQLITE_JNI_FATAL_OOM, +** in which case it calls s3jni_oom() on OOM. +*/ +#ifdef SQLITE_JNI_FATAL_OOM +static void * s3jni_realloc_or_die(JNIEnv * const env, void * p, size_t n){ + void * const rv = sqlite3_realloc(p, (int)n); + if( n && !rv ) s3jni_oom(env); + return rv; +} +#define s3jni_realloc(MEM,SIZE) s3jni_realloc_or_die(env, (MEM), (SIZE)) +#else +#define s3jni_realloc(MEM,SIZE) sqlite3_realloc((MEM), ((void)env, (SIZE))) +#endif + +/* Fail fatally if !EXPR. */ +#define s3jni_oom_fatal(EXPR) if( !(EXPR) ) s3jni_oom(env) +/* Maybe fail fatally if !EXPR. */ +#ifdef SQLITE_JNI_FATAL_OOM +#define s3jni_oom_check s3jni_oom_fatal +#else +#define s3jni_oom_check(EXPR) +#endif +//#define S3JniDb_oom(pDb,EXPR) ((EXPR) ? sqlite3OomFault(pDb) : 0) + +#define s3jni_db_oom(pDb) (void)((pDb) ? ((pDb)->mallocFailed=1) : 0) + +/* Helpers for Java value reference management. */ +static jobject s3jni_ref_global(JNIEnv * const env, jobject const v){ + jobject const rv = v ? (*env)->NewGlobalRef(env, v) : NULL; + s3jni_oom_fatal( v ? !!rv : 1 ); + return rv; +} +static jobject s3jni_ref_local(JNIEnv * const env, jobject const v){ + jobject const rv = v ? (*env)->NewLocalRef(env, v) : NULL; + s3jni_oom_fatal( v ? !!rv : 1 ); + return rv; +} +static inline void s3jni_unref_global(JNIEnv * const env, jobject const v){ + if( v ) (*env)->DeleteGlobalRef(env, v); +} +static inline void s3jni_unref_local(JNIEnv * const env, jobject const v){ + if( v ) (*env)->DeleteLocalRef(env, v); +} +#define S3JniRefGlobal(VAR) s3jni_ref_global(env, (VAR)) +#define S3JniRefLocal(VAR) s3jni_ref_local(env, (VAR)) +#define S3JniUnrefGlobal(VAR) s3jni_unref_global(env, (VAR)) +#define S3JniUnrefLocal(VAR) s3jni_unref_local(env, (VAR)) + +/* +** Lookup key type for use with s3jni_nphop() and a cache of a +** frequently-needed Java-side class reference and one or two Java +** class member IDs. +*/ +typedef struct S3JniNphOp S3JniNphOp; +struct S3JniNphOp { + const int index /* index into S3JniGlobal.nph[] */; + const char * const zName /* Full Java name of the class */; + const char * const zMember /* Name of member property */; + const char * const zTypeSig /* JNI type signature of zMember */; + /* + ** klazz is a global ref to the class represented by pRef. + ** + ** According to: + ** + ** https://developer.ibm.com/articles/j-jni/ + ** + ** > ... the IDs returned for a given class don't change for the + ** lifetime of the JVM process. But the call to get the field or + ** method can require significant work in the JVM, because fields + ** and methods might have been inherited from superclasses, making + ** the JVM walk up the class hierarchy to find them. Because the + ** IDs are the same for a given class, you should look them up + ** once and then reuse them. Similarly, looking up class objects + ** can be expensive, so they should be cached as well. 
+ */ + jclass klazz; + volatile jfieldID fidValue /* NativePointerHolder.nativePointer or + ** OutputPointer.T.value */; + volatile jmethodID midCtor /* klazz's no-arg constructor. Used by + ** NativePointerHolder_new(). */; +}; + +/* +** Cache keys for each concrete NativePointerHolder subclasses and +** OutputPointer.T types. The members are to be used with s3jni_nphop() +** and friends, and each one's member->index corresponds to its index +** in the S3JniGlobal.nph[] array. +*/ +static const struct { + const S3JniNphOp sqlite3; + const S3JniNphOp sqlite3_backup; + const S3JniNphOp sqlite3_blob; + const S3JniNphOp sqlite3_context; + const S3JniNphOp sqlite3_stmt; + const S3JniNphOp sqlite3_value; + const S3JniNphOp OutputPointer_Bool; + const S3JniNphOp OutputPointer_Int32; + const S3JniNphOp OutputPointer_Int64; + const S3JniNphOp OutputPointer_sqlite3; + const S3JniNphOp OutputPointer_sqlite3_blob; + const S3JniNphOp OutputPointer_sqlite3_stmt; + const S3JniNphOp OutputPointer_sqlite3_value; + const S3JniNphOp OutputPointer_String; +#ifdef SQLITE_ENABLE_FTS5 + const S3JniNphOp OutputPointer_ByteArray; + const S3JniNphOp Fts5Context; + const S3JniNphOp Fts5ExtensionApi; + const S3JniNphOp fts5_api; + const S3JniNphOp fts5_tokenizer; + const S3JniNphOp Fts5Tokenizer; +#endif +} S3JniNphOps = { +#define MkRef(INDEX, KLAZZ, MEMBER, SIG) \ + { INDEX, "org/sqlite/jni/" KLAZZ, MEMBER, SIG } +/* NativePointerHolder ref */ +#define RefN(INDEX, KLAZZ) MkRef(INDEX, KLAZZ, "nativePointer", "J") +/* OutputPointer.T ref */ +#define RefO(INDEX, KLAZZ, SIG) MkRef(INDEX, KLAZZ, "value", SIG) + RefN(0, "capi/sqlite3"), + RefN(1, "capi/sqlite3_backup"), + RefN(2, "capi/sqlite3_blob"), + RefN(3, "capi/sqlite3_context"), + RefN(4, "capi/sqlite3_stmt"), + RefN(5, "capi/sqlite3_value"), + RefO(6, "capi/OutputPointer$Bool", "Z"), + RefO(7, "capi/OutputPointer$Int32", "I"), + RefO(8, "capi/OutputPointer$Int64", "J"), + RefO(9, "capi/OutputPointer$sqlite3", + "Lorg/sqlite/jni/capi/sqlite3;"), + RefO(10, "capi/OutputPointer$sqlite3_blob", + "Lorg/sqlite/jni/capi/sqlite3_blob;"), + RefO(11, "capi/OutputPointer$sqlite3_stmt", + "Lorg/sqlite/jni/capi/sqlite3_stmt;"), + RefO(12, "capi/OutputPointer$sqlite3_value", + "Lorg/sqlite/jni/capi/sqlite3_value;"), + RefO(13, "capi/OutputPointer$String", "Ljava/lang/String;"), +#ifdef SQLITE_ENABLE_FTS5 + RefO(14, "capi/OutputPointer$ByteArray", "[B"), + RefN(15, "fts5/Fts5Context"), + RefN(16, "fts5/Fts5ExtensionApi"), + RefN(17, "fts5/fts5_api"), + RefN(18, "fts5/fts5_tokenizer"), + RefN(19, "fts5/Fts5Tokenizer") +#endif +#undef MkRef +#undef RefN +#undef RefO +}; + +#define S3JniNph(T) &S3JniNphOps.T + +enum { + /* + ** Size of the NativePointerHolder cache. Need enough space for + ** (only) the library's NativePointerHolder and OutputPointer types, + ** a fixed count known at build-time. This value needs to be + ** exactly the number of S3JniNphOp entries in the S3JniNphOps + ** object. + */ + S3Jni_NphCache_size = sizeof(S3JniNphOps) / sizeof(S3JniNphOp) +}; + +/* +** State for binding C callbacks to Java methods. +*/ +typedef struct S3JniHook S3JniHook; +struct S3JniHook{ + jobject jObj /* global ref to Java instance */; + jmethodID midCallback /* callback method. Signature depends on + ** jObj's type */; + /* We lookup the jObj.xDestroy() method as-needed for contexts which + ** support custom finalizers. Fundamentally we can support them for + ** any Java type, but we only want to expose support for them where + ** the C API does. 
*/ + jobject jExtra /* Global ref to a per-hook-type value */; + int doXDestroy /* If true then S3JniHook_unref() will call + jObj->xDestroy() if it's available. */; + S3JniHook * pNext /* Next entry in S3Global.hooks.aFree */; +}; +/* For clean bitwise-copy init of local instances. */ +static const S3JniHook S3JniHook_empty = {0,0,0,0,0}; + +/* +** Per-(sqlite3*) state for various JNI bindings. This state is +** allocated as needed, cleaned up in sqlite3_close(_v2)(), and +** recycled when possible. +** +** Trivia: vars and parameters of this type are often named "ps" +** because this class used to have a name for which that abbreviation +** made sense. +*/ +typedef struct S3JniDb S3JniDb; +struct S3JniDb { + sqlite3 *pDb /* The associated db handle */; + jobject jDb /* A global ref of the output object which gets + returned from sqlite3_open(_v2)(). We need this in + order to have an object to pass to routines like + sqlite3_collation_needed()'s callback, or else we + have to dynamically create one for that purpose, + which would be fine except that it would be a + different instance (and maybe even a different + class) than the one the user may expect to + receive. */; + char * zMainDbName /* Holds the string allocated on behalf of + SQLITE_DBCONFIG_MAINDBNAME. */; + struct { + S3JniHook busyHandler; + S3JniHook collationNeeded; + S3JniHook commit; + S3JniHook progress; + S3JniHook rollback; + S3JniHook trace; + S3JniHook update; + S3JniHook auth; +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + S3JniHook preUpdate; +#endif + } hooks; +#ifdef SQLITE_ENABLE_FTS5 + /* FTS5-specific state */ + struct { + jobject jApi /* global ref to s3jni_fts5_api_from_db() */; + } fts; +#endif + S3JniDb * pNext /* Next entry in SJG.perDb.aFree */; +}; + +static const char * const S3JniDb_clientdata_key = "S3JniDb"; +#define S3JniDb_from_clientdata(pDb) \ + (pDb ? sqlite3_get_clientdata(pDb, S3JniDb_clientdata_key) : 0) + +/* +** Cache for per-JNIEnv (i.e. per-thread) data. +** +** Trivia: vars and parameters of this type are often named "jc" +** because this class used to have a name for which that abbreviation +** made sense. +*/ +typedef struct S3JniEnv S3JniEnv; +struct S3JniEnv { + JNIEnv *env /* JNIEnv in which this cache entry was created */; + /* + ** pdbOpening is used to coordinate the Java/DB connection of a + ** being-open()'d db in the face of auto-extensions. + ** Auto-extensions run before we can bind the C db to its Java + ** representation, but auto-extensions require that binding to pass + ** on to their Java-side callbacks. We handle this as follows: + ** + ** - In the JNI side of sqlite3_open(), allocate the Java side of + ** that connection and set pdbOpening to point to that + ** object. + ** + ** - Call sqlite3_open(), which triggers the auto-extension + ** handler. That handler uses pdbOpening to connect the native + ** db handle which it receives with pdbOpening. + ** + ** - When sqlite3_open() returns, check whether pdbOpening->pDb is + ** NULL. If it isn't, auto-extension handling set it up. If it + ** is, complete the Java/C binding unless sqlite3_open() returns + ** a NULL db, in which case free pdbOpening. + */ + S3JniDb * pdbOpening; + S3JniEnv * pNext /* Next entry in SJG.envCache.aHead or + SJG.envCache.aFree */; +}; + +/* +** State for proxying sqlite3_auto_extension() in Java. This was +** initially a separate class from S3JniHook and now the older name is +** retained for readability in the APIs which use this, as well as for +** its better code-searchability. 
+*/ +typedef S3JniHook S3JniAutoExtension; + +/* +** Type IDs for SQL function categories. +*/ +enum UDFType { + UDF_UNKNOWN_TYPE = 0/*for error propagation*/, + UDF_SCALAR, + UDF_AGGREGATE, + UDF_WINDOW +}; + +/* +** State for binding Java-side UDFs. +*/ +typedef struct S3JniUdf S3JniUdf; +struct S3JniUdf { + jobject jObj /* SQLFunction instance */; + char * zFuncName /* Only for error reporting and debug logging */; + enum UDFType type /* UDF type */; + /** Method IDs for the various UDF methods. */ + jmethodID jmidxFunc /* xFunc method (scalar) */; + jmethodID jmidxStep /* xStep method (aggregate/window) */; + jmethodID jmidxFinal /* xFinal method (aggregate/window) */; + jmethodID jmidxValue /* xValue method (window) */; + jmethodID jmidxInverse /* xInverse method (window) */; + S3JniUdf * pNext /* Next entry in SJG.udf.aFree. */; +}; + +#if defined(SQLITE_JNI_ENABLE_METRICS) && 0==SQLITE_JNI_ENABLE_METRICS +# undef SQLITE_JNI_ENABLE_METRICS +#endif + +/* +** If true, modifying S3JniGlobal.metrics is protected by a mutex, +** else it isn't. +*/ +#ifdef SQLITE_DEBUG +# define S3JNI_METRICS_MUTEX SQLITE_THREADSAFE +#else +# define S3JNI_METRICS_MUTEX 0 +#endif +#ifndef SQLITE_JNI_ENABLE_METRICS +# undef S3JNI_METRICS_MUTEX +# define S3JNI_METRICS_MUTEX 0 +#endif + +/* +** Global state, e.g. caches and metrics. +*/ +typedef struct S3JniGlobalType S3JniGlobalType; +struct S3JniGlobalType { + /* + ** According to: https://developer.ibm.com/articles/j-jni/ + ** + ** > A thread can get a JNIEnv by calling GetEnv() using the JNI + ** invocation interface through a JavaVM object. The JavaVM object + ** itself can be obtained by calling the JNI GetJavaVM() method + ** using a JNIEnv object and can be cached and shared across + ** threads. Caching a copy of the JavaVM object enables any thread + ** with access to the cached object to get access to its own + ** JNIEnv when necessary. + */ + JavaVM * jvm; + /* + ** Global mutex. It must not be used for anything which might call + ** back into the JNI layer. + */ + sqlite3_mutex * mutex; + /* + ** Cache of references to Java classes and method IDs for + ** NativePointerHolder subclasses and OutputPointer.T types. + */ + struct { + S3JniNphOp list[S3Jni_NphCache_size]; + sqlite3_mutex * mutex; /* mutex for this->list */ + volatile void const * locker; /* sanity-checking-only context object + for this->mutex */ + } nph; + /* + ** Cache of per-thread state. + */ + struct { + S3JniEnv * aHead /* Linked list of in-use instances */; + S3JniEnv * aFree /* Linked list of free instances */; + sqlite3_mutex * mutex /* mutex for aHead and aFree. */; + volatile void const * locker /* env mutex is held on this + object's behalf. Used only for + sanity checking. */; + } envCache; + /* + ** Per-db state. This can move into the core library once we can tie + ** client-defined state to db handles there. + */ + struct { + S3JniDb * aFree /* Linked list of free instances */; + sqlite3_mutex * mutex /* mutex for aHead and aFree */; + volatile void const * locker + /* perDb mutex is held on this object's behalf. Used only for + sanity checking. Note that the mutex is at the class level, not + instance level. */; + } perDb; + struct { + S3JniUdf * aFree /* Head of the free-item list. Guarded by global + mutex. */; + } udf; + /* + ** Refs to global classes and methods. Obtained during static init + ** and never released. 
+ */ + struct { + jclass cLong /* global ref to java.lang.Long */; + jclass cString /* global ref to java.lang.String */; + jobject oCharsetUtf8 /* global ref to StandardCharset.UTF_8 */; + jmethodID ctorLong1 /* the Long(long) constructor */; + jmethodID ctorStringBA /* the String(byte[],Charset) constructor */; + jmethodID stringGetBytes /* the String.getBytes(Charset) method */; + + /* + ByteBuffer may or may not be supported via JNI on any given + platform: + + https://docs.oracle.com/javase/8/docs/technotes/guides/jni/spec/functions.html#nio_support + + We only store a ref to byteBuffer.klazz if JNI support for + ByteBuffer is available (which we determine during static init). + */ + struct { + jclass klazz /* global ref to java.nio.ByteBuffer */; + jmethodID midAlloc /* ByteBuffer.allocateDirect() */; + jmethodID midLimit /* ByteBuffer.limit() */; + } byteBuffer; + } g; + /* + ** The list of Java-side auto-extensions + ** (org.sqlite.jni.capi.AutoExtensionCallback objects). + */ + struct { + S3JniAutoExtension *aExt /* The auto-extension list. It is + maintained such that all active + entries are in the first contiguous + nExt array elements. */; + int nAlloc /* number of entries allocated for aExt, + as distinct from the number of active + entries. */; + int nExt /* number of active entries in aExt, all in the + first nExt'th array elements. */; + sqlite3_mutex * mutex /* mutex for manipulation/traversal of aExt */; + volatile const void * locker /* object on whose behalf the mutex + is held. Only for sanity checking + in debug builds. */; + } autoExt; +#ifdef SQLITE_ENABLE_FTS5 + struct { + volatile jobject jExt /* Global ref to Java singleton for the + Fts5ExtensionApi instance. */; + struct { + jfieldID fidA /* Fts5Phrase::a member */; + jfieldID fidB /* Fts5Phrase::b member */; + } jPhraseIter; + } fts5; +#endif + struct { +#ifdef SQLITE_ENABLE_SQLLOG + S3JniHook sqllog /* sqlite3_config(SQLITE_CONFIG_SQLLOG) callback */; +#endif + S3JniHook configlog /* sqlite3_config(SQLITE_CONFIG_LOG) callback */; + S3JniHook * aFree /* free-item list, for recycling. */; + sqlite3_mutex * mutex /* mutex for aFree */; + volatile const void * locker /* object on whose behalf the mutex + is held. Only for sanity checking + in debug builds. */; + } hook; +#ifdef SQLITE_JNI_ENABLE_METRICS + /* Internal metrics. */ + struct { + volatile unsigned nEnvHit; + volatile unsigned nEnvMiss; + volatile unsigned nEnvAlloc; + volatile unsigned nMutexEnv /* number of times envCache.mutex was entered for + a S3JniEnv operation. */; + volatile unsigned nMutexNph /* number of times SJG.mutex was entered */; + volatile unsigned nMutexHook /* number of times SJG.mutex hooks.was entered */; + volatile unsigned nMutexPerDb /* number of times perDb.mutex was entered */; + volatile unsigned nMutexAutoExt /* number of times autoExt.mutex was entered */; + volatile unsigned nMutexGlobal /* number of times global mutex was entered. */; + volatile unsigned nMutexUdf /* number of times global mutex was entered + for UDFs. */; + volatile unsigned nDestroy /* xDestroy() calls across all types */; + volatile unsigned nPdbAlloc /* Number of S3JniDb alloced. */; + volatile unsigned nPdbRecycled /* Number of S3JniDb reused. */; + volatile unsigned nUdfAlloc /* Number of S3JniUdf alloced. */; + volatile unsigned nUdfRecycled /* Number of S3JniUdf reused. */; + volatile unsigned nHookAlloc /* Number of S3JniHook alloced. */; + volatile unsigned nHookRecycled /* Number of S3JniHook reused. 
*/; + struct { + /* Number of calls for each type of UDF callback. */ + volatile unsigned nFunc; + volatile unsigned nStep; + volatile unsigned nFinal; + volatile unsigned nValue; + volatile unsigned nInverse; + } udf; + unsigned nMetrics /* Total number of mutex-locked + metrics increments. */; +#if S3JNI_METRICS_MUTEX + sqlite3_mutex * mutex; +#endif + } metrics; +#endif /* SQLITE_JNI_ENABLE_METRICS */ +}; +static S3JniGlobalType S3JniGlobal = {}; +#define SJG S3JniGlobal + +/* Increments *p, possibly protected by a mutex. */ +#ifndef SQLITE_JNI_ENABLE_METRICS +#define s3jni_incr(PTR) +#elif S3JNI_METRICS_MUTEX +static void s3jni_incr( volatile unsigned int * const p ){ + sqlite3_mutex_enter(SJG.metrics.mutex); + ++SJG.metrics.nMetrics; + ++(*p); + sqlite3_mutex_leave(SJG.metrics.mutex); +} +#else +#define s3jni_incr(PTR) ++(*(PTR)) +#endif + +/* Helpers for working with specific mutexes. */ +#if SQLITE_THREADSAFE +#define s3jni_mutex_enter2(M, Metric) \ + sqlite3_mutex_enter( M ); \ + s3jni_incr( &SJG.metrics.Metric ) +#define s3jni_mutex_leave2(M) \ + sqlite3_mutex_leave( M ) + +#define s3jni_mutex_enter(M, L, Metric) \ + assert( (void*)env != (void*)L && "Invalid use of " #L); \ + s3jni_mutex_enter2( M, Metric ); \ + L = env +#define s3jni_mutex_leave(M, L) \ + assert( (void*)env == (void*)L && "Invalid use of " #L); \ + L = 0; \ + s3jni_mutex_leave2( M ) + +#define S3JniEnv_mutex_assertLocked \ + assert( 0 != SJG.envCache.locker && "Misuse of S3JniGlobal.envCache.mutex" ) +#define S3JniEnv_mutex_assertLocker \ + assert( (env) == SJG.envCache.locker && "Misuse of S3JniGlobal.envCache.mutex" ) +#define S3JniEnv_mutex_assertNotLocker \ + assert( (env) != SJG.envCache.locker && "Misuse of S3JniGlobal.envCache.mutex" ) + +#define S3JniEnv_mutex_enter \ + s3jni_mutex_enter( SJG.envCache.mutex, SJG.envCache.locker, nMutexEnv ) +#define S3JniEnv_mutex_leave \ + s3jni_mutex_leave( SJG.envCache.mutex, SJG.envCache.locker ) + +#define S3JniAutoExt_mutex_enter \ + s3jni_mutex_enter( SJG.autoExt.mutex, SJG.autoExt.locker, nMutexAutoExt ) +#define S3JniAutoExt_mutex_leave \ + s3jni_mutex_leave( SJG.autoExt.mutex, SJG.autoExt.locker ) +#define S3JniAutoExt_mutex_assertLocker \ + assert( env == SJG.autoExt.locker && "Misuse of S3JniGlobal.autoExt.mutex" ) + +#define S3JniGlobal_mutex_enter \ + s3jni_mutex_enter2( SJG.mutex, nMutexGlobal ) +#define S3JniGlobal_mutex_leave \ + s3jni_mutex_leave2( SJG.mutex ) + +#define S3JniHook_mutex_enter \ + s3jni_mutex_enter( SJG.hook.mutex, SJG.hook.locker, nMutexHook ) +#define S3JniHook_mutex_leave \ + s3jni_mutex_leave( SJG.hook.mutex, SJG.hook.locker ) + +#define S3JniNph_mutex_enter \ + s3jni_mutex_enter( SJG.nph.mutex, SJG.nph.locker, nMutexNph ) +#define S3JniNph_mutex_leave \ + s3jni_mutex_leave( SJG.nph.mutex, SJG.nph.locker ) + +#define S3JniDb_mutex_assertLocker \ + assert( (env) == SJG.perDb.locker && "Misuse of S3JniGlobal.perDb.mutex" ) +#define S3JniDb_mutex_enter \ + s3jni_mutex_enter( SJG.perDb.mutex, SJG.perDb.locker, nMutexPerDb ) +#define S3JniDb_mutex_leave \ + s3jni_mutex_leave( SJG.perDb.mutex, SJG.perDb.locker ) + +#else /* SQLITE_THREADSAFE==0 */ +#define S3JniAutoExt_mutex_assertLocker +#define S3JniAutoExt_mutex_enter +#define S3JniAutoExt_mutex_leave +#define S3JniDb_mutex_assertLocker +#define S3JniDb_mutex_enter +#define S3JniDb_mutex_leave +#define S3JniEnv_mutex_assertLocked +#define S3JniEnv_mutex_assertLocker +#define S3JniEnv_mutex_assertNotLocker +#define S3JniEnv_mutex_enter +#define S3JniEnv_mutex_leave +#define 
S3JniGlobal_mutex_enter +#define S3JniGlobal_mutex_leave +#define S3JniHook_mutex_enter +#define S3JniHook_mutex_leave +#define S3JniNph_mutex_enter +#define S3JniNph_mutex_leave +#endif + +/* Helpers for jstring and jbyteArray. */ +static const char * s3jni__jstring_to_mutf8(JNIEnv * const env, jstring v ){ + const char *z = v ? (*env)->GetStringUTFChars(env, v, NULL) : 0; + s3jni_oom_check( v ? !!z : !z ); + return z; +} + +#define s3jni_jstring_to_mutf8(ARG) s3jni__jstring_to_mutf8(env, (ARG)) +#define s3jni_mutf8_release(ARG,VAR) if( VAR ) (*env)->ReleaseStringUTFChars(env, ARG, VAR) + +/* +** If jBA is not NULL then its GetByteArrayElements() value is +** returned. If jBA is not NULL and nBA is not NULL then *nBA is set +** to the GetArrayLength() of jBA. If GetByteArrayElements() requires +** an allocation and that allocation fails then this function either +** fails fatally or returns 0, depending on build-time options. + */ +static jbyte * s3jni__jbyteArray_bytes2(JNIEnv * const env, jbyteArray jBA, jsize * nBA ){ + jbyte * const rv = jBA ? (*env)->GetByteArrayElements(env, jBA, NULL) : 0; + s3jni_oom_check( jBA ? !!rv : 1 ); + if( jBA && nBA ) *nBA = (*env)->GetArrayLength(env, jBA); + return rv; +} + +#define s3jni_jbyteArray_bytes2(jByteArray,ptrToSz) \ + s3jni__jbyteArray_bytes2(env, (jByteArray), (ptrToSz)) +#define s3jni_jbyteArray_bytes(jByteArray) s3jni__jbyteArray_bytes2(env, (jByteArray), 0) +#define s3jni_jbyteArray_release(jByteArray,jBytes) \ + if( jBytes ) (*env)->ReleaseByteArrayElements(env, jByteArray, jBytes, JNI_ABORT) +#define s3jni_jbyteArray_commit(jByteArray,jBytes) \ + if( jBytes ) (*env)->ReleaseByteArrayElements(env, jByteArray, jBytes, JNI_COMMIT) + +/* +** If jbb is-a java.nio.Buffer object and the JNI environment supports +** it, *pBuf is set to the buffer's memory and *pN is set to its +** limit() (as opposed to its capacity()). If jbb is NULL, not a +** Buffer, or the JNI environment does not support that operation, +** *pBuf is set to 0 and *pN is set to 0. +** +** Note that the length of the buffer can be larger than SQLITE_LIMIT +** but this function does not know what byte range of the buffer is +** required so cannot check for that violation. The caller is required +** to ensure that any to-be-bind()ed range fits within SQLITE_LIMIT. +** +** Sidebar: it is unfortunate that we cannot get ByteBuffer.limit() +** via a JNI method like we can for ByteBuffer.capacity(). We instead +** have to call back into Java to get the limit(). Depending on how +** the ByteBuffer is used, the limit and capacity might be the same, +** but when reusing a buffer, the limit may well change whereas the +** capacity is fixed. The problem with, e.g., read()ing blob data to a +** ByteBuffer's memory based on its capacity is that Java-level code +** is restricted to accessing the range specified in +** ByteBuffer.limit(). If we were to honor only the capacity, we +** could end up writing to, or reading from, parts of a ByteBuffer +** which client code itself cannot access without explicitly modifying +** the limit. The penalty we pay for this correctness is that we must +** call into Java to get the limit() of every ByteBuffer we work with. +** +** An alternative to having to call into ByteBuffer.limit() from here +** would be to add private native impls of all ByteBuffer-using +** methods, each of which adds a jint parameter which _must_ be set to +** theBuffer.limit() by public Java APIs which use those private impls +** to do the real work. 
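+**
+** Illustration (hypothetical values, not code from this file): a
+** ByteBuffer created via allocateDirect(64) and then flip()'d after 10
+** bytes were written has capacity()==64 but limit()==10, so Java-side
+** code may only legally access bytes [0,10). Honoring limit() keeps
+** the C side within that same window.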
+*/ +static void s3jni__get_nio_buffer(JNIEnv * const env, jobject jbb, void **pBuf, jint * pN ){ + *pBuf = 0; + *pN = 0; + if( jbb ){ + *pBuf = (*env)->GetDirectBufferAddress(env, jbb); + if( *pBuf ){ + /* + ** Maintenance reminder: do not use + ** (*env)->GetDirectBufferCapacity(env,jbb), even though it + ** would be much faster, for reasons explained in this + ** function's comments. + */ + *pN = (*env)->CallIntMethod(env, jbb, SJG.g.byteBuffer.midLimit); + S3JniExceptionIsFatal("Error calling ByteBuffer.limit() method."); + } + } +} +#define s3jni_get_nio_buffer(JOBJ,vpOut,jpOut) \ + s3jni__get_nio_buffer(env,(JOBJ),(vpOut),(jpOut)) + +/* +** Returns the current JNIEnv object. Fails fatally if it cannot find +** the object. +*/ +static JNIEnv * s3jni_env(void){ + JNIEnv * env = 0; + if( (*SJG.jvm)->GetEnv(SJG.jvm, (void **)&env, + JNI_VERSION_1_8) ){ + fprintf(stderr, "Fatal error: cannot get current JNIEnv.\n"); + abort(); + } + return env; +} + +/* +** Fetches the S3JniGlobal.envCache row for the given env, allocating a +** row if needed. When a row is allocated, its state is initialized +** insofar as possible. Calls (*env)->FatalError() if allocation of an +** entry fails. That's hypothetically possible but "shouldn't happen." +*/ +static S3JniEnv * S3JniEnv__get(JNIEnv * const env){ + struct S3JniEnv * row; + S3JniEnv_mutex_enter; + row = SJG.envCache.aHead; + for( ; row; row = row->pNext ){ + if( row->env == env ){ + s3jni_incr( &SJG.metrics.nEnvHit ); + S3JniEnv_mutex_leave; + return row; + } + } + s3jni_incr( &SJG.metrics.nEnvMiss ); + row = SJG.envCache.aFree; + if( row ){ + SJG.envCache.aFree = row->pNext; + }else{ + row = s3jni_malloc_or_die(env, sizeof(*row)); + s3jni_incr( &SJG.metrics.nEnvAlloc ); + } + memset(row, 0, sizeof(*row)); + row->pNext = SJG.envCache.aHead; + SJG.envCache.aHead = row; + row->env = env; + + S3JniEnv_mutex_leave; + return row; +} + +#define S3JniEnv_get() S3JniEnv__get(env) + +/* +** This function is NOT part of the sqlite3 public API. It is strictly +** for use by the sqlite project's own Java/JNI bindings. +** +** For purposes of certain hand-crafted JNI function bindings, we +** need a way of reporting errors which is consistent with the rest of +** the C API, as opposed to throwing Java exceptions. To that end, this +** internal-use-only function is a thin proxy around +** sqlite3ErrorWithMessage(). The intent is that it only be used from +** JNI bindings such as sqlite3_prepare_v2/v3(), and definitely not +** from client code. +** +** Returns err_code. +*/ +static int s3jni_db_error(sqlite3* const db, int err_code, + const char * const zMsg){ + if( db!=0 ){ + if( 0==zMsg ){ + sqlite3Error(db, err_code); + }else{ + const int nMsg = sqlite3Strlen30(zMsg); + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + sqlite3ErrorWithMsg(db, err_code, "%.*s", nMsg, zMsg); + sqlite3_mutex_leave(sqlite3_db_mutex(db)); + } + } + return err_code; +} + +/* +** Creates a new jByteArray of length nP, copies p's contents into it, +** and returns that byte array (NULL on OOM unless fail-fast alloc +** errors are enabled). p may be NULL, in which case the array is +** created but no bytes are filled. 
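+**
+** A minimal usage sketch (hypothetical caller; assumes a local env
+** pointer, as required by the s3jni_new_jbyteArray() macro defined
+** below):
+**
+**   const void * const pBlob = sqlite3_column_blob(pStmt, 0);
+**   int const nBlob = sqlite3_column_bytes(pStmt, 0);
+**   jbyteArray const jba = s3jni_new_jbyteArray(pBlob, nBlob);
+**
+** jba is NULL on OOM unless fail-fast OOM checks abort first.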
+*/ +static jbyteArray s3jni__new_jbyteArray(JNIEnv * const env, + const void * const p, int nP){ + jbyteArray jba = (*env)->NewByteArray(env, (jint)nP); + + s3jni_oom_check( jba ); + if( jba && p ){ + (*env)->SetByteArrayRegion(env, jba, 0, (jint)nP, (const jbyte*)p); + } + return jba; +} + +#define s3jni_new_jbyteArray(P,n) s3jni__new_jbyteArray(env, P, n) + + +/* +** Uses the java.lang.String(byte[],Charset) constructor to create a +** new String from UTF-8 string z. n is the number of bytes to +** copy. If n<0 then sqlite3Strlen30() is used to calculate it. +** +** Returns NULL if z is NULL or on OOM, else returns a new jstring +** owned by the caller. +** +** Sidebar: this is a painfully inefficient way to convert from +** standard UTF-8 to a Java string, but JNI offers only algorithms for +** working with MUTF-8, not UTF-8. +*/ +static jstring s3jni__utf8_to_jstring(JNIEnv * const env, + const char * const z, int n){ + jstring rv = NULL; + if( 0==n || (n<0 && z && !z[0]) ){ + /* Fast-track the empty-string case via the MUTF-8 API. We could + hypothetically do this for any strings where n<4 and z is + NUL-terminated and none of z[0..3] are NUL bytes. */ + rv = (*env)->NewStringUTF(env, ""); + s3jni_oom_check( rv ); + }else if( z ){ + jbyteArray jba; + if( n<0 ) n = sqlite3Strlen30(z); + jba = s3jni_new_jbyteArray((unsigned const char *)z, n); + if( jba ){ + rv = (*env)->NewObject(env, SJG.g.cString, SJG.g.ctorStringBA, + jba, SJG.g.oCharsetUtf8); + S3JniIfThrew{ + S3JniExceptionReport; + S3JniExceptionClear; + } + S3JniUnrefLocal(jba); + } + s3jni_oom_check( rv ); + } + return rv; +} +#define s3jni_utf8_to_jstring(CStr,n) s3jni__utf8_to_jstring(env, CStr, n) + +/* +** Converts the given java.lang.String object into a NUL-terminated +** UTF-8 C-string by calling jstr.getBytes(StandardCharset.UTF_8). +** Returns NULL if jstr is NULL or on allocation error. If jstr is not +** NULL and nLen is not NULL then nLen is set to the length of the +** returned string, not including the terminating NUL. If jstr is not +** NULL and it returns NULL, this indicates an allocation error. In +** that case, if nLen is not NULL then it is either set to 0 (if +** fetching of jstr's bytes fails to allocate) or set to what would +** have been the length of the string had C-string allocation +** succeeded. +** +** The returned memory is allocated from sqlite3_malloc() and +** ownership is transferred to the caller. +*/ +static char * s3jni__jstring_to_utf8(JNIEnv * const env, + jstring jstr, int *nLen){ + jbyteArray jba; + jsize nBA; + char *rv; + + if( !jstr ) return 0; + jba = (*env)->CallObjectMethod(env, jstr, SJG.g.stringGetBytes, + SJG.g.oCharsetUtf8); + + if( (*env)->ExceptionCheck(env) || !jba + /* order of these checks is significant for -Xlint:jni */ ) { + S3JniExceptionReport; + s3jni_oom_check( jba ); + if( nLen ) *nLen = 0; + return 0; + } + nBA = (*env)->GetArrayLength(env, jba); + if( nLen ) *nLen = (int)nBA; + rv = s3jni_malloc( nBA + 1 ); + if( rv ){ + (*env)->GetByteArrayRegion(env, jba, 0, nBA, (jbyte*)rv); + rv[nBA] = 0; + } + S3JniUnrefLocal(jba); + return rv; +} +#define s3jni_jstring_to_utf8(JStr,n) s3jni__jstring_to_utf8(env, JStr, n) + +/* +** Expects to be passed a pointer from sqlite3_column_text16() or +** sqlite3_value_text16() and a byte-length value from +** sqlite3_column_bytes16() or sqlite3_value_bytes16(). It creates a +** Java String of exactly half that character length, returning NULL +** if !p or (*env)->NewString() fails. 
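+**
+** A hypothetical call sequence:
+**
+**   const void * const p = sqlite3_column_text16(pStmt, ndx);
+**   int const nBytes = sqlite3_column_bytes16(pStmt, ndx);
+**   jstring const js = s3jni_text16_to_jstring(env, p, nBytes);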
+*/ +static jstring s3jni_text16_to_jstring(JNIEnv * const env, const void * const p, int nP){ + jstring const rv = p + ? (*env)->NewString(env, (const jchar *)p, (jsize)(nP/2)) + : NULL; + s3jni_oom_check( p ? !!rv : 1 ); + return rv; +} + +/* +** Creates a new ByteBuffer instance with a capacity of n. assert()s +** that SJG.g.byteBuffer.klazz is not 0 and n>0. +*/ +static jobject s3jni__new_ByteBuffer(JNIEnv * const env, int n){ + jobject rv = 0; + assert( SJG.g.byteBuffer.klazz ); + assert( SJG.g.byteBuffer.midAlloc ); + assert( n > 0 ); + rv = (*env)->CallStaticObjectMethod(env, SJG.g.byteBuffer.klazz, + SJG.g.byteBuffer.midAlloc, (jint)n); + S3JniIfThrew { + S3JniExceptionReport; + S3JniExceptionClear; + } + s3jni_oom_check( rv ); + return rv; +} + +/* +** If n>0 and sqlite3_jni_supports_nio() is true then this creates a +** new ByteBuffer object and copies n bytes from p to it. Returns NULL +** if n is 0, sqlite3_jni_supports_nio() is false, or on allocation +** error (unless fatal alloc failures are enabled). +*/ +static jobject s3jni__blob_to_ByteBuffer(JNIEnv * const env, + const void * p, int n){ + jobject rv = NULL; + assert( n >= 0 ); + if( 0==n || !SJG.g.byteBuffer.klazz ){ + return NULL; + } + rv = s3jni__new_ByteBuffer(env, n); + if( rv ){ + void * tgt = (*env)->GetDirectBufferAddress(env, rv); + memcpy(tgt, p, (size_t)n); + } + return rv; +} + + +/* +** Requires jx to be a Throwable. Calls its toString() method and +** returns its value converted to a UTF-8 string. The caller owns the +** returned string and must eventually sqlite3_free() it. Returns 0 +** if there is a problem fetching the info or on OOM. +** +** Design note: we use toString() instead of getMessage() because the +** former includes the exception type's name: +** +** Exception e = new RuntimeException("Hi"); +** System.out.println(e.toString()); // java.lang.RuntimeException: Hi +** System.out.println(e.getMessage()); // Hi +*/ +static char * s3jni_exception_error_msg(JNIEnv * const env, jthrowable jx){ + jmethodID mid; + jstring msg; + char * zMsg; + jclass const klazz = (*env)->GetObjectClass(env, jx); + mid = (*env)->GetMethodID(env, klazz, "toString", "()Ljava/lang/String;"); + S3JniUnrefLocal(klazz); + S3JniIfThrew{ + S3JniExceptionReport; + S3JniExceptionClear; + return 0; + } + msg = (*env)->CallObjectMethod(env, jx, mid); + S3JniIfThrew{ + S3JniExceptionReport; + S3JniExceptionClear; + return 0; + } + zMsg = s3jni_jstring_to_utf8( msg, 0); + S3JniUnrefLocal(msg); + return zMsg; +} + +/* +** Extracts env's current exception, sets ps->pDb's error message to +** its message string, and clears the exception. If errCode is non-0, +** it is used as-is, else SQLITE_ERROR is assumed. If there's a +** problem extracting the exception's message, it's treated as +** non-fatal and zDfltMsg is used in its place. +** +** Locks the global S3JniDb mutex. +** +** This must only be called if a JNI exception is pending. +** +** Returns errCode unless it is 0, in which case SQLITE_ERROR is +** returned. +*/ +static int s3jni__db_exception(JNIEnv * const env, sqlite3 * const pDb, + int errCode, const char *zDfltMsg){ + jthrowable const ex = (*env)->ExceptionOccurred(env); + + if( 0==errCode ) errCode = SQLITE_ERROR; + if( ex ){ + char * zMsg; + S3JniExceptionClear; + zMsg = s3jni_exception_error_msg(env, ex); + s3jni_db_error(pDb, errCode, zMsg ? 
zMsg : zDfltMsg);
+    sqlite3_free(zMsg);
+    S3JniUnrefLocal(ex);
+  }else if( zDfltMsg ){
+    s3jni_db_error(pDb, errCode, zDfltMsg);
+  }
+  return errCode;
+}
+#define s3jni_db_exception(pDb,ERRCODE,DFLTMSG) \
+  s3jni__db_exception(env, (pDb), (ERRCODE), (DFLTMSG) )
+
+/*
+** Extracts the (void xDestroy()) method from jObj and applies it to
+** jObj. If jObj is NULL, this is a no-op. The lack of an xDestroy()
+** method is silently ignored. Any exceptions thrown by xDestroy()
+** trigger a warning to stdout or stderr and then the exception is
+** suppressed.
+*/
+static void s3jni__call_xDestroy(JNIEnv * const env, jobject jObj){
+  if( jObj ){
+    jclass const klazz = (*env)->GetObjectClass(env, jObj);
+    jmethodID method = (*env)->GetMethodID(env, klazz, "xDestroy", "()V");
+
+    S3JniUnrefLocal(klazz);
+    if( method ){
+      s3jni_incr( &SJG.metrics.nDestroy );
+      (*env)->CallVoidMethod(env, jObj, method);
+      S3JniIfThrew{
+        S3JniExceptionWarnCallbackThrew("xDestroy() callback");
+        S3JniExceptionClear;
+      }
+    }else{
+      /* Non-fatal. */
+      S3JniExceptionClear;
+    }
+  }
+}
+#define s3jni_call_xDestroy(JOBJ) s3jni__call_xDestroy(env, (JOBJ))
+
+/*
+** Internal helper for many hook callback impls. Locks the S3JniHook
+** mutex, makes a copy of src into dest, with some differences: (1)
+** if src->jObj or src->jExtra are not NULL then dest will be a new
+** LOCAL ref to it instead of a copy of the prior GLOBAL ref. (2)
+** dest->doXDestroy is always false.
+**
+** If dest->jObj is not NULL when this returns then the caller is
+** obligated to eventually free the new ref by passing *dest to
+** S3JniHook_localundup(). The dest pointer must NOT be passed to
+** S3JniHook_unref(), as that routine assumes that dest->jObj/jExtra
+** are GLOBAL refs (it's illegal to try to unref the wrong ref type).
+**
+** Background: when running a hook we need a call-local copy lest
+** another thread modify the hook while we're running it. That copy
+** has to have its own Java reference, but it need only be call-local.
+*/
+static void S3JniHook__localdup( JNIEnv * const env, S3JniHook const * const src,
+                                 S3JniHook * const dest ){
+  S3JniHook_mutex_enter;
+  *dest = *src;
+  if(src->jObj) dest->jObj = S3JniRefLocal(src->jObj);
+  if(src->jExtra) dest->jExtra = S3JniRefLocal(src->jExtra);
+  dest->doXDestroy = 0;
+  S3JniHook_mutex_leave;
+}
+#define S3JniHook_localdup(src,dest) S3JniHook__localdup(env,src,dest)
+
+static void S3JniHook__localundup( JNIEnv * const env, S3JniHook * const h ){
+  S3JniUnrefLocal(h->jObj);
+  S3JniUnrefLocal(h->jExtra);
+  *h = S3JniHook_empty;
+}
+#define S3JniHook_localundup(HOOK) S3JniHook__localundup(env, &(HOOK))
+
+/*
+** Removes any Java references from s and clears its state. If
+** doXDestroy is true and s->jObj is not NULL, s->jObj
+** is passed to s3jni_call_xDestroy() before any references are
+** cleared. It is legal to call this when the object has no Java
+** references. s must not be NULL.
+*/
+static void S3JniHook__unref(JNIEnv * const env, S3JniHook * const s){
+  if( s->jObj ){
+    if( s->doXDestroy ){
+      s3jni_call_xDestroy(s->jObj);
+    }
+    S3JniUnrefGlobal(s->jObj);
+    S3JniUnrefGlobal(s->jExtra);
+  }else{
+    assert( !s->jExtra );
+  }
+  *s = S3JniHook_empty;
+}
+#define S3JniHook_unref(hook) S3JniHook__unref(env, (hook))
+
+/*
+** Allocates one blank S3JniHook object from the recycling bin, if
+** available, else from the heap. Returns NULL or dies on OOM,
+** depending on build options. Locks on SJG.hook.mutex.
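+**
+** Typical lifecycle, as a hypothetical sketch:
+**
+**   S3JniHook * const pHook = S3JniHook_alloc();
+**   if( pHook ){
+**     ...populate and use pHook...
+**     S3JniHook_free(pHook);
+**   }
+**
+** S3JniHook_free() both unrefs the hook's Java state and returns the
+** object to this recycling bin.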
+*/ +static S3JniHook *S3JniHook__alloc(JNIEnv * const env){ + S3JniHook * p = 0; + S3JniHook_mutex_enter; + if( SJG.hook.aFree ){ + p = SJG.hook.aFree; + SJG.hook.aFree = p->pNext; + p->pNext = 0; + s3jni_incr(&SJG.metrics.nHookRecycled); + } + S3JniHook_mutex_leave; + if( 0==p ){ + p = s3jni_malloc(sizeof(S3JniHook)); + if( p ){ + s3jni_incr(&SJG.metrics.nHookAlloc); + } + } + if( p ){ + *p = S3JniHook_empty; + } + return p; +} +#define S3JniHook_alloc() S3JniHook__alloc(env) + +/* +** The rightful fate of all results from S3JniHook_alloc(). Locks on +** SJG.hook.mutex. +*/ +static void S3JniHook__free(JNIEnv * const env, S3JniHook * const p){ + if(p){ + assert( !p->pNext ); + S3JniHook_unref(p); + S3JniHook_mutex_enter; + p->pNext = SJG.hook.aFree; + SJG.hook.aFree = p; + S3JniHook_mutex_leave; + } +} +#define S3JniHook_free(hook) S3JniHook__free(env, hook) + +#if 0 +/* S3JniHook__free() without the lock: caller must hold the global mutex */ +static void S3JniHook__free_unlocked(JNIEnv * const env, S3JniHook * const p){ + if(p){ + assert( !p->pNext ); + assert( p->pNext != SJG.hook.aFree ); + S3JniHook_unref(p); + p->pNext = SJG.hook.aFree; + SJG.hook.aFree = p; + } +} +#define S3JniHook_free_unlocked(hook) S3JniHook__free_unlocked(env, hook) +#endif + +/* +** Clears all of s's state. Requires that that the caller has locked +** S3JniGlobal.perDb.mutex. Make sure to do anything needed with +** s->pNext and s->pPrev before calling this, as this clears them. +*/ +static void S3JniDb_clear(JNIEnv * const env, S3JniDb * const s){ + S3JniDb_mutex_assertLocker; + sqlite3_free( s->zMainDbName ); +#define UNHOOK(MEMBER) \ + S3JniHook_unref(&s->hooks.MEMBER) + UNHOOK(auth); + UNHOOK(busyHandler); + UNHOOK(collationNeeded); + UNHOOK(commit); + UNHOOK(progress); + UNHOOK(rollback); + UNHOOK(trace); + UNHOOK(update); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + UNHOOK(preUpdate); +#endif +#undef UNHOOK + S3JniUnrefGlobal(s->jDb); + memset(s, 0, sizeof(S3JniDb)); +} + +/* +** Clears s's state and moves it to the free-list. Requires that +** S3JniGlobal.perDb.mutex is locked. +*/ +static void S3JniDb__set_aside_unlocked(JNIEnv * const env, S3JniDb * const s){ + assert( s ); + S3JniDb_mutex_assertLocker; + if( s ){ + S3JniDb_clear(env, s); + s->pNext = SJG.perDb.aFree; + SJG.perDb.aFree = s; + } +} +#define S3JniDb_set_aside_unlocked(JniDb) S3JniDb__set_aside_unlocked(env, JniDb) + +static void S3JniDb__set_aside(JNIEnv * const env, S3JniDb * const s){ + S3JniDb_mutex_enter; + S3JniDb_set_aside_unlocked(s); + S3JniDb_mutex_leave; +} +#define S3JniDb_set_aside(JNIDB) S3JniDb__set_aside(env, JNIDB) + +/* +** Uncache any state for the given JNIEnv, clearing all Java +** references the cache owns. Returns true if env was cached and false +** if it was not found in the cache. Ownership of the S3JniEnv object +** associated with the given argument is transferred to this function, +** which makes it free for re-use. +** +** Requires that the env mutex be locked. 
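+**
+** Hypothetical call pattern (the caller supplies the locking):
+**
+**   S3JniEnv_mutex_enter;
+**   S3JniEnv_uncache(env);
+**   S3JniEnv_mutex_leave;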
+*/ +static int S3JniEnv_uncache(JNIEnv * const env){ + struct S3JniEnv * row; + struct S3JniEnv * pPrev = 0; + + S3JniEnv_mutex_assertLocked; + row = SJG.envCache.aHead; + for( ; row; pPrev = row, row = row->pNext ){ + if( row->env == env ){ + break; + } + } + if( !row ){ + return 0; + } + if( pPrev) pPrev->pNext = row->pNext; + else{ + assert( SJG.envCache.aHead == row ); + SJG.envCache.aHead = row->pNext; + } + memset(row, 0, sizeof(S3JniEnv)); + row->pNext = SJG.envCache.aFree; + SJG.envCache.aFree = row; + return 1; +} + +/* +** Fetches the given nph-ref from cache the cache and returns the +** object with its klazz member set. This is an O(1) operation except +** on the first call for a given pRef, during which pRef->klazz and +** pRef->pRef are initialized thread-safely. In the latter case it's +** still effectively O(1), but with a much longer 1. +** +** It is up to the caller to populate the other members of the +** returned object if needed, taking care to lock the modification +** with S3JniNph_mutex_enter/leave. +** +** This simple cache catches >99% of searches in the current +** (2023-07-31) tests. +*/ +static S3JniNphOp * s3jni__nphop(JNIEnv * const env, S3JniNphOp const* pRef){ + S3JniNphOp * const pNC = &SJG.nph.list[pRef->index]; + + assert( (void*)pRef>=(void*)&S3JniNphOps && (void*)pRef<(void*)(&S3JniNphOps + 1) + && "pRef is out of range" ); + assert( pRef->index>=0 + && (pRef->index < (sizeof(S3JniNphOps) / sizeof(S3JniNphOp))) + && "pRef->index is out of range" ); + if( !pNC->klazz ){ + S3JniNph_mutex_enter; + if( !pNC->klazz ){ + jclass const klazz = (*env)->FindClass(env, pRef->zName); + //printf("FindClass %s\n", pRef->zName); + S3JniExceptionIsFatal("FindClass() unexpectedly threw"); + pNC->klazz = S3JniRefGlobal(klazz); + } + S3JniNph_mutex_leave; + } + assert( pNC->klazz ); + return pNC; +} + +#define s3jni_nphop(PRef) s3jni__nphop(env, PRef) + +/* +** Common code for accessor functions for NativePointerHolder and +** OutputPointer types. pRef must be a pointer from S3JniNphOps. jOut +** must be an instance of that class (Java's type safety takes care of +** that requirement). If necessary, this fetches the jfieldID for +** jOut's pRef->zMember, which must be of the type represented by the +** JNI type signature pRef->zTypeSig, and stores it in +** S3JniGlobal.nph.list[pRef->index]. Fails fatally if the pRef->zMember +** property is not found, as that presents a serious internal misuse. +** +** Property lookups are cached on a per-pRef basis. +*/ +static jfieldID s3jni_nphop_field(JNIEnv * const env, S3JniNphOp const* pRef){ + S3JniNphOp * const pNC = s3jni_nphop(pRef); + + if( !pNC->fidValue ){ + S3JniNph_mutex_enter; + if( !pNC->fidValue ){ + pNC->fidValue = (*env)->GetFieldID(env, pNC->klazz, + pRef->zMember, pRef->zTypeSig); + S3JniExceptionIsFatal("Code maintenance required: missing " + "required S3JniNphOp::fidValue."); + } + S3JniNph_mutex_leave; + } + assert( pNC->fidValue ); + return pNC->fidValue; +} + +/* +** Sets a native ptr value in NativePointerHolder object jNph, +** which must be of the native type described by pRef. jNph +** may not be NULL. 
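+**
+** e.g. a hypothetical binding would associate a freshly-prepared
+** statement with its Java wrapper like so:
+**
+**   NativePointerHolder_set(S3JniNph(sqlite3_stmt), jStmt, pStmt);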
+*/ +static void NativePointerHolder__set(JNIEnv * const env, S3JniNphOp const* pRef, + jobject jNph, const void * p){ + assert( jNph ); + (*env)->SetLongField(env, jNph, s3jni_nphop_field(env, pRef), + S3JniCast_P2L(p)); + S3JniExceptionIsFatal("Could not set NativePointerHolder.nativePointer."); +} + +#define NativePointerHolder_set(PREF,JNPH,P) \ + NativePointerHolder__set(env, PREF, JNPH, P) + +/* +** Fetches a native ptr value from NativePointerHolder object jNph, +** which must be of the native type described by pRef. This is a +** no-op if jNph is NULL. +*/ +static void * NativePointerHolder__get(JNIEnv * env, jobject jNph, + S3JniNphOp const* pRef){ + void * rv = 0; + if( jNph ){ + rv = S3JniCast_L2P( + (*env)->GetLongField(env, jNph, s3jni_nphop_field(env, pRef)) + ); + S3JniExceptionIsFatal("Cannot fetch NativePointerHolder.nativePointer."); + } + return rv; +} + +#define NativePointerHolder_get(JOBJ,NPHREF) \ + NativePointerHolder__get(env, (JOBJ), (NPHREF)) + +/* +** Helpers for extracting pointers from jobjects, noting that we rely +** on the corresponding Java interfaces having already done the +** type-checking. OBJ must be a jobject referring to a +** NativePointerHolder, where T matches PtrGet_T. Don't use these +** in contexts where that's not the case. Note that these aren't +** type-safe in the strictest sense: +** +** sqlite3 * s = PtrGet_sqlite3_stmt(...) +** +** will work, despite the incorrect macro name, so long as the +** argument is a Java sqlite3 object, as this operation only has void +** pointers to work with. +*/ +#define PtrGet_T(T,JOBJ) (T*)NativePointerHolder_get((JOBJ), S3JniNph(T)) +#define PtrGet_sqlite3(JOBJ) PtrGet_T(sqlite3, (JOBJ)) +#define PtrGet_sqlite3_backup(JOBJ) PtrGet_T(sqlite3_backup, (JOBJ)) +#define PtrGet_sqlite3_blob(JOBJ) PtrGet_T(sqlite3_blob, (JOBJ)) +#define PtrGet_sqlite3_context(JOBJ) PtrGet_T(sqlite3_context, (JOBJ)) +#define PtrGet_sqlite3_stmt(JOBJ) PtrGet_T(sqlite3_stmt, (JOBJ)) +#define PtrGet_sqlite3_value(JOBJ) PtrGet_T(sqlite3_value, (JOBJ)) +/* +** LongPtrGet_T(X,Y) expects X to be an unqualified sqlite3 struct +** type name and Y to be a native pointer to such an object in the +** form of a jlong value. The jlong is simply cast to (X*). This +** approach is, as of 2023-09-27, supplanting the former approach. We +** now do the native pointer extraction in the Java side, rather than +** the C side, because it's reportedly significantly faster. The +** intptr_t part here is necessary for compatibility with (at least) +** ARM32. +** +** 2023-11-09: testing has not revealed any measurable performance +** difference between the approach of passing type T to C compared to +** passing pointer-to-T to C, and adding support for the latter +** everywhere requires significantly more code. As of this writing, the +** older/simpler approach is being applied except for (A) where the +** newer approach has already been applied and (B) hot-spot APIs where +** a difference of microseconds (i.e. below our testing measurement +** threshold) might add up. 
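+**
+** For example, LongPtrGet_sqlite3_stmt(jpStmt) (defined below)
+** effectively expands to:
+**
+**   (sqlite3_stmt*)((intptr_t)(jpStmt))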
+*/ +#define LongPtrGet_T(T,JLongAsPtr) (T*)((intptr_t)((JLongAsPtr))) +#define LongPtrGet_sqlite3(JLongAsPtr) LongPtrGet_T(sqlite3,(JLongAsPtr)) +#define LongPtrGet_sqlite3_backup(JLongAsPtr) LongPtrGet_T(sqlite3_backup,(JLongAsPtr)) +#define LongPtrGet_sqlite3_blob(JLongAsPtr) LongPtrGet_T(sqlite3_blob,(JLongAsPtr)) +#define LongPtrGet_sqlite3_stmt(JLongAsPtr) LongPtrGet_T(sqlite3_stmt,(JLongAsPtr)) +#define LongPtrGet_sqlite3_value(JLongAsPtr) LongPtrGet_T(sqlite3_value,(JLongAsPtr)) +/* +** Extracts the new S3JniDb instance from the free-list, or allocates +** one if needed, associates it with pDb, and returns. Returns NULL +** on OOM. The returned object MUST, on success of the calling +** operation, subsequently be associated with jDb via +** NativePointerHolder_set() or freed using S3JniDb_set_aside(). +*/ +static S3JniDb * S3JniDb_alloc(JNIEnv * const env, jobject jDb){ + S3JniDb * rv = 0; + S3JniDb_mutex_enter; + if( SJG.perDb.aFree ){ + rv = SJG.perDb.aFree; + SJG.perDb.aFree = rv->pNext; + rv->pNext = 0; + s3jni_incr( &SJG.metrics.nPdbRecycled ); + } + S3JniDb_mutex_leave; + if( 0==rv ){ + rv = s3jni_malloc(sizeof(S3JniDb)); + if( rv ){ + s3jni_incr( &SJG.metrics.nPdbAlloc ); + } + } + if( rv ){ + memset(rv, 0, sizeof(S3JniDb)); + rv->jDb = S3JniRefGlobal(jDb); + } + return rv; +} + +/* +** Returns the S3JniDb object for the given org.sqlite.jni.capi.sqlite3 +** object, or NULL if jDb is NULL, no pointer can be extracted +** from it, or no matching entry can be found. +*/ +static S3JniDb * S3JniDb__from_java(JNIEnv * const env, jobject jDb){ + sqlite3 * const pDb = jDb ? PtrGet_sqlite3(jDb) : 0; + return pDb ? S3JniDb_from_clientdata(pDb) : 0; +} +#define S3JniDb_from_java(jObject) S3JniDb__from_java(env,(jObject)) + +/* +** S3JniDb finalizer for use with sqlite3_set_clientdata(). +*/ +static void S3JniDb_xDestroy(void *p){ + S3JniDeclLocal_env; + S3JniDb * const ps = p; + assert( !ps->pNext && "Else ps is already in the free-list."); + S3JniDb_set_aside(ps); +} + +/* +** Evaluates to the S3JniDb object for the given sqlite3 object, or +** NULL if pDb is NULL or was not initialized via the JNI interfaces. +*/ +#define S3JniDb_from_c(sqlite3Ptr) \ + ((sqlite3Ptr) ? S3JniDb_from_clientdata(sqlite3Ptr) : 0) +#define S3JniDb_from_jlong(sqlite3PtrAsLong) \ + S3JniDb_from_c(LongPtrGet_T(sqlite3,sqlite3PtrAsLong)) + +/* +** Unref any Java-side state in (S3JniAutoExtension*) AX and zero out +** AX. +*/ +#define S3JniAutoExtension_clear(AX) S3JniHook_unref(AX); + +/* +** Initializes a pre-allocated S3JniAutoExtension object. Returns +** non-0 if there is an error collecting the required state from +** jAutoExt (which must be an AutoExtensionCallback object). On error, +** it passes ax to S3JniAutoExtension_clear(). +*/ +static int S3JniAutoExtension_init(JNIEnv *const env, + S3JniAutoExtension * const ax, + jobject const jAutoExt){ + jclass const klazz = (*env)->GetObjectClass(env, jAutoExt); + + S3JniAutoExt_mutex_assertLocker; + *ax = S3JniHook_empty; + ax->midCallback = (*env)->GetMethodID(env, klazz, "call", + "(Lorg/sqlite/jni/capi/sqlite3;)I"); + S3JniUnrefLocal(klazz); + S3JniExceptionWarnIgnore; + if( !ax->midCallback ){ + S3JniAutoExtension_clear(ax); + return SQLITE_ERROR; + } + ax->jObj = S3JniRefGlobal(jAutoExt); + return 0; +} + +/* +** Sets the value property of the OutputPointer.Bool jOut object to +** v. 
+*/ +static void OutputPointer_set_Bool(JNIEnv * const env, jobject const jOut, + int v){ + (*env)->SetBooleanField(env, jOut, s3jni_nphop_field( + env, S3JniNph(OutputPointer_Bool) + ), v ? JNI_TRUE : JNI_FALSE ); + S3JniExceptionIsFatal("Cannot set OutputPointer.Bool.value"); +} + +/* +** Sets the value property of the OutputPointer.Int32 jOut object to +** v. +*/ +static void OutputPointer_set_Int32(JNIEnv * const env, jobject const jOut, + int v){ + (*env)->SetIntField(env, jOut, s3jni_nphop_field( + env, S3JniNph(OutputPointer_Int32) + ), (jint)v); + S3JniExceptionIsFatal("Cannot set OutputPointer.Int32.value"); +} + +/* +** Sets the value property of the OutputPointer.Int64 jOut object to +** v. +*/ +static void OutputPointer_set_Int64(JNIEnv * const env, jobject const jOut, + jlong v){ + (*env)->SetLongField(env, jOut, s3jni_nphop_field( + env, S3JniNph(OutputPointer_Int64) + ), v); + S3JniExceptionIsFatal("Cannot set OutputPointer.Int64.value"); +} + +/* +** Internal helper for OutputPointer_set_TYPE() where TYPE is an +** Object type. +*/ +static void OutputPointer_set_obj(JNIEnv * const env, + S3JniNphOp const * const pRef, + jobject const jOut, + jobject v){ + (*env)->SetObjectField(env, jOut, s3jni_nphop_field(env, pRef), v); + S3JniExceptionIsFatal("Cannot set OutputPointer.T.value"); +} + +#ifdef SQLITE_ENABLE_FTS5 +#if 0 +/* +** Sets the value property of the OutputPointer.ByteArray jOut object +** to v. +*/ +static void OutputPointer_set_ByteArray(JNIEnv * const env, jobject const jOut, + jbyteArray const v){ + OutputPointer_set_obj(env, S3JniNph(OutputPointer_ByteArray), jOut, v); +} +#endif +#endif /* SQLITE_ENABLE_FTS5 */ + +/* +** Sets the value property of the OutputPointer.String jOut object to +** v. +*/ +static void OutputPointer_set_String(JNIEnv * const env, jobject const jOut, + jstring const v){ + OutputPointer_set_obj(env, S3JniNph(OutputPointer_String), jOut, v); +} + +/* +** Returns true if eTextRep is a valid sqlite3 encoding constant, else +** returns false. +*/ +static int encodingTypeIsValid(int eTextRep){ + switch( eTextRep ){ + case SQLITE_UTF8: case SQLITE_UTF16: + case SQLITE_UTF16LE: case SQLITE_UTF16BE: + return 1; + default: + return 0; + } +} + +/* For use with sqlite3_result_pointer(), sqlite3_value_pointer(), + sqlite3_bind_java_object(), and sqlite3_column_java_object(). */ +static const char * const s3jni__value_jref_key = "org.sqlite.jni.capi.ResultJavaVal"; + +/* +** If v is not NULL, it must be a jobject global reference. Its +** reference is relinquished. +*/ +static void S3Jni_jobject_finalizer(void *v){ + if( v ){ + S3JniDeclLocal_env; + S3JniUnrefGlobal((jobject)v); + } +} + +/* +** Returns a new Java instance of the class referred to by pRef, which +** MUST be interface-compatible with NativePointerHolder and MUST have +** a no-arg constructor. The NativePointerHolder_set() method is +** passed the new Java object (which must not be NULL) and pNative +** (which may be NULL). Hypothetically returns NULL if Java fails to +** allocate, but the JNI docs are not entirely clear on that detail. +** +** Always use a static pointer from the S3JniNphOps struct for the +** 2nd argument. 
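+**
+** A representative call (this is what new_java_sqlite3_stmt(), defined
+** below, does):
+**
+**   jobject const jStmt =
+**     NativePointerHolder_new(env, S3JniNph(sqlite3_stmt), pStmt);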
+*/
+static jobject NativePointerHolder_new(JNIEnv * const env,
+                                       S3JniNphOp const * pRef,
+                                       const void * pNative){
+  jobject rv = 0;
+  S3JniNphOp * const pNC = s3jni_nphop(pRef);
+  if( !pNC->midCtor ){
+    S3JniNph_mutex_enter;
+    if( !pNC->midCtor ){
+      pNC->midCtor = (*env)->GetMethodID(env, pNC->klazz, "<init>", "()V");
+      S3JniExceptionIsFatal("Cannot find constructor for class.");
+    }
+    S3JniNph_mutex_leave;
+  }
+  rv = (*env)->NewObject(env, pNC->klazz, pNC->midCtor);
+  S3JniExceptionIsFatal("No-arg constructor threw.");
+  s3jni_oom_check(rv);
+  if( rv ) NativePointerHolder_set(pRef, rv, pNative);
+  return rv;
+}
+
+static inline jobject new_java_sqlite3(JNIEnv * const env, sqlite3 *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3), sv);
+}
+static inline jobject new_java_sqlite3_backup(JNIEnv * const env, sqlite3_backup *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3_backup), sv);
+}
+static inline jobject new_java_sqlite3_blob(JNIEnv * const env, sqlite3_blob *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3_blob), sv);
+}
+static inline jobject new_java_sqlite3_context(JNIEnv * const env, sqlite3_context *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3_context), sv);
+}
+static inline jobject new_java_sqlite3_stmt(JNIEnv * const env, sqlite3_stmt *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3_stmt), sv);
+}
+static inline jobject new_java_sqlite3_value(JNIEnv * const env, sqlite3_value *sv){
+  return NativePointerHolder_new(env, S3JniNph(sqlite3_value), sv);
+}
+
+/* Helper typedefs for UDF callback types. */
+typedef void (*udf_xFunc_f)(sqlite3_context*,int,sqlite3_value**);
+typedef void (*udf_xStep_f)(sqlite3_context*,int,sqlite3_value**);
+typedef void (*udf_xFinal_f)(sqlite3_context*);
+/*typedef void (*udf_xValue_f)(sqlite3_context*);*/
+/*typedef void (*udf_xInverse_f)(sqlite3_context*,int,sqlite3_value**);*/
+
+/*
+** Allocate a new S3JniUdf (User-defined Function) and associate it
+** with the SQLFunction-type jObj. Returns NULL on OOM. If the
+** returned object's type==UDF_UNKNOWN_TYPE then the type of UDF was
+** not unambiguously detected based on which callback members it has,
+** which falls into the category of user error.
+**
+** The caller must arrange for the returned object to eventually be
+** passed to S3JniUdf_free().
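+**
+** Detection summary (see the jmidx checks below): xFunc() alone maps
+** to UDF_SCALAR; xStep() plus xFinal() maps to UDF_AGGREGATE, upgraded
+** to UDF_WINDOW when xValue() and xInverse() are also present; any
+** other combination yields UDF_UNKNOWN_TYPE.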
+*/ +static S3JniUdf * S3JniUdf_alloc(JNIEnv * const env, jobject jObj){ + S3JniUdf * s = 0; + + S3JniGlobal_mutex_enter; + s3jni_incr(&SJG.metrics.nMutexUdf); + if( SJG.udf.aFree ){ + s = SJG.udf.aFree; + SJG.udf.aFree = s->pNext; + s->pNext = 0; + s3jni_incr(&SJG.metrics.nUdfRecycled); + } + S3JniGlobal_mutex_leave; + if( !s ){ + s = s3jni_malloc( sizeof(*s)); + s3jni_incr(&SJG.metrics.nUdfAlloc); + } + if( s ){ + const char * zFSI = /* signature for xFunc, xStep, xInverse */ + "(Lorg/sqlite/jni/capi/sqlite3_context;[Lorg/sqlite/jni/capi/sqlite3_value;)V"; + const char * zFV = /* signature for xFinal, xValue */ + "(Lorg/sqlite/jni/capi/sqlite3_context;)V"; + jclass const klazz = (*env)->GetObjectClass(env, jObj); + + memset(s, 0, sizeof(*s)); + s->jObj = S3JniRefGlobal(jObj); + +#define FGET(FuncName,FuncSig,Field) \ + s->Field = (*env)->GetMethodID(env, klazz, FuncName, FuncSig); \ + if( !s->Field ) (*env)->ExceptionClear(env) + + FGET("xFunc", zFSI, jmidxFunc); + FGET("xStep", zFSI, jmidxStep); + FGET("xFinal", zFV, jmidxFinal); + FGET("xValue", zFV, jmidxValue); + FGET("xInverse", zFSI, jmidxInverse); +#undef FGET + + S3JniUnrefLocal(klazz); + if( s->jmidxFunc ) s->type = UDF_SCALAR; + else if( s->jmidxStep && s->jmidxFinal ){ + s->type = (s->jmidxValue && s->jmidxInverse) + ? UDF_WINDOW : UDF_AGGREGATE; + }else{ + s->type = UDF_UNKNOWN_TYPE; + } + } + return s; +} + +/* +** Frees up all resources owned by s, clears its state, then either +** caches it for reuse (if cacheIt is true) or frees it. The former +** requires locking the global mutex, so it must not be held when this +** is called. +*/ +static void S3JniUdf_free(JNIEnv * const env, S3JniUdf * const s, + int cacheIt){ + assert( !s->pNext ); + if( s->jObj ){ + s3jni_call_xDestroy(s->jObj); + S3JniUnrefGlobal(s->jObj); + sqlite3_free(s->zFuncName); + assert( !s->pNext ); + memset(s, 0, sizeof(*s)); + } + if( cacheIt ){ + S3JniGlobal_mutex_enter; + s->pNext = S3JniGlobal.udf.aFree; + S3JniGlobal.udf.aFree = s; + S3JniGlobal_mutex_leave; + }else{ + sqlite3_free( s ); + } +} + +/* Finalizer for sqlite3_create_function() and friends. */ +static void S3JniUdf_finalizer(void * s){ + S3JniUdf_free(s3jni_env(), (S3JniUdf*)s, 1); +} + +/* +** Helper for processing args to UDF handlers with signature +** (sqlite3_context*,int,sqlite3_value**). +*/ +typedef struct { + jobject jcx /* sqlite3_context */; + jobjectArray jargv /* sqlite3_value[] */; +} udf_jargs; + +/* +** Converts the given (cx, argc, argv) into arguments for the given +** UDF, writing the result (Java wrappers for cx and argv) in the +** final 2 arguments. Returns 0 on success, SQLITE_NOMEM on allocation +** error. On error *jCx and *jArgv will be set to 0. The output +** objects are of type org.sqlite.jni.capi.sqlite3_context and +** array-of-org.sqlite.jni.capi.sqlite3_value, respectively. 
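+**
+** Hypothetical call pattern (udf_xFSI() below follows it):
+**
+**   udf_jargs args = {0,0};
+**   int rc = udf_args(env, pCx, argc, argv, &args.jcx, &args.jargv);
+**   if( 0==rc ){
+**     ...invoke the Java-side callback, then udf_unargs()...
+**   }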
+*/ +static int udf_args(JNIEnv *env, + sqlite3_context * const cx, + int argc, sqlite3_value**argv, + jobject * jCx, jobjectArray *jArgv){ + jobjectArray ja = 0; + jobject jcx = new_java_sqlite3_context(env, cx); + jint i; + *jCx = 0; + *jArgv = 0; + if( !jcx ) goto error_oom; + ja = (*env)->NewObjectArray( + env, argc, s3jni_nphop(S3JniNph(sqlite3_value))->klazz, + NULL); + s3jni_oom_check( ja ); + if( !ja ) goto error_oom; + for(i = 0; i < argc; ++i){ + jobject jsv = new_java_sqlite3_value(env, argv[i]); + if( !jsv ) goto error_oom; + (*env)->SetObjectArrayElement(env, ja, i, jsv); + S3JniUnrefLocal(jsv)/*ja has a ref*/; + } + *jCx = jcx; + *jArgv = ja; + return 0; +error_oom: + S3JniUnrefLocal(jcx); + S3JniUnrefLocal(ja); + return SQLITE_NOMEM; +} + +/* +** Requires that jCx and jArgv are sqlite3_context +** resp. array-of-sqlite3_value values initialized by udf_args(). The +** latter will be 0-and-NULL for UDF types with no arguments. This +** function zeroes out the nativePointer member of jCx and each entry +** in jArgv. This is a safety-net precaution to avoid undefined +** behavior if a Java-side UDF holds a reference to its context or one +** of its arguments. This MUST be called from any function which +** successfully calls udf_args(), after calling the corresponding UDF +** and checking its exception status, or which Java-wraps a +** sqlite3_context for use with a UDF(ish) call. It MUST NOT be called +** in any other case. +*/ +static void udf_unargs(JNIEnv *env, jobject jCx, int argc, jobjectArray jArgv){ + int i = 0; + assert(jCx); + NativePointerHolder_set(S3JniNph(sqlite3_context), jCx, 0); + for( ; i < argc; ++i ){ + jobject jsv = (*env)->GetObjectArrayElement(env, jArgv, i); + /* + ** There is a potential Java-triggerable case of Undefined + ** Behavior here, but it would require intentional misuse of the + ** API: + ** + ** If a Java UDF grabs an sqlite3_value from its argv and then + ** assigns that element to null, it becomes unreachable to us so + ** we cannot clear out its pointer. That Java-side object's + ** getNativePointer() will then refer to a stale value, so passing + ** it into (e.g.) sqlite3_value_SOMETHING() would invoke UB. + ** + ** High-level wrappers can avoid that possibility if they do not + ** expose sqlite3_value directly to clients (as is the case in + ** org.sqlite.jni.wrapper1.SqlFunction). + ** + ** One potential (but expensive) workaround for this would be to + ** privately store a duplicate argv array in each sqlite3_context + ** wrapper object, and clear the native pointers from that copy. + */ + assert(jsv && "Someone illegally modified a UDF argument array."); + if( jsv ){ + NativePointerHolder_set(S3JniNph(sqlite3_value), jsv, 0); + } + } +} + + +/* +** Must be called immediately after a Java-side UDF callback throws. +** If translateToErr is true then it sets the exception's message in +** the result error using sqlite3_result_error(). If translateToErr is +** false then it emits a warning that the function threw but should +** not do so. In either case, it clears the exception state. +** +** Returns SQLITE_NOMEM if an allocation fails, else SQLITE_ERROR. In +** the former case it calls sqlite3_result_error_nomem(). 
+*/ +static int udf_report_exception(JNIEnv * const env, int translateToErr, + sqlite3_context * cx, + const char *zFuncName, const char *zFuncType ){ + jthrowable const ex = (*env)->ExceptionOccurred(env); + int rc = SQLITE_ERROR; + + assert(ex && "This must only be called when a Java exception is pending."); + if( translateToErr ){ + char * zMsg; + char * z; + + S3JniExceptionClear; + zMsg = s3jni_exception_error_msg(env, ex); + z = sqlite3_mprintf("Client-defined SQL function %s.%s() threw: %s", + zFuncName ? zFuncName : "", zFuncType, + zMsg ? zMsg : "Unknown exception" ); + sqlite3_free(zMsg); + if( z ){ + sqlite3_result_error(cx, z, -1); + sqlite3_free(z); + }else{ + sqlite3_result_error_nomem(cx); + rc = SQLITE_NOMEM; + } + }else{ + S3JniExceptionWarnCallbackThrew("client-defined SQL function"); + S3JniExceptionClear; + } + S3JniUnrefLocal(ex); + return rc; +} + +/* +** Sets up the state for calling a Java-side xFunc/xStep/xInverse() +** UDF, calls it, and returns 0 on success. +*/ +static int udf_xFSI(sqlite3_context* const pCx, int argc, + sqlite3_value** const argv, S3JniUdf * const s, + jmethodID xMethodID, const char * const zFuncType){ + S3JniDeclLocal_env; + udf_jargs args = {0,0}; + int rc = udf_args(env, pCx, argc, argv, &args.jcx, &args.jargv); + + if( 0 == rc ){ + (*env)->CallVoidMethod(env, s->jObj, xMethodID, args.jcx, args.jargv); + S3JniIfThrew{ + rc = udf_report_exception(env, 'F'==zFuncType[1]/*xFunc*/, pCx, + s->zFuncName, zFuncType); + } + udf_unargs(env, args.jcx, argc, args.jargv); + } + S3JniUnrefLocal(args.jcx); + S3JniUnrefLocal(args.jargv); + return rc; +} + +/* +** Sets up the state for calling a Java-side xFinal/xValue() UDF, +** calls it, and returns 0 on success. +*/ +static int udf_xFV(sqlite3_context* cx, S3JniUdf * s, + jmethodID xMethodID, + const char *zFuncType){ + S3JniDeclLocal_env; + jobject jcx = new_java_sqlite3_context(env, cx); + int rc = 0; + int const isFinal = 'F'==zFuncType[1]/*xFinal*/; + + if( jcx ){ + (*env)->CallVoidMethod(env, s->jObj, xMethodID, jcx); + S3JniIfThrew{ + rc = udf_report_exception(env, isFinal, cx, s->zFuncName, + zFuncType); + } + udf_unargs(env, jcx, 0, 0); + S3JniUnrefLocal(jcx); + }else{ + if( isFinal ) sqlite3_result_error_nomem(cx); + rc = SQLITE_NOMEM; + } + return rc; +} + +/* Proxy for C-to-Java xFunc. */ +static void udf_xFunc(sqlite3_context* cx, int argc, + sqlite3_value** argv){ + S3JniUdf * const s = (S3JniUdf*)sqlite3_user_data(cx); + s3jni_incr( &SJG.metrics.udf.nFunc ); + udf_xFSI(cx, argc, argv, s, s->jmidxFunc, "xFunc"); +} +/* Proxy for C-to-Java xStep. */ +static void udf_xStep(sqlite3_context* cx, int argc, + sqlite3_value** argv){ + S3JniUdf * const s = (S3JniUdf*)sqlite3_user_data(cx); + s3jni_incr( &SJG.metrics.udf.nStep ); + udf_xFSI(cx, argc, argv, s, s->jmidxStep, "xStep"); +} +/* Proxy for C-to-Java xFinal. */ +static void udf_xFinal(sqlite3_context* cx){ + S3JniUdf * const s = (S3JniUdf*)sqlite3_user_data(cx); + s3jni_incr( &SJG.metrics.udf.nFinal ); + udf_xFV(cx, s, s->jmidxFinal, "xFinal"); +} +/* Proxy for C-to-Java xValue. */ +static void udf_xValue(sqlite3_context* cx){ + S3JniUdf * const s = (S3JniUdf*)sqlite3_user_data(cx); + s3jni_incr( &SJG.metrics.udf.nValue ); + udf_xFV(cx, s, s->jmidxValue, "xValue"); +} +/* Proxy for C-to-Java xInverse. 
*/ +static void udf_xInverse(sqlite3_context* cx, int argc, + sqlite3_value** argv){ + S3JniUdf * const s = (S3JniUdf*)sqlite3_user_data(cx); + s3jni_incr( &SJG.metrics.udf.nInverse ); + udf_xFSI(cx, argc, argv, s, s->jmidxInverse, "xInverse"); +} + + +//////////////////////////////////////////////////////////////////////// +// What follows is the JNI/C bindings. They are in alphabetical order +// except for this macro-generated subset which are kept together +// (alphabetized) here at the front... +//////////////////////////////////////////////////////////////////////// + +/** Create a trivial JNI wrapper for (int CName(void)). */ +#define WRAP_INT_VOID(JniNameSuffix,CName) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass){ \ + return (jint)CName(); \ + } +/** Create a trivial JNI wrapper for (int CName(int)). */ +#define WRAP_INT_INT(JniNameSuffix,CName) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jint arg){ \ + return (jint)CName((int)arg); \ + } +/* +** Create a trivial JNI wrapper for (const mutf8_string * +** CName(void)). This is only valid for functions which are known to +** return ASCII or text which is equivalent in UTF-8 and MUTF-8. +*/ +#define WRAP_MUTF8_VOID(JniNameSuffix,CName) \ + JniDecl(jstring,JniNameSuffix)(JniArgsEnvClass){ \ + jstring const rv = (*env)->NewStringUTF( env, CName() ); \ + s3jni_oom_check(rv); \ + return rv; \ + } +/** Create a trivial JNI wrapper for (int CName(sqlite3_stmt*)). */ +#define WRAP_INT_STMT(JniNameSuffix,CName) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt){ \ + return (jint)CName(LongPtrGet_sqlite3_stmt(jpStmt)); \ + } +/** Create a trivial JNI wrapper for (int CName(sqlite3_stmt*,int)). */ +#define WRAP_INT_STMT_INT(JniNameSuffix,CName) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt, jint n){ \ + return (jint)CName(LongPtrGet_sqlite3_stmt(jpStmt), (int)n); \ + } +/** Create a trivial JNI wrapper for (boolean CName(sqlite3_stmt*)). */ +#define WRAP_BOOL_STMT(JniNameSuffix,CName) \ + JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jobject jStmt){ \ + return CName(PtrGet_sqlite3_stmt(jStmt)) ? JNI_TRUE : JNI_FALSE; \ + } +/** Create a trivial JNI wrapper for (jstring CName(sqlite3_stmt*,int)). */ +#define WRAP_STR_STMT_INT(JniNameSuffix,CName) \ + JniDecl(jstring,JniNameSuffix)(JniArgsEnvClass, jlong jpStmt, jint ndx){ \ + return s3jni_utf8_to_jstring( \ + CName(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx), \ + -1); \ + } +/** Create a trivial JNI wrapper for (boolean CName(sqlite3*)). */ +#define WRAP_BOOL_DB(JniNameSuffix,CName) \ + JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \ + return CName(LongPtrGet_sqlite3(jpDb)) ? JNI_TRUE : JNI_FALSE; \ + } +/** Create a trivial JNI wrapper for (int CName(sqlite3*)). */ +#define WRAP_INT_DB(JniNameSuffix,CName) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \ + return (jint)CName(LongPtrGet_sqlite3(jpDb)); \ + } +/** Create a trivial JNI wrapper for (int64 CName(sqlite3*)). */ +#define WRAP_INT64_DB(JniNameSuffix,CName) \ + JniDecl(jlong,JniNameSuffix)(JniArgsEnvClass, jlong jpDb){ \ + return (jlong)CName(LongPtrGet_sqlite3(jpDb)); \ + } +/** Create a trivial JNI wrapper for (jstring CName(sqlite3*,int)). */ +#define WRAP_STR_DB_INT(JniNameSuffix,CName) \ + JniDecl(jstring,JniNameSuffix)(JniArgsEnvClass, jlong jpDb, jint ndx){ \ + return s3jni_utf8_to_jstring( \ + CName(LongPtrGet_sqlite3(jpDb), (int)ndx), \ + -1); \ + } +/** Create a trivial JNI wrapper for (int CName(sqlite3_value*)). 
*/ +#define WRAP_INT_SVALUE(JniNameSuffix,CName,DfltOnNull) \ + JniDecl(jint,JniNameSuffix)(JniArgsEnvClass, jlong jpSValue){ \ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSValue); \ + return (jint)(sv ? CName(sv): DfltOnNull); \ + } +/** Create a trivial JNI wrapper for (boolean CName(sqlite3_value*)). */ +#define WRAP_BOOL_SVALUE(JniNameSuffix,CName,DfltOnNull) \ + JniDecl(jboolean,JniNameSuffix)(JniArgsEnvClass, jlong jpSValue){ \ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSValue); \ + return (jint)(sv ? CName(sv) : DfltOnNull) \ + ? JNI_TRUE : JNI_FALSE; \ + } + +WRAP_INT_DB(1changes, sqlite3_changes) +WRAP_INT64_DB(1changes64, sqlite3_changes64) +WRAP_INT_STMT(1clear_1bindings, sqlite3_clear_bindings) +WRAP_INT_STMT_INT(1column_1bytes, sqlite3_column_bytes) +WRAP_INT_STMT_INT(1column_1bytes16, sqlite3_column_bytes16) +WRAP_INT_STMT(1column_1count, sqlite3_column_count) +WRAP_STR_STMT_INT(1column_1decltype, sqlite3_column_decltype) +WRAP_STR_STMT_INT(1column_1name, sqlite3_column_name) +#ifdef SQLITE_ENABLE_COLUMN_METADATA +WRAP_STR_STMT_INT(1column_1database_1name, sqlite3_column_database_name) +WRAP_STR_STMT_INT(1column_1origin_1name, sqlite3_column_origin_name) +WRAP_STR_STMT_INT(1column_1table_1name, sqlite3_column_table_name) +#endif +WRAP_INT_STMT_INT(1column_1type, sqlite3_column_type) +WRAP_INT_STMT(1data_1count, sqlite3_data_count) +WRAP_STR_DB_INT(1db_1name, sqlite3_db_name) +WRAP_INT_DB(1error_1offset, sqlite3_error_offset) +WRAP_INT_DB(1extended_1errcode, sqlite3_extended_errcode) +WRAP_BOOL_DB(1get_1autocommit, sqlite3_get_autocommit) +WRAP_MUTF8_VOID(1libversion, sqlite3_libversion) +WRAP_INT_VOID(1libversion_1number, sqlite3_libversion_number) +WRAP_INT_VOID(1keyword_1count, sqlite3_keyword_count) +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +WRAP_INT_DB(1preupdate_1blobwrite, sqlite3_preupdate_blobwrite) +WRAP_INT_DB(1preupdate_1count, sqlite3_preupdate_count) +WRAP_INT_DB(1preupdate_1depth, sqlite3_preupdate_depth) +#endif +WRAP_INT_INT(1release_1memory, sqlite3_release_memory) +WRAP_INT_INT(1sleep, sqlite3_sleep) +WRAP_MUTF8_VOID(1sourceid, sqlite3_sourceid) +WRAP_BOOL_STMT(1stmt_1busy, sqlite3_stmt_busy) +WRAP_INT_STMT_INT(1stmt_1explain, sqlite3_stmt_explain) +WRAP_INT_STMT(1stmt_1isexplain, sqlite3_stmt_isexplain) +WRAP_BOOL_STMT(1stmt_1readonly, sqlite3_stmt_readonly) +WRAP_INT_DB(1system_1errno, sqlite3_system_errno) +WRAP_INT_VOID(1threadsafe, sqlite3_threadsafe) +WRAP_INT_DB(1total_1changes, sqlite3_total_changes) +WRAP_INT64_DB(1total_1changes64, sqlite3_total_changes64) +WRAP_INT_SVALUE(1value_1encoding, sqlite3_value_encoding,SQLITE_UTF8) +WRAP_BOOL_SVALUE(1value_1frombind, sqlite3_value_frombind,0) +WRAP_INT_SVALUE(1value_1nochange, sqlite3_value_nochange,0) +WRAP_INT_SVALUE(1value_1numeric_1type, sqlite3_value_numeric_type,SQLITE_NULL) +WRAP_INT_SVALUE(1value_1subtype, sqlite3_value_subtype,0) +WRAP_INT_SVALUE(1value_1type, sqlite3_value_type,SQLITE_NULL) + +#undef WRAP_BOOL_DB +#undef WRAP_BOOL_STMT +#undef WRAP_BOOL_SVALUE +#undef WRAP_INT64_DB +#undef WRAP_INT_DB +#undef WRAP_INT_INT +#undef WRAP_INT_STMT +#undef WRAP_INT_STMT_INT +#undef WRAP_INT_SVALUE +#undef WRAP_INT_VOID +#undef WRAP_MUTF8_VOID +#undef WRAP_STR_STMT_INT +#undef WRAP_STR_DB_INT + +S3JniApi(sqlite3_aggregate_context(),jlong,1aggregate_1context)( + JniArgsEnvClass, jobject jCx, jboolean initialize +){ + sqlite3_context * const pCx = PtrGet_sqlite3_context(jCx); + void * const p = pCx + ? sqlite3_aggregate_context(pCx, (int)(initialize + ? 
(int)sizeof(void*) + : 0)) + : 0; + return S3JniCast_P2L(p); +} + +/* +** Central auto-extension runner for auto-extensions created in Java. +*/ +static int s3jni_run_java_auto_extensions(sqlite3 *pDb, const char **pzErr, + const struct sqlite3_api_routines *ignored){ + int rc = 0; + unsigned i, go = 1; + JNIEnv * env = 0; + S3JniDb * ps; + S3JniEnv * jc; + + if( 0==SJG.autoExt.nExt ) return 0; + env = s3jni_env(); + jc = S3JniEnv_get(); + S3JniDb_mutex_enter; + ps = jc->pdbOpening ? jc->pdbOpening : S3JniDb_from_c(pDb); + if( !ps ){ + *pzErr = sqlite3_mprintf("Unexpected arrival of null S3JniDb in " + "auto-extension runner."); + S3JniDb_mutex_leave; + return SQLITE_ERROR; + } + assert( ps->jDb ); + if( !ps->pDb ){ + assert( jc->pdbOpening == ps ); + rc = sqlite3_set_clientdata(pDb, S3JniDb_clientdata_key, + ps, 0/* we'll re-set this after open() + completes. */); + if( rc ){ + S3JniDb_mutex_leave; + return rc; + } + } + else{ + assert( ps == jc->pdbOpening ); + jc->pdbOpening = 0; + } + S3JniDb_mutex_leave; + NativePointerHolder_set(S3JniNph(sqlite3), ps->jDb, pDb) + /* As of here, the Java/C connection is complete except for the + (temporary) lack of finalizer for the ps object. */; + ps->pDb = pDb; + for( i = 0; go && 0==rc; ++i ){ + S3JniAutoExtension ax = S3JniHook_empty + /* We need a copy of the auto-extension object, with our own + ** local reference to it, to avoid a race condition with another + ** thread manipulating the list during the call and invaliding + ** what ax references. */; + S3JniAutoExt_mutex_enter; + if( i >= SJG.autoExt.nExt ){ + go = 0; + }else{ + S3JniHook_localdup(&SJG.autoExt.aExt[i], &ax); + } + S3JniAutoExt_mutex_leave; + if( ax.jObj ){ + rc = (*env)->CallIntMethod(env, ax.jObj, ax.midCallback, ps->jDb); + S3JniHook_localundup(ax); + S3JniIfThrew { + jthrowable const ex = (*env)->ExceptionOccurred(env); + char * zMsg; + S3JniExceptionClear; + zMsg = s3jni_exception_error_msg(env, ex); + S3JniUnrefLocal(ex); + *pzErr = sqlite3_mprintf("auto-extension threw: %s", zMsg); + sqlite3_free(zMsg); + rc = SQLITE_ERROR; + } + } + } + return rc; +} + +S3JniApi(sqlite3_auto_extension(),jint,1auto_1extension)( + JniArgsEnvClass, jobject jAutoExt +){ + int i; + S3JniAutoExtension * ax = 0; + int rc = 0; + + if( !jAutoExt ) return SQLITE_MISUSE; + S3JniAutoExt_mutex_enter; + for( i = 0; i < SJG.autoExt.nExt; ++i ){ + /* Look for a match. */ + ax = &SJG.autoExt.aExt[i]; + if( ax->jObj && (*env)->IsSameObject(env, ax->jObj, jAutoExt) ){ + /* same object, so this is a no-op. */ + S3JniAutoExt_mutex_leave; + return 0; + } + } + if( i == SJG.autoExt.nExt ){ + assert( SJG.autoExt.nExt <= SJG.autoExt.nAlloc ); + if( SJG.autoExt.nExt == SJG.autoExt.nAlloc ){ + /* Allocate another slot. */ + unsigned n = 1 + SJG.autoExt.nAlloc; + S3JniAutoExtension * const aNew = + s3jni_realloc( SJG.autoExt.aExt, n * sizeof(*ax) ); + if( !aNew ){ + rc = SQLITE_NOMEM; + }else{ + SJG.autoExt.aExt = aNew; + ++SJG.autoExt.nAlloc; + } + } + if( 0==rc ){ + ax = &SJG.autoExt.aExt[SJG.autoExt.nExt]; + rc = S3JniAutoExtension_init(env, ax, jAutoExt); + assert( rc ? (0==ax->jObj && 0==ax->midCallback) + : (0!=ax->jObj && 0!=ax->midCallback) ); + } + } + if( 0==rc ){ + static int once = 0; + if( 0==once && ++once ){ + rc = sqlite3_auto_extension( + (void(*)(void))s3jni_run_java_auto_extensions + /* Reminder: the JNI binding of sqlite3_reset_auto_extension() + ** does not call the core-lib impl. It only clears Java-side + ** auto-extensions. 
*/ + ); + if( rc ){ + assert( ax ); + S3JniAutoExtension_clear(ax); + } + } + if( 0==rc ){ + ++SJG.autoExt.nExt; + } + } + S3JniAutoExt_mutex_leave; + return rc; +} + +S3JniApi(sqlite3_backup_finish(),jint,1backup_1finish)( + JniArgsEnvClass, jlong jpBack +){ + int rc = 0; + if( jpBack!=0 ){ + rc = sqlite3_backup_finish( LongPtrGet_sqlite3_backup(jpBack) ); + } + return rc; +} + +S3JniApi(sqlite3_backup_init(),jobject,1backup_1init)( + JniArgsEnvClass, jlong jpDbDest, jstring jTDest, + jlong jpDbSrc, jstring jTSrc +){ + sqlite3 * const pDest = LongPtrGet_sqlite3(jpDbDest); + sqlite3 * const pSrc = LongPtrGet_sqlite3(jpDbSrc); + char * const zDest = s3jni_jstring_to_utf8(jTDest, 0); + char * const zSrc = s3jni_jstring_to_utf8(jTSrc, 0); + jobject rv = 0; + + if( pDest && pSrc && zDest && zSrc ){ + sqlite3_backup * const pB = + sqlite3_backup_init(pDest, zDest, pSrc, zSrc); + if( pB ){ + rv = new_java_sqlite3_backup(env, pB); + if( !rv ){ + sqlite3_backup_finish( pB ); + } + } + } + sqlite3_free(zDest); + sqlite3_free(zSrc); + return rv; +} + +S3JniApi(sqlite3_backup_pagecount(),jint,1backup_1pagecount)( + JniArgsEnvClass, jlong jpBack +){ + return sqlite3_backup_pagecount(LongPtrGet_sqlite3_backup(jpBack)); +} + +S3JniApi(sqlite3_backup_remaining(),jint,1backup_1remaining)( + JniArgsEnvClass, jlong jpBack +){ + return sqlite3_backup_remaining(LongPtrGet_sqlite3_backup(jpBack)); +} + +S3JniApi(sqlite3_backup_step(),jint,1backup_1step)( + JniArgsEnvClass, jlong jpBack, jint nPage +){ + return sqlite3_backup_step(LongPtrGet_sqlite3_backup(jpBack), (int)nPage); +} + +S3JniApi(sqlite3_bind_blob(),jint,1bind_1blob)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jbyteArray baData, jint nMax +){ + jsize nBA = 0; + jbyte * const pBuf = baData ? s3jni_jbyteArray_bytes2(baData, &nBA) : 0; + int rc; + if( pBuf ){ + if( nMax>nBA ){ + nMax = nBA; + } + rc = sqlite3_bind_blob(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, + pBuf, (int)nMax, SQLITE_TRANSIENT); + s3jni_jbyteArray_release(baData, pBuf); + }else{ + rc = baData + ? SQLITE_NOMEM + : sqlite3_bind_null( LongPtrGet_sqlite3_stmt(jpStmt), ndx ); + } + return (jint)rc; +} + +/** + Helper for use with s3jni_setup_nio_args(). +*/ +struct S3JniNioArgs { + jobject jBuf; /* input - ByteBuffer */ + jint iOffset; /* input - byte offset */ + jint iHowMany; /* input - byte count to bind/read/write */ + jint nBuf; /* output - jBuf's buffer size */ + void * p; /* output - jBuf's buffer memory */ + void * pStart; /* output - offset of p to bind/read/write */ + int nOut; /* output - number of bytes from pStart to bind/read/write */ +}; +typedef struct S3JniNioArgs S3JniNioArgs; +static const S3JniNioArgs S3JniNioArgs_empty = { + 0,0,0,0,0,0,0 +}; + +/* +** Internal helper for sqlite3_bind_nio_buffer(), +** sqlite3_result_nio_buffer(), and similar methods which take a +** ByteBuffer object as either input or output. Populates pArgs and +** returns 0 on success, non-0 if the operation should fail. The +** caller is required to check for SJG.g.byteBuffer.klazz!=0 before calling +** this and reporting it in a way appropriate for that routine. This +** function may assert() that SJG.g.byteBuffer.klazz is not 0. +** +** The (jBuffer, iOffset, iHowMany) arguments are the (ByteBuffer, offset, +** length) arguments to the bind/result method. +** +** If iHowMany is negative then it's treated as "until the end" and +** the calculated slice is trimmed to fit if needed. If iHowMany is +** positive and extends past the end of jBuffer then SQLITE_ERROR is +** returned. 
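+**
+** For example (hypothetical inputs): a negative iOffset yields
+** SQLITE_ERROR, a buffer for which no direct address is available
+** yields SQLITE_MISUSE, and an iOffset at or past the buffer's limit()
+** succeeds with an empty slice (pStart==0, nOut==0), which callers
+** such as sqlite3_bind_nio_buffer() translate into binding SQL NULL.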
+** +** Returns 0 if everything looks to be in order, else some SQLITE_... +** result code +*/ +static int s3jni_setup_nio_args( + JNIEnv *env, S3JniNioArgs * pArgs, + jobject jBuffer, jint iOffset, jint iHowMany +){ + jlong iEnd = 0; + const int bAllowTruncate = iHowMany<0; + *pArgs = S3JniNioArgs_empty; + pArgs->jBuf = jBuffer; + pArgs->iOffset = iOffset; + pArgs->iHowMany = iHowMany; + assert( SJG.g.byteBuffer.klazz ); + if( pArgs->iOffset<0 ){ + return SQLITE_ERROR + /* SQLITE_MISUSE or SQLITE_RANGE would fit better but we use + SQLITE_ERROR for consistency with the code documented for a + negative target blob offset in sqlite3_blob_read/write(). */; + } + s3jni_get_nio_buffer(pArgs->jBuf, &pArgs->p, &pArgs->nBuf); + if( !pArgs->p ){ + return SQLITE_MISUSE; + }else if( pArgs->iOffset>=pArgs->nBuf ){ + pArgs->pStart = 0; + pArgs->nOut = 0; + return 0; + } + assert( pArgs->nBuf > 0 ); + assert( pArgs->iOffset < pArgs->nBuf ); + iEnd = pArgs->iHowMany<0 + ? pArgs->nBuf - pArgs->iOffset + : pArgs->iOffset + pArgs->iHowMany; + if( iEnd>(jlong)pArgs->nBuf ){ + if( bAllowTruncate ){ + iEnd = pArgs->nBuf - pArgs->iOffset; + }else{ + return SQLITE_ERROR + /* again: for consistency with blob_read/write(), though + SQLITE_MISUSE or SQLITE_RANGE would be a better fit. */; + } + } + if( iEnd - pArgs->iOffset > (jlong)SQLITE_MAX_LENGTH ){ + return SQLITE_TOOBIG; + } + assert( pArgs->iOffset >= 0 ); + assert( iEnd > pArgs->iOffset ); + pArgs->pStart = pArgs->p + pArgs->iOffset; + pArgs->nOut = (int)(iEnd - pArgs->iOffset); + assert( pArgs->nOut > 0 ); + assert( (pArgs->pStart + pArgs->nOut) <= (pArgs->p + pArgs->nBuf) ); + return 0; +} + +S3JniApi(sqlite3_bind_nio_buffer(),jint,1bind_1nio_1buffer)( + JniArgsEnvClass, jobject jpStmt, jint ndx, jobject jBuffer, + jint iOffset, jint iN +){ + sqlite3_stmt * pStmt = PtrGet_sqlite3_stmt(jpStmt); + S3JniNioArgs args; + int rc; + if( !pStmt || !SJG.g.byteBuffer.klazz ) return SQLITE_MISUSE; + rc = s3jni_setup_nio_args(env, &args, jBuffer, iOffset, iN); + if(rc){ + return rc; + }else if( !args.pStart || !args.nOut ){ + return sqlite3_bind_null(pStmt, ndx); + } + return sqlite3_bind_blob( pStmt, (int)ndx, args.pStart, + args.nOut, SQLITE_TRANSIENT ); +} + +S3JniApi(sqlite3_bind_double(),jint,1bind_1double)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jdouble val +){ + return (jint)sqlite3_bind_double(LongPtrGet_sqlite3_stmt(jpStmt), + (int)ndx, (double)val); +} + +S3JniApi(sqlite3_bind_int(),jint,1bind_1int)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jint val +){ + return (jint)sqlite3_bind_int(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, (int)val); +} + +S3JniApi(sqlite3_bind_int64(),jint,1bind_1int64)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jlong val +){ + return (jint)sqlite3_bind_int64(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, (sqlite3_int64)val); +} + +/* +** Bind a new global ref to Object `val` using sqlite3_bind_pointer(). 
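+**
+** Per sqlite3_bind_pointer() semantics the bound value reads as SQL
+** NULL; the same s3jni__value_jref_key is what, e.g.,
+** sqlite3_column_java_object() uses with sqlite3_value_pointer() to
+** look such an object back up.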
+*/ +S3JniApi(sqlite3_bind_java_object(),jint,1bind_1java_1object)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jobject val +){ + sqlite3_stmt * const pStmt = LongPtrGet_sqlite3_stmt(jpStmt); + int rc = SQLITE_MISUSE; + + if(pStmt){ + jobject const rv = S3JniRefGlobal(val); + if( rv ){ + rc = sqlite3_bind_pointer(pStmt, ndx, rv, s3jni__value_jref_key, + S3Jni_jobject_finalizer); + }else if(val){ + rc = SQLITE_NOMEM; + }else{ + rc = sqlite3_bind_null(pStmt, ndx); + } + } + return rc; +} + +S3JniApi(sqlite3_bind_null(),jint,1bind_1null)( + JniArgsEnvClass, jlong jpStmt, jint ndx +){ + return (jint)sqlite3_bind_null(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx); +} + +S3JniApi(sqlite3_bind_parameter_count(),jint,1bind_1parameter_1count)( + JniArgsEnvClass, jlong jpStmt +){ + return (jint)sqlite3_bind_parameter_count(LongPtrGet_sqlite3_stmt(jpStmt)); +} + +S3JniApi(sqlite3_bind_parameter_index(),jint,1bind_1parameter_1index)( + JniArgsEnvClass, jlong jpStmt, jbyteArray jName +){ + int rc = 0; + jbyte * const pBuf = s3jni_jbyteArray_bytes(jName); + if( pBuf ){ + rc = sqlite3_bind_parameter_index(LongPtrGet_sqlite3_stmt(jpStmt), + (const char *)pBuf); + s3jni_jbyteArray_release(jName, pBuf); + } + return rc; +} + +S3JniApi(sqlite3_bind_parameter_name(),jstring,1bind_1parameter_1name)( + JniArgsEnvClass, jlong jpStmt, jint ndx +){ + const char *z = + sqlite3_bind_parameter_name(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx); + return z ? s3jni_utf8_to_jstring(z, -1) : 0; +} + +/* +** Impl of sqlite3_bind_text/text16(). +*/ +static int s3jni__bind_text(int is16, JNIEnv *env, jlong jpStmt, jint ndx, + jbyteArray baData, jint nMax){ + jsize nBA = 0; + jbyte * const pBuf = + baData ? s3jni_jbyteArray_bytes2(baData, &nBA) : 0; + int rc; + if( pBuf ){ + if( nMax>nBA ){ + nMax = nBA; + } + /* Note that we rely on the Java layer having assured that baData + is NUL-terminated if nMax is negative. In order to avoid UB for + such cases, we do not expose the byte-limit arguments in the + public API. */ + rc = is16 + ? sqlite3_bind_text16(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, + pBuf, (int)nMax, SQLITE_TRANSIENT) + : sqlite3_bind_text(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx, + (const char *)pBuf, + (int)nMax, SQLITE_TRANSIENT); + }else{ + rc = baData + ? 
sqlite3_bind_null(LongPtrGet_sqlite3_stmt(jpStmt), (int)ndx) + : SQLITE_NOMEM; + } + s3jni_jbyteArray_release(baData, pBuf); + return (jint)rc; + +} + +S3JniApi(sqlite3_bind_text(),jint,1bind_1text)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jbyteArray baData, jint nMax +){ + return s3jni__bind_text(0, env, jpStmt, ndx, baData, nMax); +} + +S3JniApi(sqlite3_bind_text16(),jint,1bind_1text16)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jbyteArray baData, jint nMax +){ + return s3jni__bind_text(1, env, jpStmt, ndx, baData, nMax); +} + +S3JniApi(sqlite3_bind_value(),jint,1bind_1value)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jlong jpValue +){ + int rc = 0; + sqlite3_stmt * pStmt = LongPtrGet_sqlite3_stmt(jpStmt); + if( pStmt ){ + sqlite3_value *v = LongPtrGet_sqlite3_value(jpValue); + if( v ){ + rc = sqlite3_bind_value(pStmt, (int)ndx, v); + }else{ + rc = sqlite3_bind_null(pStmt, (int)ndx); + } + }else{ + rc = SQLITE_MISUSE; + } + return (jint)rc; +} + +S3JniApi(sqlite3_bind_zeroblob(),jint,1bind_1zeroblob)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jint n +){ + return (jint)sqlite3_bind_zeroblob(LongPtrGet_sqlite3_stmt(jpStmt), + (int)ndx, (int)n); +} + +S3JniApi(sqlite3_bind_zeroblob64(),jint,1bind_1zeroblob64)( + JniArgsEnvClass, jlong jpStmt, jint ndx, jlong n +){ + return (jint)sqlite3_bind_zeroblob64(LongPtrGet_sqlite3_stmt(jpStmt), + (int)ndx, (sqlite3_uint64)n); +} + +S3JniApi(sqlite3_blob_bytes(),jint,1blob_1bytes)( + JniArgsEnvClass, jlong jpBlob +){ + return sqlite3_blob_bytes(LongPtrGet_sqlite3_blob(jpBlob)); +} + +S3JniApi(sqlite3_blob_close(),jint,1blob_1close)( + JniArgsEnvClass, jlong jpBlob +){ + sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob); + return b ? (jint)sqlite3_blob_close(b) : SQLITE_MISUSE; +} + +S3JniApi(sqlite3_blob_open(),jint,1blob_1open)( + JniArgsEnvClass, jlong jpDb, jstring jDbName, jstring jTbl, jstring jCol, + jlong jRowId, jint flags, jobject jOut +){ + sqlite3 * const db = LongPtrGet_sqlite3(jpDb); + sqlite3_blob * pBlob = 0; + char * zDbName = 0, * zTableName = 0, * zColumnName = 0; + int rc; + + if( !db || !jDbName || !jTbl || !jCol ) return SQLITE_MISUSE; + zDbName = s3jni_jstring_to_utf8(jDbName,0); + zTableName = zDbName ? s3jni_jstring_to_utf8(jTbl,0) : 0; + zColumnName = zTableName ? s3jni_jstring_to_utf8(jCol,0) : 0; + rc = zColumnName + ? sqlite3_blob_open(db, zDbName, zTableName, zColumnName, + (sqlite3_int64)jRowId, (int)flags, &pBlob) + : SQLITE_NOMEM; + if( 0==rc ){ + jobject rv = new_java_sqlite3_blob(env, pBlob); + if( !rv ){ + sqlite3_blob_close(pBlob); + rc = SQLITE_NOMEM; + } + OutputPointer_set_obj(env, S3JniNph(OutputPointer_sqlite3_blob), jOut, rv); + } + sqlite3_free(zDbName); + sqlite3_free(zTableName); + sqlite3_free(zColumnName); + return rc; +} + +S3JniApi(sqlite3_blob_read(),jint,1blob_1read)( + JniArgsEnvClass, jlong jpBlob, jbyteArray jTgt, jint iOffset +){ + jbyte * const pBa = s3jni_jbyteArray_bytes(jTgt); + int rc = jTgt ? (pBa ? 
SQLITE_MISUSE : SQLITE_NOMEM) : SQLITE_MISUSE; + if( pBa ){ + jsize const nTgt = (*env)->GetArrayLength(env, jTgt); + rc = sqlite3_blob_read(LongPtrGet_sqlite3_blob(jpBlob), pBa, + (int)nTgt, (int)iOffset); + if( 0==rc ){ + s3jni_jbyteArray_commit(jTgt, pBa); + }else{ + s3jni_jbyteArray_release(jTgt, pBa); + } + } + return rc; +} + +S3JniApi(sqlite3_blob_read_nio_buffer(),jint,1blob_1read_1nio_1buffer)( + JniArgsEnvClass, jlong jpBlob, jint iSrcOff, jobject jBB, jint iTgtOff, jint iHowMany +){ + sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob); + S3JniNioArgs args; + int rc; + if( !b || !SJG.g.byteBuffer.klazz || iHowMany<0 ){ + return SQLITE_MISUSE; + }else if( iTgtOff<0 || iSrcOff<0 ){ + return SQLITE_ERROR + /* for consistency with underlying sqlite3_blob_read() */; + }else if( 0==iHowMany ){ + return 0; + } + rc = s3jni_setup_nio_args(env, &args, jBB, iTgtOff, iHowMany); + if(rc){ + return rc; + }else if( !args.pStart || !args.nOut ){ + return 0; + } + assert( args.iHowMany>0 ); + return sqlite3_blob_read( b, args.pStart, (int)args.nOut, (int)iSrcOff ); +} + +S3JniApi(sqlite3_blob_reopen(),jint,1blob_1reopen)( + JniArgsEnvClass, jlong jpBlob, jlong iNewRowId +){ + return (jint)sqlite3_blob_reopen(LongPtrGet_sqlite3_blob(jpBlob), + (sqlite3_int64)iNewRowId); +} + +S3JniApi(sqlite3_blob_write(),jint,1blob_1write)( + JniArgsEnvClass, jlong jpBlob, jbyteArray jBa, jint iOffset +){ + sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob); + jbyte * const pBuf = b ? s3jni_jbyteArray_bytes(jBa) : 0; + const jsize nBA = pBuf ? (*env)->GetArrayLength(env, jBa) : 0; + int rc = SQLITE_MISUSE; + if(b && pBuf){ + rc = sqlite3_blob_write( b, pBuf, (int)nBA, (int)iOffset ); + } + s3jni_jbyteArray_release(jBa, pBuf); + return (jint)rc; +} + +S3JniApi(sqlite3_blob_write_nio_buffer(),jint,1blob_1write_1nio_1buffer)( + JniArgsEnvClass, jlong jpBlob, jint iTgtOff, jobject jBB, jint iSrcOff, jint iHowMany +){ + sqlite3_blob * const b = LongPtrGet_sqlite3_blob(jpBlob); + S3JniNioArgs args; + int rc; + if( !b || !SJG.g.byteBuffer.klazz ){ + return SQLITE_MISUSE; + }else if( iTgtOff<0 || iSrcOff<0 ){ + return SQLITE_ERROR + /* for consistency with underlying sqlite3_blob_write() */; + }else if( 0==iHowMany ){ + return 0; + } + rc = s3jni_setup_nio_args(env, &args, jBB, iSrcOff, iHowMany); + if(rc){ + return rc; + }else if( !args.pStart || !args.nOut ){ + return 0; + } + return sqlite3_blob_write( b, args.pStart, (int)args.nOut, (int)iTgtOff ); +} + +/* Central C-to-Java busy handler proxy. */ +static int s3jni_busy_handler(void* pState, int n){ + S3JniDb * const ps = (S3JniDb *)pState; + int rc = 0; + S3JniDeclLocal_env; + S3JniHook hook; + + S3JniHook_localdup(&ps->hooks.busyHandler, &hook); + if( hook.jObj ){ + rc = (*env)->CallIntMethod(env, hook.jObj, + hook.midCallback, (jint)n); + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("sqlite3_busy_handler() callback"); + rc = s3jni_db_exception(ps->pDb, SQLITE_ERROR, + "sqlite3_busy_handler() callback threw."); + } + S3JniHook_localundup(hook); + } + return rc; +} + +S3JniApi(sqlite3_busy_handler(),jint,1busy_1handler)( + JniArgsEnvClass, jlong jpDb, jobject jBusy +){ + S3JniDb * const ps = S3JniDb_from_jlong(jpDb); + S3JniHook * const pHook = ps ? &ps->hooks.busyHandler : 0; + S3JniHook hook = S3JniHook_empty; + int rc = 0; + + if( !ps ) return (jint)SQLITE_MISUSE; + S3JniDb_mutex_enter; + if( jBusy ){ + if( pHook->jObj && (*env)->IsSameObject(env, pHook->jObj, jBusy) ){ + /* Same object - this is a no-op. 
*/ + }else{ + jclass const klazz = (*env)->GetObjectClass(env, jBusy); + hook.jObj = S3JniRefGlobal(jBusy); + hook.midCallback = (*env)->GetMethodID(env, klazz, "call", "(I)I"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + rc = SQLITE_ERROR; + } + } + } + if( 0==rc ){ + if( jBusy ){ + if( hook.jObj ){ /* Replace handler */ + rc = sqlite3_busy_handler(ps->pDb, s3jni_busy_handler, ps); + if( 0==rc ){ + S3JniHook_unref(pHook); + *pHook = hook /* transfer Java ref ownership */; + hook = S3JniHook_empty; + } + }/* else no-op */ + }else{ /* Clear handler */ + rc = sqlite3_busy_handler(ps->pDb, 0, 0); + if( 0==rc ){ + S3JniHook_unref(pHook); + } + } + } + S3JniHook_unref(&hook); + S3JniDb_mutex_leave; + return rc; +} + +S3JniApi(sqlite3_busy_timeout(),jint,1busy_1timeout)( + JniArgsEnvClass, jlong jpDb, jint ms +){ + S3JniDb * const ps = S3JniDb_from_jlong(jpDb); + int rc = SQLITE_MISUSE; + if( ps ){ + S3JniDb_mutex_enter; + S3JniHook_unref(&ps->hooks.busyHandler); + rc = sqlite3_busy_timeout(ps->pDb, (int)ms); + S3JniDb_mutex_leave; + } + return rc; +} + +S3JniApi(sqlite3_cancel_auto_extension(),jboolean,1cancel_1auto_1extension)( + JniArgsEnvClass, jobject jAutoExt +){ + S3JniAutoExtension * ax; + jboolean rc = JNI_FALSE; + int i; + + if( !jAutoExt ){ + return rc; + } + S3JniAutoExt_mutex_enter; + /* This algo corresponds to the one in the core. */ + for( i = SJG.autoExt.nExt-1; i >= 0; --i ){ + ax = &SJG.autoExt.aExt[i]; + if( ax->jObj && (*env)->IsSameObject(env, ax->jObj, jAutoExt) ){ + S3JniAutoExtension_clear(ax); + /* Move final entry into this slot. */ + --SJG.autoExt.nExt; + *ax = SJG.autoExt.aExt[SJG.autoExt.nExt]; + SJG.autoExt.aExt[SJG.autoExt.nExt] = S3JniHook_empty; + assert( !SJG.autoExt.aExt[SJG.autoExt.nExt].jObj ); + rc = JNI_TRUE; + break; + } + } + S3JniAutoExt_mutex_leave; + return rc; +} + +/* Wrapper for sqlite3_close(_v2)(). */ +static jint s3jni_close_db(JNIEnv * const env, jlong jpDb, int version){ + int rc = 0; + S3JniDb * const ps = S3JniDb_from_jlong(jpDb); + + assert(version == 1 || version == 2); + if( ps ){ + rc = 1==version + ? (jint)sqlite3_close(ps->pDb) + : (jint)sqlite3_close_v2(ps->pDb); + } + return (jint)rc; +} + +S3JniApi(sqlite3_close(),jint,1close)(JniArgsEnvClass, jlong pDb){ + return s3jni_close_db(env, pDb, 1); +} + +S3JniApi(sqlite3_close_v2(),jint,1close_1v2)(JniArgsEnvClass, jlong pDb){ + return s3jni_close_db(env, pDb, 2); +} + +/* +** Assumes z is an array of unsigned short and returns the index in +** that array of the first element with the value 0. +*/ +static unsigned int s3jni_utf16_strlen(void const * z){ + unsigned int i = 0; + const unsigned short * p = z; + while( p[i] ) ++i; + return i; +} + +/* Descriptive alias for use with sqlite3_collation_needed(). */ +typedef S3JniHook S3JniCollationNeeded; + +/* Central C-to-Java sqlite3_collation_needed16() hook impl. 
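+** The Java-side callback is invoked as call(sqlite3, eTextRep, name);
+** see the "(Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)V"
+** signature resolved in sqlite3_collation_needed() below.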
*/ +static void s3jni_collation_needed_impl16(void *pState, sqlite3 *pDb, + int eTextRep, const void * z16Name){ + S3JniCollationNeeded * const pHook = pState; + S3JniDeclLocal_env; + S3JniHook hook; + + S3JniHook_localdup(pHook, &hook); + if( hook.jObj ){ + unsigned int const nName = s3jni_utf16_strlen(z16Name); + jstring jName = (*env)->NewString(env, (jchar const *)z16Name, nName); + + s3jni_oom_check( jName ); + assert( hook.jExtra ); + S3JniIfThrew{ + S3JniExceptionClear; + }else if( hook.jExtra ){ + (*env)->CallVoidMethod(env, hook.jObj, hook.midCallback, + hook.jExtra, (jint)eTextRep, jName); + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("sqlite3_collation_needed() callback"); + } + } + S3JniUnrefLocal(jName); + S3JniHook_localundup(hook); + } +} + +S3JniApi(sqlite3_collation_needed(),jint,1collation_1needed)( + JniArgsEnvClass, jlong jpDb, jobject jHook +){ + S3JniDb * ps; + S3JniCollationNeeded * pHook; + int rc = 0; + + S3JniDb_mutex_enter; + ps = S3JniDb_from_jlong(jpDb); + if( !ps ){ + S3JniDb_mutex_leave; + return SQLITE_MISUSE; + } + pHook = &ps->hooks.collationNeeded; + if( pHook->jObj && jHook && + (*env)->IsSameObject(env, pHook->jObj, jHook) ){ + /* no-op */ + }else if( !jHook ){ + rc = sqlite3_collation_needed(ps->pDb, 0, 0); + if( 0==rc ){ + S3JniHook_unref(pHook); + } + }else{ + jclass const klazz = (*env)->GetObjectClass(env, jHook); + jmethodID const xCallback = (*env)->GetMethodID( + env, klazz, "call", "(Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)V" + ); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + rc = s3jni_db_exception(ps->pDb, SQLITE_MISUSE, + "Cannot not find matching call() in " + "CollationNeededCallback object."); + }else{ + rc = sqlite3_collation_needed16(ps->pDb, pHook, + s3jni_collation_needed_impl16); + if( 0==rc ){ + S3JniHook_unref(pHook); + pHook->midCallback = xCallback; + pHook->jObj = S3JniRefGlobal(jHook); + pHook->jExtra = S3JniRefGlobal(ps->jDb); + } + } + } + S3JniDb_mutex_leave; + return rc; +} + +S3JniApi(sqlite3_column_blob(),jbyteArray,1column_1blob)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jpStmt); + void const * const p = sqlite3_column_blob(pStmt, (int)ndx); + int const n = p ? sqlite3_column_bytes(pStmt, (int)ndx) : 0; + + return p ? 
s3jni_new_jbyteArray(p, n) : 0; +} + +S3JniApi(sqlite3_column_double(),jdouble,1column_1double)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + return (jdouble)sqlite3_column_double(PtrGet_sqlite3_stmt(jpStmt), (int)ndx); +} + +S3JniApi(sqlite3_column_int(),jint,1column_1int)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + return (jint)sqlite3_column_int(PtrGet_sqlite3_stmt(jpStmt), (int)ndx); +} + +S3JniApi(sqlite3_column_int64(),jlong,1column_1int64)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + return (jlong)sqlite3_column_int64(PtrGet_sqlite3_stmt(jpStmt), (int)ndx); +} + +S3JniApi(sqlite3_column_java_object(),jobject,1column_1java_1object)( + JniArgsEnvClass, jlong jpStmt, jint ndx +){ + sqlite3_stmt * const stmt = LongPtrGet_sqlite3_stmt(jpStmt); + jobject rv = 0; + if( stmt ){ + sqlite3 * const db = sqlite3_db_handle(stmt); + sqlite3_value * sv; + sqlite3_mutex_enter(sqlite3_db_mutex(db)); + sv = sqlite3_column_value(stmt, (int)ndx); + if( sv ){ + rv = S3JniRefLocal( + sqlite3_value_pointer(sv, s3jni__value_jref_key) + ); + } + sqlite3_mutex_leave(sqlite3_db_mutex(db)); + } + return rv; +} + +S3JniApi(sqlite3_column_nio_buffer(),jobject,1column_1nio_1buffer)( + JniArgsEnvClass, jobject jStmt, jint ndx +){ + sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jStmt); + jobject rv = 0; + if( stmt ){ + const void * const p = sqlite3_column_blob(stmt, (int)ndx); + if( p ){ + const int n = sqlite3_column_bytes(stmt, (int)ndx); + rv = s3jni__blob_to_ByteBuffer(env, p, n); + } + } + return rv; +} + +S3JniApi(sqlite3_column_text(),jbyteArray,1column_1text)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jpStmt); + const unsigned char * const p = stmt ? sqlite3_column_text(stmt, (int)ndx) : 0; + const int n = p ? sqlite3_column_bytes(stmt, (int)ndx) : 0; + return p ? s3jni_new_jbyteArray(p, n) : NULL; +} + +#if 0 +// this impl might prove useful. +S3JniApi(sqlite3_column_text(),jstring,1column_1text)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jpStmt); + const unsigned char * const p = stmt ? sqlite3_column_text(stmt, (int)ndx) : 0; + const int n = p ? sqlite3_column_bytes(stmt, (int)ndx) : 0; + return p ? s3jni_utf8_to_jstring( (const char *)p, n) : 0; +} +#endif + +S3JniApi(sqlite3_column_text16(),jstring,1column_1text16)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + sqlite3_stmt * const stmt = PtrGet_sqlite3_stmt(jpStmt); + const void * const p = stmt ? sqlite3_column_text16(stmt, (int)ndx) : 0; + const int n = p ? sqlite3_column_bytes16(stmt, (int)ndx) : 0; + return s3jni_text16_to_jstring(env, p, n); +} + +S3JniApi(sqlite3_column_value(),jobject,1column_1value)( + JniArgsEnvClass, jobject jpStmt, jint ndx +){ + sqlite3_value * const sv = + sqlite3_column_value(PtrGet_sqlite3_stmt(jpStmt), (int)ndx) + /* reminder: returns an SQL NULL if jpStmt==NULL */; + return new_java_sqlite3_value(env, sv); +} + +/* +** Impl for commit hooks (if isCommit is true) or rollback hooks. +*/ +static int s3jni_commit_rollback_hook_impl(int isCommit, S3JniDb * const ps){ + S3JniDeclLocal_env; + int rc = 0; + S3JniHook hook; + + S3JniHook_localdup(isCommit + ? &ps->hooks.commit : &ps->hooks.rollback, + &hook); + if( hook.jObj ){ + rc = isCommit + ? (int)(*env)->CallIntMethod(env, hook.jObj, hook.midCallback) + : (int)((*env)->CallVoidMethod(env, hook.jObj, hook.midCallback), 0); + S3JniIfThrew{ + rc = s3jni_db_exception(ps->pDb, SQLITE_ERROR, + isCommit + ? 
"Commit hook callback threw" + : "Rollback hook callback threw"); + } + S3JniHook_localundup(hook); + } + return rc; +} + +/* C-to-Java commit hook wrapper. */ +static int s3jni_commit_hook_impl(void *pP){ + return s3jni_commit_rollback_hook_impl(1, pP); +} + +/* C-to-Java rollback hook wrapper. */ +static void s3jni_rollback_hook_impl(void *pP){ + (void)s3jni_commit_rollback_hook_impl(0, pP); +} + +/* +** Proxy for sqlite3_commit_hook() (if isCommit is true) or +** sqlite3_rollback_hook(). +*/ +static jobject s3jni_commit_rollback_hook(int isCommit, JNIEnv * const env, + jlong jpDb, jobject jHook){ + S3JniDb * ps; + jobject pOld = 0; /* previous hook */ + S3JniHook * pHook; /* ps->hooks.commit|rollback */ + + S3JniDb_mutex_enter; + ps = S3JniDb_from_jlong(jpDb); + if( !ps ){ + s3jni_db_error(ps->pDb, SQLITE_MISUSE, 0); + S3JniDb_mutex_leave; + return 0; + } + pHook = isCommit ? &ps->hooks.commit : &ps->hooks.rollback; + pOld = pHook->jObj; + if( pOld && jHook && + (*env)->IsSameObject(env, pOld, jHook) ){ + /* No-op. */ + }else if( !jHook ){ + if( pOld ){ + jobject tmp = S3JniRefLocal(pOld); + S3JniUnrefGlobal(pOld); + pOld = tmp; + } + *pHook = S3JniHook_empty; + if( isCommit ) sqlite3_commit_hook(ps->pDb, 0, 0); + else sqlite3_rollback_hook(ps->pDb, 0, 0); + }else{ + jclass const klazz = (*env)->GetObjectClass(env, jHook); + jmethodID const xCallback = (*env)->GetMethodID(env, klazz, "call", + isCommit ? "()I" : "()V"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + S3JniExceptionReport; + S3JniExceptionClear; + s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Cannot not find matching call() method in" + "hook object."); + }else{ + pHook->midCallback = xCallback; + pHook->jObj = S3JniRefGlobal(jHook); + if( isCommit ) sqlite3_commit_hook(ps->pDb, s3jni_commit_hook_impl, ps); + else sqlite3_rollback_hook(ps->pDb, s3jni_rollback_hook_impl, ps); + if( pOld ){ + jobject tmp = S3JniRefLocal(pOld); + S3JniUnrefGlobal(pOld); + pOld = tmp; + } + } + } + S3JniDb_mutex_leave; + return pOld; +} + +S3JniApi(sqlite3_commit_hook(),jobject,1commit_1hook)( + JniArgsEnvClass, jlong jpDb, jobject jHook +){ + return s3jni_commit_rollback_hook(1, env, jpDb, jHook); +} + +S3JniApi(sqlite3_compileoption_get(),jstring,1compileoption_1get)( + JniArgsEnvClass, jint n +){ + const char * z = sqlite3_compileoption_get(n); + jstring const rv = z ? (*env)->NewStringUTF( env, z ) : 0; + /* We know these to be ASCII, so MUTF-8 is fine. */; + s3jni_oom_check(z ? !!rv : 1); + return rv; +} + +S3JniApi(sqlite3_compileoption_used(),jboolean,1compileoption_1used)( + JniArgsEnvClass, jstring name +){ + const char *zUtf8 = s3jni_jstring_to_mutf8(name) + /* We know these to be ASCII, so MUTF-8 is fine (and + hypothetically faster to convert). */; + const jboolean rc = + 0==sqlite3_compileoption_used(zUtf8) ? JNI_FALSE : JNI_TRUE; + s3jni_mutf8_release(name, zUtf8); + return rc; +} + +S3JniApi(sqlite3_complete(),jint,1complete)( + JniArgsEnvClass, jbyteArray jSql +){ + jbyte * const pBuf = s3jni_jbyteArray_bytes(jSql); + const jsize nBA = pBuf ? (*env)->GetArrayLength(env, jSql) : 0; + int rc; + + assert( (nBA>0 ? 0==pBuf[nBA-1] : (pBuf ? 0==*pBuf : 1)) + && "Byte array is not NUL-terminated." ); + rc = (pBuf && 0==pBuf[(nBA ? nBA-1 : 0)]) + ? sqlite3_complete( (const char *)pBuf ) + : (jSql ? 
SQLITE_NOMEM : SQLITE_MISUSE); + s3jni_jbyteArray_release(jSql, pBuf); + return rc; +} + +S3JniApi(sqlite3_config() /*for a small subset of options.*/ + sqlite3_config__enable()/* internal name to avoid name-mangling issues*/, + jint,1config_1_1enable)(JniArgsEnvClass, jint n){ + switch( n ){ + case SQLITE_CONFIG_SINGLETHREAD: + case SQLITE_CONFIG_MULTITHREAD: + case SQLITE_CONFIG_SERIALIZED: + return sqlite3_config( n ); + default: + return SQLITE_MISUSE; + } +} +/* C-to-Java SQLITE_CONFIG_LOG wrapper. */ +static void s3jni_config_log(void *ignored, int errCode, const char *z){ + S3JniDeclLocal_env; + S3JniHook hook = S3JniHook_empty; + + S3JniHook_localdup(&SJG.hook.configlog, &hook); + if( hook.jObj ){ + jstring const jArg1 = z ? s3jni_utf8_to_jstring(z, -1) : 0; + if( z ? !!jArg1 : 1 ){ + (*env)->CallVoidMethod(env, hook.jObj, hook.midCallback, errCode, jArg1); + } + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("SQLITE_CONFIG_LOG callback"); + S3JniExceptionClear; + } + S3JniHook_localundup(hook); + S3JniUnrefLocal(jArg1); + } +} + +S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_LOG */ + sqlite3_config__config_log() /* internal name */, + jint, 1config_1_1CONFIG_1LOG +)(JniArgsEnvClass, jobject jLog){ + S3JniHook * const pHook = &SJG.hook.configlog; + int rc = 0; + + S3JniGlobal_mutex_enter; + if( !jLog ){ + rc = sqlite3_config( SQLITE_CONFIG_LOG, NULL, NULL ); + if( 0==rc ){ + S3JniHook_unref(pHook); + } + }else if( pHook->jObj && (*env)->IsSameObject(env, jLog, pHook->jObj) ){ + /* No-op */ + }else { + jclass const klazz = (*env)->GetObjectClass(env, jLog); + jmethodID const midCallback = (*env)->GetMethodID(env, klazz, "call", + "(ILjava/lang/String;)V"); + S3JniUnrefLocal(klazz); + if( midCallback ){ + rc = sqlite3_config( SQLITE_CONFIG_LOG, s3jni_config_log, NULL ); + if( 0==rc ){ + S3JniHook_unref(pHook); + pHook->midCallback = midCallback; + pHook->jObj = S3JniRefGlobal(jLog); + } + }else{ + S3JniExceptionWarnIgnore; + rc = SQLITE_ERROR; + } + } + S3JniGlobal_mutex_leave; + return rc; +} + +#ifdef SQLITE_ENABLE_SQLLOG +/* C-to-Java SQLITE_CONFIG_SQLLOG wrapper. */ +static void s3jni_config_sqllog(void *ignored, sqlite3 *pDb, const char *z, int op){ + jobject jArg0 = 0; + jstring jArg1 = 0; + S3JniDeclLocal_env; + S3JniDb * const ps = S3JniDb_from_c(pDb); + S3JniHook hook = S3JniHook_empty; + + if( ps ){ + S3JniHook_localdup(&SJG.hook.sqllog, &hook); + } + if( !hook.jObj ) return; + jArg0 = S3JniRefLocal(ps->jDb); + switch( op ){ + case 0: /* db opened */ + case 1: /* SQL executed */ + jArg1 = s3jni_utf8_to_jstring( z, -1); + break; + case 2: /* db closed */ + break; + default: + (*env)->FatalError(env, "Unhandled 4th arg to SQLITE_CONFIG_SQLLOG."); + break; + } + (*env)->CallVoidMethod(env, hook.jObj, hook.midCallback, jArg0, jArg1, op); + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("SQLITE_CONFIG_SQLLOG callback"); + S3JniExceptionClear; + } + S3JniHook_localundup(hook); + S3JniUnrefLocal(jArg0); + S3JniUnrefLocal(jArg1); +} +//! Requirement of SQLITE_CONFIG_SQLLOG. 
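+//! When the library is built with SQLITE_ENABLE_SQLLOG, the core calls
+//! this from sqlite3_initialize(), installing the s3jni_config_sqllog()
+//! proxy above without any explicit application-side setup.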
+void sqlite3_init_sqllog(void){ + sqlite3_config( SQLITE_CONFIG_SQLLOG, s3jni_config_sqllog, 0 ); +} +#endif + +S3JniApi(sqlite3_config() /* for SQLITE_CONFIG_SQLLOG */ + sqlite3_config__SQLLOG() /*internal name*/, + jint, 1config_1_1SQLLOG +)(JniArgsEnvClass, jobject jLog){ +#ifndef SQLITE_ENABLE_SQLLOG + return SQLITE_MISUSE; +#else + S3JniHook * const pHook = &SJG.hook.sqllog; + int rc = 0; + + S3JniGlobal_mutex_enter; + if( !jLog ){ + rc = sqlite3_config( SQLITE_CONFIG_SQLLOG, NULL ); + if( 0==rc ){ + S3JniHook_unref(pHook); + } + }else if( pHook->jObj && (*env)->IsSameObject(env, jLog, pHook->jObj) ){ + /* No-op */ + }else { + jclass const klazz = (*env)->GetObjectClass(env, jLog); + jmethodID const midCallback = (*env)->GetMethodID(env, klazz, "call", + "(Lorg/sqlite/jni/capi/sqlite3;" + "Ljava/lang/String;" + "I)V"); + S3JniUnrefLocal(klazz); + if( midCallback ){ + rc = sqlite3_config( SQLITE_CONFIG_SQLLOG, s3jni_config_sqllog, NULL ); + if( 0==rc ){ + S3JniHook_unref(pHook); + pHook->midCallback = midCallback; + pHook->jObj = S3JniRefGlobal(jLog); + } + }else{ + S3JniExceptionWarnIgnore; + rc = SQLITE_ERROR; + } + } + S3JniGlobal_mutex_leave; + return rc; +#endif +} + +S3JniApi(sqlite3_context_db_handle(),jobject,1context_1db_1handle)( + JniArgsEnvClass, jobject jpCx +){ + sqlite3_context * const pCx = PtrGet_sqlite3_context(jpCx); + sqlite3 * const pDb = pCx ? sqlite3_context_db_handle(pCx) : 0; + S3JniDb * const ps = pDb ? S3JniDb_from_c(pDb) : 0; + return ps ? ps->jDb : 0; +} + +/* +** State for CollationCallbacks. This used to be its own separate +** type, but has since been consolidated with S3JniHook. It retains +** its own typedef for code legibility and searchability reasons. +*/ +typedef S3JniHook S3JniCollationCallback; + +/* +** Proxy for Java-side CollationCallback.xCompare() callbacks. +*/ +static int CollationCallback_xCompare(void *pArg, int nLhs, const void *lhs, + int nRhs, const void *rhs){ + S3JniCollationCallback * const pCC = pArg; + S3JniDeclLocal_env; + jint rc = 0; + if( pCC->jObj ){ + jbyteArray jbaLhs = s3jni_new_jbyteArray(lhs, (jint)nLhs); + jbyteArray jbaRhs = jbaLhs + ? s3jni_new_jbyteArray(rhs, (jint)nRhs) : 0; + if( !jbaRhs ){ + S3JniUnrefLocal(jbaLhs); + /* We have no recovery strategy here. */ + s3jni_oom_check( jbaRhs ); + return 0; + } + rc = (*env)->CallIntMethod(env, pCC->jObj, pCC->midCallback, + jbaLhs, jbaRhs); + S3JniExceptionIgnore; + S3JniUnrefLocal(jbaLhs); + S3JniUnrefLocal(jbaRhs); + } + return (int)rc; +} + +/* CollationCallback finalizer for use by the sqlite3 internals. */ +static void CollationCallback_xDestroy(void *pArg){ + S3JniCollationCallback * const pCC = pArg; + S3JniDeclLocal_env; + S3JniHook_free(pCC); +} + +S3JniApi(sqlite3_create_collation() sqlite3_create_collation_v2(), + jint,1create_1collation +)(JniArgsEnvClass, jobject jDb, jstring name, jint eTextRep, + jobject oCollation){ + int rc; + S3JniDb * ps; + + if( !jDb || !name || !encodingTypeIsValid(eTextRep) ){ + return (jint)SQLITE_MISUSE; + } + S3JniDb_mutex_enter; + ps = S3JniDb_from_java(jDb); + jclass const klazz = (*env)->GetObjectClass(env, oCollation); + jmethodID const midCallback = + (*env)->GetMethodID(env, klazz, "call", "([B[B)I"); + S3JniUnrefLocal(klazz); + S3JniIfThrew{ + rc = s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Could not get call() method from " + "CollationCallback object."); + }else{ + char * const zName = s3jni_jstring_to_utf8(name, 0); + S3JniCollationCallback * const pCC = + zName ? 
S3JniHook_alloc() : 0; + if( pCC ){ + rc = sqlite3_create_collation_v2(ps->pDb, zName, (int)eTextRep, + pCC, CollationCallback_xCompare, + CollationCallback_xDestroy); + if( 0==rc ){ + pCC->midCallback = midCallback; + pCC->jObj = S3JniRefGlobal(oCollation); + pCC->doXDestroy = 1; + }else{ + CollationCallback_xDestroy(pCC); + } + }else{ + rc = SQLITE_NOMEM; + } + sqlite3_free(zName); + } + S3JniDb_mutex_leave; + return (jint)rc; +} + +S3JniApi(sqlite3_create_function() sqlite3_create_function_v2() + sqlite3_create_window_function(), + jint,1create_1function +)(JniArgsEnvClass, jobject jDb, jstring jFuncName, jint nArg, + jint eTextRep, jobject jFunctor){ + S3JniUdf * s = 0; + int rc; + sqlite3 * const pDb = PtrGet_sqlite3(jDb); + char * zFuncName = 0; + + if( !pDb || !jFuncName ){ + return SQLITE_MISUSE; + }else if( !encodingTypeIsValid(eTextRep) ){ + return s3jni_db_error(pDb, SQLITE_FORMAT, + "Invalid function encoding option."); + } + s = S3JniUdf_alloc(env, jFunctor); + if( !s ) return SQLITE_NOMEM; + + if( UDF_UNKNOWN_TYPE==s->type ){ + rc = s3jni_db_error(pDb, SQLITE_MISUSE, + "Cannot unambiguously determine function type."); + S3JniUdf_free(env, s, 1); + goto error_cleanup; + } + zFuncName = s3jni_jstring_to_utf8(jFuncName,0); + if( !zFuncName ){ + rc = SQLITE_NOMEM; + S3JniUdf_free(env, s, 1); + goto error_cleanup; + } + s->zFuncName = zFuncName /* pass on ownership */; + if( UDF_WINDOW == s->type ){ + rc = sqlite3_create_window_function(pDb, zFuncName, nArg, eTextRep, s, + udf_xStep, udf_xFinal, udf_xValue, + udf_xInverse, S3JniUdf_finalizer); + }else{ + udf_xFunc_f xFunc = 0; + udf_xStep_f xStep = 0; + udf_xFinal_f xFinal = 0; + if( UDF_SCALAR == s->type ){ + xFunc = udf_xFunc; + }else{ + assert( UDF_AGGREGATE == s->type ); + xStep = udf_xStep; + xFinal = udf_xFinal; + } + rc = sqlite3_create_function_v2(pDb, zFuncName, nArg, eTextRep, s, + xFunc, xStep, xFinal, S3JniUdf_finalizer); + } +error_cleanup: + /* Reminder: on sqlite3_create_function() error, s will be + ** destroyed via create_function(). */ + return (jint)rc; +} + + +S3JniApi(sqlite3_db_config() /*for MAINDBNAME*/, + jint,1db_1config__Lorg_sqlite_jni_capi_sqlite3_2ILjava_lang_String_2 +)(JniArgsEnvClass, jobject jDb, jint op, jstring jStr){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + int rc; + char *zStr; + + switch( (ps && jStr) ? op : 0 ){ + case SQLITE_DBCONFIG_MAINDBNAME: + S3JniDb_mutex_enter + /* Protect against a race in modifying/freeing + ps->zMainDbName. */; + zStr = s3jni_jstring_to_utf8( jStr, 0); + if( zStr ){ + rc = sqlite3_db_config(ps->pDb, (int)op, zStr); + if( rc ){ + sqlite3_free( zStr ); + }else{ + sqlite3_free( ps->zMainDbName ); + ps->zMainDbName = zStr; + } + }else{ + rc = SQLITE_NOMEM; + } + S3JniDb_mutex_leave; + break; + case 0: + default: + rc = SQLITE_MISUSE; + } + return rc; +} + +S3JniApi( + sqlite3_db_config(), + /* WARNING: openjdk v19 creates a different mangled name for this + ** function than openjdk v8 does. We account for that by exporting + ** both versions of the name. */ + jint,1db_1config__Lorg_sqlite_jni_capi_sqlite3_2IILorg_sqlite_jni_capi_OutputPointer_Int32_2 +)( + JniArgsEnvClass, jobject jDb, jint op, jint onOff, jobject jOut +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + int rc; + switch( ps ? 
op : 0 ){ + case SQLITE_DBCONFIG_ENABLE_FKEY: + case SQLITE_DBCONFIG_ENABLE_TRIGGER: + case SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER: + case SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION: + case SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE: + case SQLITE_DBCONFIG_ENABLE_QPSG: + case SQLITE_DBCONFIG_TRIGGER_EQP: + case SQLITE_DBCONFIG_RESET_DATABASE: + case SQLITE_DBCONFIG_DEFENSIVE: + case SQLITE_DBCONFIG_WRITABLE_SCHEMA: + case SQLITE_DBCONFIG_LEGACY_ALTER_TABLE: + case SQLITE_DBCONFIG_DQS_DML: + case SQLITE_DBCONFIG_DQS_DDL: + case SQLITE_DBCONFIG_ENABLE_VIEW: + case SQLITE_DBCONFIG_LEGACY_FILE_FORMAT: + case SQLITE_DBCONFIG_TRUSTED_SCHEMA: + case SQLITE_DBCONFIG_STMT_SCANSTATUS: + case SQLITE_DBCONFIG_REVERSE_SCANORDER: { + int pOut = 0; + rc = sqlite3_db_config( ps->pDb, (int)op, onOff, &pOut ); + if( 0==rc && jOut ){ + OutputPointer_set_Int32(env, jOut, pOut); + } + break; + } + default: + rc = SQLITE_MISUSE; + } + return (jint)rc; +} + +/* +** This is a workaround for openjdk v19 (and possibly others) encoding +** this function's name differently than JDK v8 does. If we do not +** install both names for this function then Java will not be able to +** find the function in both environments. +*/ +JniDecl(jint,1db_1config__Lorg_sqlite_jni_capi_sqlite3_2IILorg_sqlite_jni_capi_OutputPointer_00024Int32_2)( + JniArgsEnvClass, jobject jDb, jint op, jint onOff, jobject jOut +){ + return JniFuncName(1db_1config__Lorg_sqlite_jni_capi_sqlite3_2IILorg_sqlite_jni_capi_OutputPointer_Int32_2)( + env, jKlazz, jDb, op, onOff, jOut + ); +} + +S3JniApi(sqlite3_db_filename(),jstring,1db_1filename)( + JniArgsEnvClass, jobject jDb, jstring jDbName +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + char *zDbName; + jstring jRv = 0; + int nStr = 0; + + if( !ps || !jDbName ){ + return 0; + } + zDbName = s3jni_jstring_to_utf8( jDbName, &nStr); + if( zDbName ){ + char const * zRv = sqlite3_db_filename(ps->pDb, zDbName); + sqlite3_free(zDbName); + if( zRv ){ + jRv = s3jni_utf8_to_jstring( zRv, -1); + } + } + return jRv; +} + +S3JniApi(sqlite3_db_handle(),jobject,1db_1handle)( + JniArgsEnvClass, jobject jpStmt +){ + sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jpStmt); + sqlite3 * const pDb = pStmt ? sqlite3_db_handle(pStmt) : 0; + S3JniDb * const ps = pDb ? S3JniDb_from_c(pDb) : 0; + return ps ? ps->jDb : 0; +} + +S3JniApi(sqlite3_db_readonly(),jint,1db_1readonly)( + JniArgsEnvClass, jobject jDb, jstring jDbName +){ + int rc = 0; + S3JniDb * const ps = S3JniDb_from_java(jDb); + char *zDbName = jDbName ? s3jni_jstring_to_utf8( jDbName, 0 ) : 0; + rc = sqlite3_db_readonly(ps ? ps->pDb : 0, zDbName); + sqlite3_free(zDbName); + return (jint)rc; +} + +S3JniApi(sqlite3_db_release_memory(),jint,1db_1release_1memory)( + JniArgsEnvClass, jobject jDb +){ + sqlite3 * const pDb = PtrGet_sqlite3(jDb); + return pDb ? sqlite3_db_release_memory(pDb) : SQLITE_MISUSE; +} + +S3JniApi(sqlite3_db_status(),jint,1db_1status)( + JniArgsEnvClass, jobject jDb, jint op, jobject jOutCurrent, + jobject jOutHigh, jboolean reset +){ + int iCur = 0, iHigh = 0; + sqlite3 * const pDb = PtrGet_sqlite3(jDb); + int rc = sqlite3_db_status( pDb, op, &iCur, &iHigh, reset ); + if( 0==rc ){ + OutputPointer_set_Int32(env, jOutCurrent, iCur); + OutputPointer_set_Int32(env, jOutHigh, iHigh); + } + return (jint)rc; +} + +S3JniApi(sqlite3_errcode(),jint,1errcode)( + JniArgsEnvClass, jobject jpDb +){ + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + return pDb ? 
sqlite3_errcode(pDb) : SQLITE_MISUSE; +} + +S3JniApi(sqlite3_errmsg(),jstring,1errmsg)( + JniArgsEnvClass, jobject jpDb +){ + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + return pDb ? s3jni_utf8_to_jstring( sqlite3_errmsg(pDb), -1) : 0 + /* We don't use errmsg16() directly only because it would cause an + additional level of internal encoding in sqlite3. The end + effect should be identical to using errmsg16(), however. */; +} + +S3JniApi(sqlite3_set_errmsg(),jint,1set_1errmsg)( + JniArgsEnvClass, jobject jpDb, jint errCode, jstring msg +){ + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + const char *zUtf8; + jint rc; + if( !pDb ) return SQLITE_MISUSE; + zUtf8 = msg ? s3jni_jstring_to_mutf8(msg) : NULL; + rc = sqlite3_set_errmsg(pDb, (int)errCode, zUtf8); + s3jni_mutf8_release(msg, zUtf8); + return rc; +} + +S3JniApi(sqlite3_errstr(),jstring,1errstr)( + JniArgsEnvClass, jint rcCode +){ + jstring rv; + const char * z = sqlite3_errstr((int)rcCode); + if( !z ){ + /* This hypothetically cannot happen, but we'll behave like the + low-level library would in such a case... */ + z = "unknown error"; + } + rv = (*env)->NewStringUTF(env, z) + /* We know these values to be plain ASCII, so pose no MUTF-8 + ** incompatibility */; + s3jni_oom_check( rv ); + return rv; +} + +#ifndef SQLITE_ENABLE_NORMALIZE +/* Dummy stub for sqlite3_normalized_sql(). Never called. */ +static const char * sqlite3_normalized_sql(sqlite3_stmt *s){ + S3JniDeclLocal_env; + (*env)->FatalError(env, "dummy sqlite3_normalized_sql() was " + "impossibly called.") /* does not return */; + return 0; +} +#endif + +/* +** Impl for sqlite3_expanded_sql() (if isExpanded is true) and +** sqlite3_normalized_sql(). +*/ +static jstring s3jni_xn_sql(int isExpanded, JNIEnv *env, jobject jpStmt){ + jstring rv = 0; + sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jpStmt); + + if( pStmt ){ + char * zSql = isExpanded + ? sqlite3_expanded_sql(pStmt) + : (char*)sqlite3_normalized_sql(pStmt); + s3jni_oom_fatal(zSql); + if( zSql ){ + rv = s3jni_utf8_to_jstring(zSql, -1); + if( isExpanded ) sqlite3_free(zSql); + } + } + return rv; +} + +S3JniApi(sqlite3_expanded_sql(),jstring,1expanded_1sql)( + JniArgsEnvClass, jobject jpStmt +){ + return s3jni_xn_sql(1, env, jpStmt); +} + +S3JniApi(sqlite3_normalized_sql(),jstring,1normalized_1sql)( + JniArgsEnvClass, jobject jpStmt +){ +#ifdef SQLITE_ENABLE_NORMALIZE + return s3jni_xn_sql(0, env, jpStmt); +#else + return 0; +#endif +} + +S3JniApi(sqlite3_extended_result_codes(),jint,1extended_1result_1codes)( + JniArgsEnvClass, jobject jpDb, jboolean onoff +){ + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + int const rc = pDb + ? sqlite3_extended_result_codes(pDb, onoff ? 1 : 0) + : SQLITE_MISUSE; + return rc; +} + +S3JniApi(sqlite3_finalize(),jint,1finalize)( + JniArgsEnvClass, jlong jpStmt +){ + return jpStmt + ? 
sqlite3_finalize(LongPtrGet_sqlite3_stmt(jpStmt)) + : 0; +} + +S3JniApi(sqlite3_get_auxdata(),jobject,1get_1auxdata)( + JniArgsEnvClass, jobject jCx, jint n +){ + return sqlite3_get_auxdata(PtrGet_sqlite3_context(jCx), (int)n); +} + +S3JniApi(sqlite3_initialize(),jint,1initialize)( + JniArgsEnvClass +){ + return sqlite3_initialize(); +} + +S3JniApi(sqlite3_interrupt(),void,1interrupt)( + JniArgsEnvClass, jobject jpDb +){ + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + if( pDb ){ + sqlite3_interrupt(pDb); + } +} + +S3JniApi(sqlite3_is_interrupted(),jboolean,1is_1interrupted)( + JniArgsEnvClass, jobject jpDb +){ + int rc = 0; + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + if( pDb ){ + rc = sqlite3_is_interrupted(pDb); + } + return rc ? JNI_TRUE : JNI_FALSE; +} + +/* +** Uncaches the current JNIEnv from the S3JniGlobal state, clearing +** any resources owned by that cache entry and making that slot +** available for re-use. +*/ +S3JniApi(sqlite3_java_uncache_thread(), jboolean, 1java_1uncache_1thread)( + JniArgsEnvClass +){ + int rc; + S3JniEnv_mutex_enter; + rc = S3JniEnv_uncache(env); + S3JniEnv_mutex_leave; + return rc ? JNI_TRUE : JNI_FALSE; +} + +S3JniApi(sqlite3_jni_db_error(), jint, 1jni_1db_1error)( + JniArgsEnvClass, jobject jDb, jint jRc, jstring jStr +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + int rc = SQLITE_MISUSE; + if( ps ){ + char *zStr; + zStr = jStr + ? s3jni_jstring_to_utf8( jStr, 0) + : NULL; + rc = s3jni_db_error( ps->pDb, (int)jRc, zStr ); + sqlite3_free(zStr); + } + return rc; +} + +S3JniApi(sqlite3_jni_supports_nio(), jboolean,1jni_1supports_1nio)( + JniArgsEnvClass +){ + return SJG.g.byteBuffer.klazz ? JNI_TRUE : JNI_FALSE; +} + + +S3JniApi(sqlite3_keyword_check(),jboolean,1keyword_1check)( + JniArgsEnvClass, jstring jWord +){ + int nWord = 0; + char * zWord = s3jni_jstring_to_utf8(jWord, &nWord); + int rc = 0; + + s3jni_oom_check(jWord ? !!zWord : 1); + if( zWord && nWord ){ + rc = sqlite3_keyword_check(zWord, nWord); + } + sqlite3_free(zWord); + return rc ? JNI_TRUE : JNI_FALSE; +} + +S3JniApi(sqlite3_keyword_name(),jstring,1keyword_1name)( + JniArgsEnvClass, jint ndx +){ + const char * zWord = 0; + int n = 0; + jstring rv = 0; + + if( 0==sqlite3_keyword_name(ndx, &zWord, &n) ){ + rv = s3jni_utf8_to_jstring(zWord, n); + } + return rv; +} + + +S3JniApi(sqlite3_last_insert_rowid(),jlong,1last_1insert_1rowid)( + JniArgsEnvClass, jobject jpDb +){ + return (jlong)sqlite3_last_insert_rowid(PtrGet_sqlite3(jpDb)); +} + +S3JniApi(sqlite3_limit(),jint,1limit)( + JniArgsEnvClass, jobject jpDb, jint id, jint newVal +){ + jint rc = 0; + sqlite3 * const pDb = PtrGet_sqlite3(jpDb); + if( pDb ){ + rc = sqlite3_limit( pDb, (int)id, (int)newVal ); + } + return rc; +} + +/* Pre-open() code common to sqlite3_open[_v2](). */ +static int s3jni_open_pre(JNIEnv * const env, S3JniEnv **jc, + jstring jDbName, char **zDbName, + S3JniDb ** ps){ + int rc = 0; + jobject jDb = 0; + + *jc = S3JniEnv_get(); + if( !*jc ){ + rc = SQLITE_NOMEM; + goto end; + } + *zDbName = jDbName ? s3jni_jstring_to_utf8( jDbName, 0) : 0; + if( jDbName && !*zDbName ){ + rc = SQLITE_NOMEM; + goto end; + } + jDb = new_java_sqlite3(env, 0); + if( !jDb ){ + sqlite3_free(*zDbName); + *zDbName = 0; + rc = SQLITE_NOMEM; + goto end; + } + *ps = S3JniDb_alloc(env, jDb); + if( *ps ){ + (*jc)->pdbOpening = *ps; + }else{ + S3JniUnrefLocal(jDb); + rc = SQLITE_NOMEM; + } +end: + return rc; +} + +/* +** Post-open() code common to both the sqlite3_open() and +** sqlite3_open_v2() bindings. 
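+** (If Java auto-extensions ran during the open(), then
+** s3jni_run_java_auto_extensions() has already associated ps with the
+** new db handle; the assert below accounts for that case.)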
ps->jDb must be the +** org.sqlite.jni.capi.sqlite3 object which will hold the db's native +** pointer. theRc must be the result code of the open() op. If +** *ppDb is NULL then ps is set aside and its state cleared, +** else ps is associated with *ppDb. If *ppDb is not NULL then +** ps->jDb is stored in jOut (an OutputPointer.sqlite3 instance). +** +** Must be called if s3jni_open_pre() succeeds and must not be called +** if it doesn't. +** +** Returns theRc. +*/ +static int s3jni_open_post(JNIEnv * const env, S3JniEnv * const jc, + S3JniDb * ps, sqlite3 **ppDb, + jobject jOut, int theRc){ + int rc = 0; + jc->pdbOpening = 0; + if( *ppDb ){ + assert(ps->jDb); + if( 0==ps->pDb ){ + ps->pDb = *ppDb; + NativePointerHolder_set(S3JniNph(sqlite3), ps->jDb, *ppDb); + }else{ + assert( ps->pDb==*ppDb + && "Set up via s3jni_run_java_auto_extensions()" ); + } + rc = sqlite3_set_clientdata(ps->pDb, S3JniDb_clientdata_key, + ps, S3JniDb_xDestroy) + /* As of here, the Java/C connection is complete */; + }else{ + S3JniDb_set_aside(ps); + ps = 0; + } + OutputPointer_set_obj(env, S3JniNph(OutputPointer_sqlite3), + jOut, ps ? ps->jDb : 0); + return theRc ? theRc : rc; +} + +S3JniApi(sqlite3_open(),jint,1open)( + JniArgsEnvClass, jstring strName, jobject jOut +){ + sqlite3 * pOut = 0; + char *zName = 0; + S3JniDb * ps = 0; + S3JniEnv * jc = 0; + int rc; + + if( 0==jOut ) return SQLITE_MISUSE; + rc = s3jni_open_pre(env, &jc, strName, &zName, &ps); + if( 0==rc ){ + rc = s3jni_open_post(env, jc, ps, &pOut, jOut, + sqlite3_open(zName, &pOut)); + assert(rc==0 ? pOut!=0 : 1); + sqlite3_free(zName); + } + return (jint)rc; +} + +S3JniApi(sqlite3_open_v2(),jint,1open_1v2)( + JniArgsEnvClass, jstring strName, + jobject jOut, jint flags, jstring strVfs +){ + sqlite3 * pOut = 0; + char *zName = 0; + S3JniDb * ps = 0; + S3JniEnv * jc = 0; + char *zVfs = 0; + int rc; + + if( 0==jOut ) return SQLITE_MISUSE; + rc = s3jni_open_pre(env, &jc, strName, &zName, &ps); + if( 0==rc ){ + if( strVfs ){ + zVfs = s3jni_jstring_to_utf8( strVfs, 0); + if( !zVfs ){ + rc = SQLITE_NOMEM; + } + } + if( 0==rc ){ + rc = sqlite3_open_v2(zName, &pOut, (int)flags, zVfs); + } + rc = s3jni_open_post(env, jc, ps, &pOut, jOut, rc); + } + assert(rc==0 ? pOut!=0 : 1); + sqlite3_free(zName); + sqlite3_free(zVfs); + return (jint)rc; +} + +/* Proxy for the sqlite3_prepare[_v2/3]() family. */ +static jint sqlite3_jni_prepare_v123( int prepVersion, JNIEnv * const env, + jclass self, + jlong jpDb, jbyteArray baSql, + jint nMax, jint prepFlags, + jobject jOutStmt, jobject outTail){ + sqlite3_stmt * pStmt = 0; + jobject jStmt = 0; + const char * zTail = 0; + sqlite3 * const pDb = LongPtrGet_sqlite3(jpDb); + jbyte * const pBuf = pDb ? s3jni_jbyteArray_bytes(baSql) : 0; + int rc = SQLITE_ERROR; + + assert(prepVersion==1 || prepVersion==2 || prepVersion==3); + if( !pDb || !jOutStmt ){ + rc = SQLITE_MISUSE; + goto end; + }else if( !pBuf ){ + rc = baSql ? 
SQLITE_NOMEM : SQLITE_MISUSE; + goto end; + } + jStmt = new_java_sqlite3_stmt(env, 0); + if( !jStmt ){ + rc = SQLITE_NOMEM; + goto end; + } + switch( prepVersion ){ + case 1: rc = sqlite3_prepare(pDb, (const char *)pBuf, + (int)nMax, &pStmt, &zTail); + break; + case 2: rc = sqlite3_prepare_v2(pDb, (const char *)pBuf, + (int)nMax, &pStmt, &zTail); + break; + case 3: rc = sqlite3_prepare_v3(pDb, (const char *)pBuf, + (int)nMax, (unsigned int)prepFlags, + &pStmt, &zTail); + break; + default: + assert(!"Invalid prepare() version"); + } +end: + s3jni_jbyteArray_release(baSql,pBuf); + if( 0==rc ){ + if( 0!=outTail ){ + /* Noting that pBuf is deallocated now but its address is all we need for + ** what follows... */ + assert(zTail ? ((void*)zTail>=(void*)pBuf) : 1); + assert(zTail ? (((int)((void*)zTail - (void*)pBuf)) >= 0) : 1); + OutputPointer_set_Int32( + env, outTail, (int)(zTail ? (zTail - (const char *)pBuf) : 0) + ); + } + if( pStmt ){ + NativePointerHolder_set(S3JniNph(sqlite3_stmt), jStmt, pStmt); + }else{ + /* Happens for comments and whitespace. */ + S3JniUnrefLocal(jStmt); + jStmt = 0; + } + }else{ + S3JniUnrefLocal(jStmt); + jStmt = 0; + } + if( jOutStmt ){ + OutputPointer_set_obj(env, S3JniNph(OutputPointer_sqlite3_stmt), + jOutStmt, jStmt); + } + return (jint)rc; +} +S3JniApi(sqlite3_prepare(),jint,1prepare)( + JNIEnv * const env, jclass self, jlong jpDb, jbyteArray baSql, + jint nMax, jobject jOutStmt, jobject outTail +){ + return sqlite3_jni_prepare_v123(1, env, self, jpDb, baSql, nMax, 0, + jOutStmt, outTail); +} +S3JniApi(sqlite3_prepare_v2(),jint,1prepare_1v2)( + JNIEnv * const env, jclass self, jlong jpDb, jbyteArray baSql, + jint nMax, jobject jOutStmt, jobject outTail +){ + return sqlite3_jni_prepare_v123(2, env, self, jpDb, baSql, nMax, 0, + jOutStmt, outTail); +} +S3JniApi(sqlite3_prepare_v3(),jint,1prepare_1v3)( + JNIEnv * const env, jclass self, jlong jpDb, jbyteArray baSql, + jint nMax, jint prepFlags, jobject jOutStmt, jobject outTail +){ + return sqlite3_jni_prepare_v123(3, env, self, jpDb, baSql, nMax, + prepFlags, jOutStmt, outTail); +} + +/* +** Impl for C-to-Java of the callbacks for both sqlite3_update_hook() +** and sqlite3_preupdate_hook(). The differences are that for +** update_hook(): +** +** - pDb is NULL +** - iKey1 is the row ID +** - iKey2 is unused +*/ +static void s3jni_updatepre_hook_impl(void * pState, sqlite3 *pDb, int opId, + const char *zDb, const char *zTable, + sqlite3_int64 iKey1, sqlite3_int64 iKey2){ + S3JniDb * const ps = pState; + S3JniDeclLocal_env; + jstring jDbName; + jstring jTable; + const int isPre = 0!=pDb; + S3JniHook hook; + + S3JniHook_localdup(isPre ? +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + &ps->hooks.preUpdate +#else + &S3JniHook_empty +#endif + : &ps->hooks.update, &hook); + if( !hook.jObj ){ + return; + } + jDbName = s3jni_utf8_to_jstring( zDb, -1); + jTable = jDbName ? 
s3jni_utf8_to_jstring( zTable, -1) : 0; + S3JniIfThrew { + S3JniExceptionClear; + s3jni_db_error(ps->pDb, SQLITE_NOMEM, 0); + }else{ + assert( hook.jObj ); + assert( hook.midCallback ); + assert( ps->jDb ); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( isPre ) (*env)->CallVoidMethod(env, hook.jObj, hook.midCallback, + ps->jDb, (jint)opId, jDbName, jTable, + (jlong)iKey1, (jlong)iKey2); + else +#endif + (*env)->CallVoidMethod(env, hook.jObj, hook.midCallback, + (jint)opId, jDbName, jTable, (jlong)iKey1); + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("sqlite3_(pre)update_hook() callback"); + s3jni_db_exception(ps->pDb, 0, + "sqlite3_(pre)update_hook() callback threw"); + } + } + S3JniUnrefLocal(jDbName); + S3JniUnrefLocal(jTable); + S3JniHook_localundup(hook); +} + +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK +static void s3jni_preupdate_hook_impl(void * pState, sqlite3 *pDb, int opId, + const char *zDb, const char *zTable, + sqlite3_int64 iKey1, sqlite3_int64 iKey2){ + return s3jni_updatepre_hook_impl(pState, pDb, opId, zDb, zTable, + iKey1, iKey2); +} +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ + +static void s3jni_update_hook_impl(void * pState, int opId, const char *zDb, + const char *zTable, sqlite3_int64 nRowid){ + return s3jni_updatepre_hook_impl(pState, NULL, opId, zDb, zTable, nRowid, 0); +} + +#if !defined(SQLITE_ENABLE_PREUPDATE_HOOK) +/* We need no-op impls for preupdate_{count,depth,blobwrite}() */ +S3JniApi(sqlite3_preupdate_blobwrite(),jint,1preupdate_1blobwrite)( + JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; } +S3JniApi(sqlite3_preupdate_count(),jint,1preupdate_1count)( + JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; } +S3JniApi(sqlite3_preupdate_depth(),jint,1preupdate_1depth)( + JniArgsEnvClass, jlong jDb){ return SQLITE_MISUSE; } +#endif /* !SQLITE_ENABLE_PREUPDATE_HOOK */ + +/* +** JNI wrapper for both sqlite3_update_hook() and +** sqlite3_preupdate_hook() (if isPre is true). +*/ +static jobject s3jni_updatepre_hook(JNIEnv * env, int isPre, jlong jpDb, jobject jHook){ + S3JniDb * const ps = S3JniDb_from_jlong(jpDb); + jclass klazz; + jobject pOld = 0; + jmethodID xCallback; + S3JniHook * pHook; + + if( !ps ) return 0; + S3JniDb_mutex_enter; + pHook = isPre ? +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + &ps->hooks.preUpdate +#else + 0 +#endif + : &ps->hooks.update; + if( !pHook ){ + goto end; + } + pOld = pHook->jObj; + if( pOld && jHook && (*env)->IsSameObject(env, pOld, jHook) ){ + goto end; + } + if( !jHook ){ + if( pOld ){ + jobject tmp = S3JniRefLocal(pOld); + S3JniUnrefGlobal(pOld); + pOld = tmp; + } + *pHook = S3JniHook_empty; +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( isPre ) sqlite3_preupdate_hook(ps->pDb, 0, 0); + else +#endif + sqlite3_update_hook(ps->pDb, 0, 0); + goto end; + } + klazz = (*env)->GetObjectClass(env, jHook); + xCallback = isPre + ? 
(*env)->GetMethodID(env, klazz, "call", + "(Lorg/sqlite/jni/capi/sqlite3;" + "I" + "Ljava/lang/String;" + "Ljava/lang/String;" + "JJ)V") + : (*env)->GetMethodID(env, klazz, "call", + "(ILjava/lang/String;Ljava/lang/String;J)V"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + S3JniExceptionClear; + s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Cannot not find matching callback on " + "(pre)update hook object."); + }else{ + pHook->midCallback = xCallback; + pHook->jObj = S3JniRefGlobal(jHook); +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + if( isPre ) sqlite3_preupdate_hook(ps->pDb, s3jni_preupdate_hook_impl, ps); + else +#endif + sqlite3_update_hook(ps->pDb, s3jni_update_hook_impl, ps); + if( pOld ){ + jobject tmp = S3JniRefLocal(pOld); + S3JniUnrefGlobal(pOld); + pOld = tmp; + } + } +end: + S3JniDb_mutex_leave; + return pOld; +} + + +S3JniApi(sqlite3_preupdate_hook(),jobject,1preupdate_1hook)( + JniArgsEnvClass, jlong jpDb, jobject jHook +){ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + return s3jni_updatepre_hook(env, 1, jpDb, jHook); +#else + return NULL; +#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */ +} + +/* Impl for sqlite3_preupdate_{new,old}(). */ +static int s3jni_preupdate_newold(JNIEnv * const env, int isNew, jlong jpDb, + jint iCol, jobject jOut){ +#ifdef SQLITE_ENABLE_PREUPDATE_HOOK + sqlite3 * const pDb = LongPtrGet_sqlite3(jpDb); + int rc = SQLITE_MISUSE; + if( pDb ){ + sqlite3_value * pOut = 0; + int (*fOrig)(sqlite3*,int,sqlite3_value**) = + isNew ? sqlite3_preupdate_new : sqlite3_preupdate_old; + rc = fOrig(pDb, (int)iCol, &pOut); + if( 0==rc ){ + jobject pWrap = new_java_sqlite3_value(env, pOut); + if( !pWrap ){ + rc = SQLITE_NOMEM; + } + OutputPointer_set_obj(env, S3JniNph(OutputPointer_sqlite3_value), + jOut, pWrap); + S3JniUnrefLocal(pWrap); + } + } + return rc; +#else + return SQLITE_MISUSE; +#endif +} + +S3JniApi(sqlite3_preupdate_new(),jint,1preupdate_1new)( + JniArgsEnvClass, jlong jpDb, jint iCol, jobject jOut +){ + return s3jni_preupdate_newold(env, 1, jpDb, iCol, jOut); +} + +S3JniApi(sqlite3_preupdate_old(),jint,1preupdate_1old)( + JniArgsEnvClass, jlong jpDb, jint iCol, jobject jOut +){ + return s3jni_preupdate_newold(env, 0, jpDb, iCol, jOut); +} + + +/* Central C-to-Java sqlite3_progress_handler() proxy. */ +static int s3jni_progress_handler_impl(void *pP){ + S3JniDb * const ps = (S3JniDb *)pP; + int rc = 0; + S3JniDeclLocal_env; + S3JniHook hook; + + S3JniHook_localdup(&ps->hooks.progress, &hook); + if( hook.jObj ){ + rc = (int)(*env)->CallIntMethod(env, hook.jObj, hook.midCallback); + S3JniIfThrew{ + rc = s3jni_db_exception(ps->pDb, rc, + "sqlite3_progress_handler() callback threw"); + } + S3JniHook_localundup(hook); + } + return rc; +} + +S3JniApi(sqlite3_progress_handler(),void,1progress_1handler)( + JniArgsEnvClass,jobject jDb, jint n, jobject jProgress +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + S3JniHook * const pHook = ps ? 
&ps->hooks.progress : 0; + + if( !ps ) return; + S3JniDb_mutex_enter; + if( n<1 || !jProgress ){ + S3JniHook_unref(pHook); + sqlite3_progress_handler(ps->pDb, 0, 0, 0); + }else{ + jclass const klazz = (*env)->GetObjectClass(env, jProgress); + jmethodID const xCallback = (*env)->GetMethodID(env, klazz, "call", "()I"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + S3JniExceptionClear; + s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Cannot not find matching xCallback() on " + "ProgressHandler object."); + }else{ + S3JniUnrefGlobal(pHook->jObj); + pHook->midCallback = xCallback; + pHook->jObj = S3JniRefGlobal(jProgress); + sqlite3_progress_handler(ps->pDb, (int)n, s3jni_progress_handler_impl, ps); + } + } + S3JniDb_mutex_leave; +} + +S3JniApi(sqlite3_randomness(),void,1randomness)( + JniArgsEnvClass, jbyteArray jTgt +){ + jbyte * const jba = s3jni_jbyteArray_bytes(jTgt); + if( jba ){ + jsize const nTgt = (*env)->GetArrayLength(env, jTgt); + sqlite3_randomness( (int)nTgt, jba ); + s3jni_jbyteArray_commit(jTgt, jba); + } +} + + +S3JniApi(sqlite3_reset(),jint,1reset)( + JniArgsEnvClass, jobject jpStmt +){ + sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jpStmt); + return pStmt ? sqlite3_reset(pStmt) : SQLITE_MISUSE; +} + +/* Clears all entries from S3JniGlobal.autoExt. */ +static void s3jni_reset_auto_extension(JNIEnv *env){ + int i; + S3JniAutoExt_mutex_enter; + for( i = 0; i < SJG.autoExt.nExt; ++i ){ + S3JniAutoExtension_clear( &SJG.autoExt.aExt[i] ); + } + SJG.autoExt.nExt = 0; + S3JniAutoExt_mutex_leave; +} + +S3JniApi(sqlite3_reset_auto_extension(),void,1reset_1auto_1extension)( + JniArgsEnvClass +){ + s3jni_reset_auto_extension(env); +} + +/* Impl for sqlite3_result_text/blob() and friends. */ +static void result_blob_text(int as64 /* true for text64/blob64() mode */, + int eTextRep /* 0 for blobs, else SQLITE_UTF... */, + JNIEnv * const env, sqlite3_context *pCx, + jbyteArray jBa, jlong nMax){ + int const asBlob = 0==eTextRep; + if( !pCx ){ + /* We should arguably emit a warning here. But where to log it? */ + return; + }else if( jBa ){ + jbyte * const pBuf = s3jni_jbyteArray_bytes(jBa); + jsize nBA = (*env)->GetArrayLength(env, jBa); + if( nMax>=0 && nBA>(jsize)nMax ){ + nBA = (jsize)nMax; + /** + From the sqlite docs: + + > If the 3rd parameter to any of the sqlite3_result_text* + interfaces other than sqlite3_result_text64() is negative, + then SQLite computes the string length itself by searching + the 2nd parameter for the first zero character. + + Note that the text64() interfaces take an unsigned value for + the length, which Java does not support. This binding takes + the approach of passing on negative values to the C API, + which will in turn fail with SQLITE_TOOBIG at some later + point (recall that the sqlite3_result_xyz() family do not + have result values). + */ + } + if( as64 ){ /* 64-bit... */ + static const jsize nLimit64 = + SQLITE_MAX_ALLOCATION_SIZE/*only _kinda_ arbitrary*/; + if( nBA > nLimit64 ){ + sqlite3_result_error_toobig(pCx); + }else if( asBlob ){ + sqlite3_result_blob64(pCx, pBuf, (sqlite3_uint64)nBA, + SQLITE_TRANSIENT); + }else{ /* text64... */ + if( encodingTypeIsValid(eTextRep) ){ + sqlite3_result_text64(pCx, (const char *)pBuf, + (sqlite3_uint64)nBA, + SQLITE_TRANSIENT, eTextRep); + }else{ + sqlite3_result_error_code(pCx, SQLITE_FORMAT); + } + } + }else{ /* 32-bit... 
*/ + static const jsize nLimit = SQLITE_MAX_ALLOCATION_SIZE; + if( nBA > nLimit ){ + sqlite3_result_error_toobig(pCx); + }else if( asBlob ){ + sqlite3_result_blob(pCx, pBuf, (int)nBA, + SQLITE_TRANSIENT); + }else{ + switch( eTextRep ){ + case SQLITE_UTF8: + sqlite3_result_text(pCx, (const char *)pBuf, (int)nBA, + SQLITE_TRANSIENT); + break; + case SQLITE_UTF16: + sqlite3_result_text16(pCx, (const char *)pBuf, (int)nBA, + SQLITE_TRANSIENT); + break; + case SQLITE_UTF16LE: + sqlite3_result_text16le(pCx, (const char *)pBuf, (int)nBA, + SQLITE_TRANSIENT); + break; + case SQLITE_UTF16BE: + sqlite3_result_text16be(pCx, (const char *)pBuf, (int)nBA, + SQLITE_TRANSIENT); + break; + } + } + s3jni_jbyteArray_release(jBa, pBuf); + } + }else{ + sqlite3_result_null(pCx); + } +} + +S3JniApi(sqlite3_result_blob(),void,1result_1blob)( + JniArgsEnvClass, jobject jpCx, jbyteArray jBa, jint nMax +){ + return result_blob_text(0, 0, env, PtrGet_sqlite3_context(jpCx), jBa, nMax); +} + +S3JniApi(sqlite3_result_blob64(),void,1result_1blob64)( + JniArgsEnvClass, jobject jpCx, jbyteArray jBa, jlong nMax +){ + return result_blob_text(1, 0, env, PtrGet_sqlite3_context(jpCx), jBa, nMax); +} + +S3JniApi(sqlite3_result_double(),void,1result_1double)( + JniArgsEnvClass, jobject jpCx, jdouble v +){ + sqlite3_result_double(PtrGet_sqlite3_context(jpCx), v); +} + +S3JniApi(sqlite3_result_error(),void,1result_1error)( + JniArgsEnvClass, jobject jpCx, jbyteArray baMsg, jint eTextRep +){ + const char * zUnspecified = "Unspecified error."; + jsize const baLen = (*env)->GetArrayLength(env, baMsg); + jbyte * const pjBuf = baMsg ? s3jni_jbyteArray_bytes(baMsg) : NULL; + switch( pjBuf ? eTextRep : SQLITE_UTF8 ){ + case SQLITE_UTF8: { + const char *zMsg = pjBuf ? (const char *)pjBuf : zUnspecified; + int const n = pjBuf ? 
(int)baLen : (int)sqlite3Strlen30(zMsg); + sqlite3_result_error(PtrGet_sqlite3_context(jpCx), zMsg, n); + break; + } + case SQLITE_UTF16: { + const void *zMsg = pjBuf; + sqlite3_result_error16(PtrGet_sqlite3_context(jpCx), zMsg, (int)baLen); + break; + } + default: + sqlite3_result_error(PtrGet_sqlite3_context(jpCx), + "Invalid encoding argument passed " + "to sqlite3_result_error().", -1); + break; + } + s3jni_jbyteArray_release(baMsg,pjBuf); +} + +S3JniApi(sqlite3_result_error_code(),void,1result_1error_1code)( + JniArgsEnvClass, jobject jpCx, jint v +){ + sqlite3_result_error_code(PtrGet_sqlite3_context(jpCx), (int)v); +} + +S3JniApi(sqlite3_result_error_nomem(),void,1result_1error_1nomem)( + JniArgsEnvClass, jobject jpCx +){ + sqlite3_result_error_nomem(PtrGet_sqlite3_context(jpCx)); +} + +S3JniApi(sqlite3_result_error_toobig(),void,1result_1error_1toobig)( + JniArgsEnvClass, jobject jpCx +){ + sqlite3_result_error_toobig(PtrGet_sqlite3_context(jpCx)); +} + +S3JniApi(sqlite3_result_int(),void,1result_1int)( + JniArgsEnvClass, jobject jpCx, jint v +){ + sqlite3_result_int(PtrGet_sqlite3_context(jpCx), (int)v); +} + +S3JniApi(sqlite3_result_int64(),void,1result_1int64)( + JniArgsEnvClass, jobject jpCx, jlong v +){ + sqlite3_result_int64(PtrGet_sqlite3_context(jpCx), (sqlite3_int64)v); +} + +S3JniApi(sqlite3_result_java_object(),void,1result_1java_1object)( + JniArgsEnvClass, jobject jpCx, jobject v +){ + sqlite3_context * pCx = PtrGet_sqlite3_context(jpCx); + if( !pCx ) return; + else if( v ){ + jobject const rjv = S3JniRefGlobal(v); + if( rjv ){ + sqlite3_result_pointer(pCx, rjv, + s3jni__value_jref_key, S3Jni_jobject_finalizer); + }else{ + sqlite3_result_error_nomem(PtrGet_sqlite3_context(jpCx)); + } + }else{ + sqlite3_result_null(PtrGet_sqlite3_context(jpCx)); + } +} + +S3JniApi(sqlite3_result_nio_buffer(),void,1result_1nio_1buffer)( + JniArgsEnvClass, jobject jpCtx, jobject jBuffer, + jint iOffset, jint iN +){ + sqlite3_context * pCx = PtrGet_sqlite3_context(jpCtx); + int rc; + S3JniNioArgs args; + if( !pCx ){ + return; + }else if( !SJG.g.byteBuffer.klazz ){ + sqlite3_result_error( + pCx, "This JVM does not support JNI access to ByteBuffers.", -1 + ); + return; + } + rc = s3jni_setup_nio_args(env, &args, jBuffer, iOffset, iN); + if(rc){ + if( iOffset<0 ){ + sqlite3_result_error(pCx, "Start index may not be negative.", -1); + }else if( SQLITE_TOOBIG==rc ){ + sqlite3_result_error_toobig(pCx); + }else{ + sqlite3_result_error( + pCx, "Invalid arguments to sqlite3_result_nio_buffer().", -1 + ); + } + }else if( !args.pStart || !args.nOut ){ + sqlite3_result_null(pCx); + }else{ + sqlite3_result_blob(pCx, args.pStart, args.nOut, SQLITE_TRANSIENT); + } +} + + +S3JniApi(sqlite3_result_null(),void,1result_1null)( + JniArgsEnvClass, jobject jpCx +){ + sqlite3_result_null(PtrGet_sqlite3_context(jpCx)); +} + +S3JniApi(sqlite3_result_subtype(),void,1result_1subtype)( + JniArgsEnvClass, jobject jpCx, jint v +){ + sqlite3_result_subtype(PtrGet_sqlite3_context(jpCx), (unsigned int)v); +} + + +S3JniApi(sqlite3_result_text(),void,1result_1text)( + JniArgsEnvClass, jobject jpCx, jbyteArray jBa, jint nMax +){ + return result_blob_text(0, SQLITE_UTF8, env, + PtrGet_sqlite3_context(jpCx), jBa, nMax); +} + +S3JniApi(sqlite3_result_text64(),void,1result_1text64)( + JniArgsEnvClass, jobject jpCx, jbyteArray jBa, jlong nMax, + jint eTextRep +){ + return result_blob_text(1, eTextRep, env, + PtrGet_sqlite3_context(jpCx), jBa, nMax); +} + +S3JniApi(sqlite3_result_value(),void,1result_1value)( + JniArgsEnvClass, 
jobject jpCx, jobject jpSVal +){ + sqlite3_result_value(PtrGet_sqlite3_context(jpCx), + PtrGet_sqlite3_value(jpSVal)); +} + +S3JniApi(sqlite3_result_zeroblob(),void,1result_1zeroblob)( + JniArgsEnvClass, jobject jpCx, jint v +){ + sqlite3_result_zeroblob(PtrGet_sqlite3_context(jpCx), (int)v); +} + +S3JniApi(sqlite3_result_zeroblob64(),jint,1result_1zeroblob64)( + JniArgsEnvClass, jobject jpCx, jlong v +){ + return (jint)sqlite3_result_zeroblob64(PtrGet_sqlite3_context(jpCx), + (sqlite3_int64)v); +} + +S3JniApi(sqlite3_rollback_hook(),jobject,1rollback_1hook)( + JniArgsEnvClass, jlong jpDb, jobject jHook +){ + return s3jni_commit_rollback_hook(0, env, jpDb, jHook); +} + +/* Callback for sqlite3_set_authorizer(). */ +int s3jni_xAuth(void* pState, int op,const char*z0, const char*z1, + const char*z2,const char*z3){ + S3JniDb * const ps = pState; + S3JniDeclLocal_env; + S3JniHook hook; + int rc = 0; + + S3JniHook_localdup(&ps->hooks.auth, &hook ); + if( hook.jObj ){ + jstring const s0 = z0 ? s3jni_utf8_to_jstring( z0, -1) : 0; + jstring const s1 = z1 ? s3jni_utf8_to_jstring( z1, -1) : 0; + jstring const s2 = z2 ? s3jni_utf8_to_jstring( z2, -1) : 0; + jstring const s3 = z3 ? s3jni_utf8_to_jstring( z3, -1) : 0; + + rc = (*env)->CallIntMethod(env, hook.jObj, hook.midCallback, (jint)op, + s0, s1, s2, s3); + S3JniIfThrew{ + rc = s3jni_db_exception(ps->pDb, rc, "sqlite3_set_authorizer() callback"); + } + S3JniUnrefLocal(s0); + S3JniUnrefLocal(s1); + S3JniUnrefLocal(s2); + S3JniUnrefLocal(s3); + S3JniHook_localundup(hook); + } + return rc; +} + +S3JniApi(sqlite3_set_authorizer(),jint,1set_1authorizer)( + JniArgsEnvClass,jobject jDb, jobject jHook +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + S3JniHook * const pHook = ps ? &ps->hooks.auth : 0; + int rc = 0; + + if( !ps ) return SQLITE_MISUSE; + S3JniDb_mutex_enter; + if( !jHook ){ + S3JniHook_unref(pHook); + rc = sqlite3_set_authorizer( ps->pDb, 0, 0 ); + }else{ + jclass klazz; + if( pHook->jObj ){ + if( (*env)->IsSameObject(env, pHook->jObj, jHook) ){ + /* Same object - this is a no-op. */ + S3JniDb_mutex_leave; + return 0; + } + S3JniHook_unref(pHook); + } + pHook->jObj = S3JniRefGlobal(jHook); + klazz = (*env)->GetObjectClass(env, jHook); + pHook->midCallback = (*env)->GetMethodID(env, klazz, + "call", + "(I" + "Ljava/lang/String;" + "Ljava/lang/String;" + "Ljava/lang/String;" + "Ljava/lang/String;" + ")I"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + rc = s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Error setting up Java parts of authorizer hook."); + }else{ + rc = sqlite3_set_authorizer(ps->pDb, s3jni_xAuth, ps); + } + if( rc ) S3JniHook_unref(pHook); + } + S3JniDb_mutex_leave; + return rc; +} + +S3JniApi(sqlite3_set_auxdata(),void,1set_1auxdata)( + JniArgsEnvClass, jobject jCx, jint n, jobject jAux +){ + sqlite3_set_auxdata(PtrGet_sqlite3_context(jCx), (int)n, + S3JniRefGlobal(jAux), S3Jni_jobject_finalizer); +} + +S3JniApi(sqlite3_set_last_insert_rowid(),void,1set_1last_1insert_1rowid)( + JniArgsEnvClass, jobject jpDb, jlong rowId +){ + sqlite3_set_last_insert_rowid(PtrGet_sqlite3(jpDb), + (sqlite3_int64)rowId); +} + +S3JniApi(sqlite3_shutdown(),jint,1shutdown)( + JniArgsEnvClass +){ + s3jni_reset_auto_extension(env); +#ifdef SQLITE_ENABLE_SQLLOG + S3JniHook_unref(&SJG.hook.sqllog); +#endif + S3JniHook_unref(&SJG.hook.configlog); + /* Free up S3JniDb recycling bin.
*/ + S3JniDb_mutex_enter; { + while( S3JniGlobal.perDb.aFree ){ + S3JniDb * const d = S3JniGlobal.perDb.aFree; + S3JniGlobal.perDb.aFree = d->pNext; + S3JniDb_clear(env, d); + sqlite3_free(d); + } + } S3JniDb_mutex_leave; + S3JniGlobal_mutex_enter; { + /* Free up S3JniUdf recycling bin. */ + while( S3JniGlobal.udf.aFree ){ + S3JniUdf * const u = S3JniGlobal.udf.aFree; + S3JniGlobal.udf.aFree = u->pNext; + u->pNext = 0; + S3JniUdf_free(env, u, 0); + } + } S3JniGlobal_mutex_leave; + S3JniHook_mutex_enter; { + /* Free up S3JniHook recycling bin. */ + while( S3JniGlobal.hook.aFree ){ + S3JniHook * const u = S3JniGlobal.hook.aFree; + S3JniGlobal.hook.aFree = u->pNext; + u->pNext = 0; + assert( !u->doXDestroy ); + assert( !u->jObj ); + assert( !u->jExtra ); + sqlite3_free( u ); + } + } S3JniHook_mutex_leave; + /* Free up env cache. */ + S3JniEnv_mutex_enter; { + while( SJG.envCache.aHead ){ + S3JniEnv_uncache( SJG.envCache.aHead->env ); + } + } S3JniEnv_mutex_leave; + /* Do not clear S3JniGlobal.jvm or S3JniGlobal.g: it's legal to + ** restart the lib. */ + return sqlite3_shutdown(); +} + +S3JniApi(sqlite3_status(),jint,1status)( + JniArgsEnvClass, jint op, jobject jOutCurrent, jobject jOutHigh, + jboolean reset +){ + int iCur = 0, iHigh = 0; + int rc = sqlite3_status( op, &iCur, &iHigh, reset ); + if( 0==rc ){ + OutputPointer_set_Int32(env, jOutCurrent, iCur); + OutputPointer_set_Int32(env, jOutHigh, iHigh); + } + return (jint)rc; +} + +S3JniApi(sqlite3_status64(),jint,1status64)( + JniArgsEnvClass, jint op, jobject jOutCurrent, jobject jOutHigh, + jboolean reset +){ + sqlite3_int64 iCur = 0, iHigh = 0; + int rc = sqlite3_status64( op, &iCur, &iHigh, reset ); + if( 0==rc ){ + OutputPointer_set_Int64(env, jOutCurrent, iCur); + OutputPointer_set_Int64(env, jOutHigh, iHigh); + } + return (jint)rc; +} + +S3JniApi(sqlite3_stmt_status(),jint,1stmt_1status)( + JniArgsEnvClass, jobject jStmt, jint op, jboolean reset +){ + return sqlite3_stmt_status(PtrGet_sqlite3_stmt(jStmt), + (int)op, reset ? 1 : 0); +} + + +static int s3jni_strlike_glob(int isLike, JNIEnv *const env, + jbyteArray baG, jbyteArray baT, jint escLike){ + int rc = 0; + jbyte * const pG = s3jni_jbyteArray_bytes(baG); + jbyte * const pT = s3jni_jbyteArray_bytes(baT); + + /* Note that we're relying on the byte arrays having been + NUL-terminated on the Java side. */ + rc = isLike + ? sqlite3_strlike((const char *)pG, (const char *)pT, + (unsigned int)escLike) + : sqlite3_strglob((const char *)pG, (const char *)pT); + s3jni_jbyteArray_release(baG, pG); + s3jni_jbyteArray_release(baT, pT); + return rc; +} + +S3JniApi(sqlite3_strglob(),jint,1strglob)( + JniArgsEnvClass, jbyteArray baG, jbyteArray baT +){ + return s3jni_strlike_glob(0, env, baG, baT, 0); +} + +S3JniApi(sqlite3_strlike(),jint,1strlike)( + JniArgsEnvClass, jbyteArray baG, jbyteArray baT, jint escChar +){ + return s3jni_strlike_glob(1, env, baG, baT, escChar); +} + +S3JniApi(sqlite3_sql(),jstring,1sql)( + JniArgsEnvClass, jobject jpStmt +){ + sqlite3_stmt * const pStmt = PtrGet_sqlite3_stmt(jpStmt); + jstring rv = 0; + if( pStmt ){ + const char * zSql = 0; + zSql = sqlite3_sql(pStmt); + rv = s3jni_utf8_to_jstring( zSql, -1); + } + return rv; +} + +S3JniApi(sqlite3_step(),jint,1step)( + JniArgsEnvClass, jlong jpStmt +){ + sqlite3_stmt * const pStmt = LongPtrGet_sqlite3_stmt(jpStmt); + return pStmt ? 
(jint)sqlite3_step(pStmt) : (jint)SQLITE_MISUSE; +} + +S3JniApi(sqlite3_table_column_metadata(),jint,1table_1column_1metadata)( + JniArgsEnvClass, jobject jDb, jstring jDbName, jstring jTableName, + jstring jColumnName, jobject jDataType, jobject jCollSeq, jobject jNotNull, + jobject jPrimaryKey, jobject jAutoinc +){ + sqlite3 * const db = PtrGet_sqlite3(jDb); + char * zDbName = 0, * zTableName = 0, * zColumnName = 0; + const char * pzCollSeq = 0; + const char * pzDataType = 0; + int pNotNull = 0, pPrimaryKey = 0, pAutoinc = 0; + int rc; + + if( !db || !jDbName || !jTableName ) return SQLITE_MISUSE; + zDbName = s3jni_jstring_to_utf8(jDbName,0); + zTableName = zDbName ? s3jni_jstring_to_utf8(jTableName,0) : 0; + zColumnName = (zTableName && jColumnName) + ? s3jni_jstring_to_utf8(jColumnName,0) : 0; + rc = zTableName + ? sqlite3_table_column_metadata(db, zDbName, zTableName, + zColumnName, &pzDataType, &pzCollSeq, + &pNotNull, &pPrimaryKey, &pAutoinc) + : SQLITE_NOMEM; + if( 0==rc ){ + jstring jseq = jCollSeq + ? (pzCollSeq ? s3jni_utf8_to_jstring(pzCollSeq, -1) : 0) + : 0; + jstring jdtype = jDataType + ? (pzDataType ? s3jni_utf8_to_jstring(pzDataType, -1) : 0) + : 0; + if( (jCollSeq && pzCollSeq && !jseq) + || (jDataType && pzDataType && !jdtype) ){ + rc = SQLITE_NOMEM; + }else{ + if( jNotNull ) OutputPointer_set_Bool(env, jNotNull, pNotNull); + if( jPrimaryKey ) OutputPointer_set_Bool(env, jPrimaryKey, pPrimaryKey); + if( jAutoinc ) OutputPointer_set_Bool(env, jAutoinc, pAutoinc); + if( jCollSeq ) OutputPointer_set_String(env, jCollSeq, jseq); + if( jDataType ) OutputPointer_set_String(env, jDataType, jdtype); + } + S3JniUnrefLocal(jseq); + S3JniUnrefLocal(jdtype); + } + sqlite3_free(zDbName); + sqlite3_free(zTableName); + sqlite3_free(zColumnName); + return rc; +} + +static int s3jni_trace_impl(unsigned traceflag, void *pC, void *pP, void *pX){ + S3JniDb * const ps = (S3JniDb *)pC; + S3JniDeclLocal_env; + jobject jX = NULL /* the tracer's X arg */; + jobject jP = NULL /* the tracer's P arg */; + jobject jPUnref = NULL /* potentially a local ref to jP */; + int rc = 0; + S3JniHook hook; + + S3JniHook_localdup(&ps->hooks.trace, &hook ); + if( !hook.jObj ){ + return 0; + } + switch( traceflag ){ + case SQLITE_TRACE_STMT: + jX = s3jni_utf8_to_jstring( (const char *)pX, -1); + if( !jX ) rc = SQLITE_NOMEM; + break; + case SQLITE_TRACE_PROFILE: + jX = (*env)->NewObject(env, SJG.g.cLong, SJG.g.ctorLong1, + (jlong)*((sqlite3_int64*)pX)); + // hmm. ^^^ (*pX) really is zero. 
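+      // Descriptive note: per the sqlite3_trace_v2() documentation, for
+      // SQLITE_TRACE_PROFILE the X argument points to a 64-bit integer
+      // holding the statement's estimated elapsed time in nanoseconds;
+      // that value is what gets boxed into the java.lang.Long above.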
+ // MARKER(("profile time = %llu\n", *((sqlite3_int64*)pX))); + s3jni_oom_check( jX ); + if( !jX ) rc = SQLITE_NOMEM; + break; + case SQLITE_TRACE_ROW: + break; + case SQLITE_TRACE_CLOSE: + jP = jPUnref = S3JniRefLocal(ps->jDb); + break; + default: + assert(!"cannot happen - unknown trace flag"); + rc = SQLITE_ERROR; + } + if( 0==rc ){ + if( !jP ){ + /* Create a new temporary sqlite3_stmt wrapper */ + jP = jPUnref = new_java_sqlite3_stmt(env, pP); + if( !jP ){ + rc = SQLITE_NOMEM; + } + } + if( 0==rc ){ + assert(jP); + rc = (int)(*env)->CallIntMethod(env, hook.jObj, hook.midCallback, + (jint)traceflag, jP, jX); + S3JniIfThrew{ + rc = s3jni_db_exception(ps->pDb, SQLITE_ERROR, + "sqlite3_trace_v2() callback threw."); + } + } + } + S3JniUnrefLocal(jPUnref); + S3JniUnrefLocal(jX); + S3JniHook_localundup(hook); + return rc; +} + +S3JniApi(sqlite3_trace_v2(),jint,1trace_1v2)( + JniArgsEnvClass,jobject jDb, jint traceMask, jobject jTracer +){ + S3JniDb * const ps = S3JniDb_from_java(jDb); + int rc; + + if( !ps ) return SQLITE_MISUSE; + if( !traceMask || !jTracer ){ + S3JniDb_mutex_enter; + rc = (jint)sqlite3_trace_v2(ps->pDb, 0, 0, 0); + S3JniHook_unref(&ps->hooks.trace); + S3JniDb_mutex_leave; + }else{ + jclass const klazz = (*env)->GetObjectClass(env, jTracer); + S3JniHook hook = S3JniHook_empty; + hook.midCallback = (*env)->GetMethodID( + env, klazz, "call", "(ILjava/lang/Object;Ljava/lang/Object;)I" + ); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + S3JniExceptionClear; + rc = s3jni_db_error(ps->pDb, SQLITE_ERROR, + "Cannot not find matching call() on " + "TracerCallback object."); + }else{ + hook.jObj = S3JniRefGlobal(jTracer); + S3JniDb_mutex_enter; + rc = sqlite3_trace_v2(ps->pDb, (unsigned)traceMask, s3jni_trace_impl, ps); + if( 0==rc ){ + S3JniHook_unref(&ps->hooks.trace); + ps->hooks.trace = hook /* transfer ownership of reference */; + }else{ + S3JniHook_unref(&hook); + } + S3JniDb_mutex_leave; + } + } + return rc; +} + +S3JniApi(sqlite3_txn_state(),jint,1txn_1state)( + JniArgsEnvClass,jobject jDb, jstring jSchema +){ + sqlite3 * const pDb = PtrGet_sqlite3(jDb); + int rc = SQLITE_MISUSE; + if( pDb ){ + char * zSchema = jSchema + ? s3jni_jstring_to_utf8(jSchema, 0) + : 0; + if( !jSchema || (zSchema && jSchema) ){ + rc = sqlite3_txn_state(pDb, zSchema); + sqlite3_free(zSchema); + }else{ + rc = SQLITE_NOMEM; + } + } + return rc; +} + +S3JniApi(sqlite3_update_hook(),jobject,1update_1hook)( + JniArgsEnvClass, jlong jpDb, jobject jHook +){ + return s3jni_updatepre_hook(env, 0, jpDb, jHook); +} + + +S3JniApi(sqlite3_value_blob(),jbyteArray,1value_1blob)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + const jbyte * pBytes = sv ? sqlite3_value_blob(sv) : 0; + int const nLen = pBytes ? sqlite3_value_bytes(sv) : 0; + + s3jni_oom_check( nLen ? !!pBytes : 1 ); + return pBytes + ? s3jni_new_jbyteArray(pBytes, nLen) + : NULL; +} + +S3JniApi(sqlite3_value_bytes(),jint,1value_1bytes)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return sv ? sqlite3_value_bytes(sv) : 0; +} + +S3JniApi(sqlite3_value_bytes16(),jint,1value_1bytes16)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return sv ? sqlite3_value_bytes16(sv) : 0; +} + + +S3JniApi(sqlite3_value_double(),jdouble,1value_1double)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return (jdouble) (sv ? 
sqlite3_value_double(sv) : 0.0); +} + + +S3JniApi(sqlite3_value_dup(),jobject,1value_1dup)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + sqlite3_value * const sd = sv ? sqlite3_value_dup(sv) : 0; + jobject rv = sd ? new_java_sqlite3_value(env, sd) : 0; + if( sd && !rv ) { + /* OOM */ + sqlite3_value_free(sd); + } + return rv; +} + +S3JniApi(sqlite3_value_free(),void,1value_1free)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + if( sv ){ + sqlite3_value_free(sv); + } +} + +S3JniApi(sqlite3_value_int(),jint,1value_1int)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return (jint) (sv ? sqlite3_value_int(sv) : 0); +} + +S3JniApi(sqlite3_value_int64(),jlong,1value_1int64)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return (jlong) (sv ? sqlite3_value_int64(sv) : 0LL); +} + +S3JniApi(sqlite3_value_java_object(),jobject,1value_1java_1object)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + return sv + ? sqlite3_value_pointer(sv, s3jni__value_jref_key) + : 0; +} + +S3JniApi(sqlite3_value_nio_buffer(),jobject,1value_1nio_1buffer)( + JniArgsEnvClass, jobject jVal +){ + sqlite3_value * const sv = PtrGet_sqlite3_value(jVal); + jobject rv = 0; + if( sv ){ + const void * const p = sqlite3_value_blob(sv); + if( p ){ + const int n = sqlite3_value_bytes(sv); + rv = s3jni__blob_to_ByteBuffer(env, p, n); + } + } + return rv; +} + +S3JniApi(sqlite3_value_text(),jbyteArray,1value_1text)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + const unsigned char * const p = sv ? sqlite3_value_text(sv) : 0; + int const n = p ? sqlite3_value_bytes(sv) : 0; + return p ? s3jni_new_jbyteArray(p, n) : 0; +} + +#if 0 +// this impl might prove useful. +S3JniApi(sqlite3_value_text(),jstring,1value_1text)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + const unsigned char * const p = sv ? sqlite3_value_text(sv) : 0; + int const n = p ? sqlite3_value_bytes(sv) : 0; + return p ? s3jni_utf8_to_jstring( (const char *)p, n) : 0; +} +#endif + +S3JniApi(sqlite3_value_text16(),jstring,1value_1text16)( + JniArgsEnvClass, jlong jpSVal +){ + sqlite3_value * const sv = LongPtrGet_sqlite3_value(jpSVal); + const int n = sv ? sqlite3_value_bytes16(sv) : 0; + const void * const p = sv ? sqlite3_value_text16(sv) : 0; + return p ? s3jni_text16_to_jstring(env, p, n) : 0; +} + +JniDecl(void,1jni_1internal_1details)(JniArgsEnvClass){ + MARKER(("\nVarious bits of internal info:\n")); + puts("FTS5 is " +#ifdef SQLITE_ENABLE_FTS5 + "available" +#else + "unavailable" +#endif + "." 
+ ); + puts("sizeofs:"); +#define SO(T) printf("\tsizeof(" #T ") = %u\n", (unsigned)sizeof(T)) + SO(void*); + SO(jmethodID); + SO(jfieldID); + SO(S3JniEnv); + SO(S3JniHook); + SO(S3JniDb); + SO(S3JniNphOps); + printf("\t(^^^ %u NativePointerHolder/OutputPointer.T types)\n", + (unsigned)S3Jni_NphCache_size); + SO(S3JniGlobal); + SO(S3JniGlobal.nph); + SO(S3JniGlobal.metrics); + SO(S3JniAutoExtension); + SO(S3JniUdf); +#undef SO +#ifdef SQLITE_JNI_ENABLE_METRICS + printf("Cache info:\n"); + printf("\tJNIEnv cache: %u allocs, %u misses, %u hits\n", + SJG.metrics.nEnvAlloc, SJG.metrics.nEnvMiss, + SJG.metrics.nEnvHit); + printf("Mutex entry:" + "\n\tglobal = %u" + "\n\tenv = %u" + "\n\tnph = %u for S3JniNphOp init" + "\n\thook = %u" + "\n\tperDb = %u" + "\n\tautoExt list = %u" + "\n\tS3JniUdf = %u (free-list)" + "\n\tmetrics = %u\n", + SJG.metrics.nMutexGlobal, SJG.metrics.nMutexEnv, + SJG.metrics.nMutexNph, SJG.metrics.nMutexHook, + SJG.metrics.nMutexPerDb, SJG.metrics.nMutexAutoExt, + SJG.metrics.nMutexUdf, SJG.metrics.nMetrics); + puts("Allocs:"); + printf("\tS3JniDb: %u alloced (*%u = %u bytes), %u recycled\n", + SJG.metrics.nPdbAlloc, (unsigned) sizeof(S3JniDb), + (unsigned)(SJG.metrics.nPdbAlloc * sizeof(S3JniDb)), + SJG.metrics.nPdbRecycled); + printf("\tS3JniUdf: %u alloced (*%u = %u bytes), %u recycled\n", + SJG.metrics.nUdfAlloc, (unsigned) sizeof(S3JniUdf), + (unsigned)(SJG.metrics.nUdfAlloc * sizeof(S3JniUdf)), + SJG.metrics.nUdfRecycled); + printf("\tS3JniHook: %u alloced (*%u = %u bytes), %u recycled\n", + SJG.metrics.nHookAlloc, (unsigned) sizeof(S3JniHook), + (unsigned)(SJG.metrics.nHookAlloc * sizeof(S3JniHook)), + SJG.metrics.nHookRecycled); + printf("\tS3JniEnv: %u alloced (*%u = %u bytes)\n", + SJG.metrics.nEnvAlloc, (unsigned) sizeof(S3JniEnv), + (unsigned)(SJG.metrics.nEnvAlloc * sizeof(S3JniEnv))); + puts("Java-side UDF calls:"); +#define UDF(T) printf("\t%-8s = %u\n", "x" #T, SJG.metrics.udf.n##T) + UDF(Func); UDF(Step); UDF(Final); UDF(Value); UDF(Inverse); +#undef UDF + printf("xDestroy calls across all callback types: %u\n", + SJG.metrics.nDestroy); +#else + puts("Built without SQLITE_JNI_ENABLE_METRICS."); +#endif +} + +//////////////////////////////////////////////////////////////////////// +// End of the sqlite3_... API bindings. Next up, FTS5... +//////////////////////////////////////////////////////////////////////// +#ifdef SQLITE_ENABLE_FTS5 + +/* Creates a verbose JNI Fts5 function name. 
*/ +#define JniFuncNameFtsXA(Suffix) \ + Java_org_sqlite_jni_fts5_Fts5ExtensionApi_ ## Suffix +#define JniFuncNameFtsApi(Suffix) \ + Java_org_sqlite_jni_fts5_fts5_1api_ ## Suffix +#define JniFuncNameFtsTok(Suffix) \ + Java_org_sqlite_jni_fts5_fts5_tokenizer_ ## Suffix + +#define JniDeclFtsXA(ReturnType,Suffix) \ + JNIEXPORT ReturnType JNICALL \ + JniFuncNameFtsXA(Suffix) +#define JniDeclFtsApi(ReturnType,Suffix) \ + JNIEXPORT ReturnType JNICALL \ + JniFuncNameFtsApi(Suffix) +#define JniDeclFtsTok(ReturnType,Suffix) \ + JNIEXPORT ReturnType JNICALL \ + JniFuncNameFtsTok(Suffix) + +#define PtrGet_fts5_api(OBJ) NativePointerHolder_get(OBJ,S3JniNph(fts5_api)) +#define PtrGet_fts5_tokenizer(OBJ) NativePointerHolder_get(OBJ,S3JniNph(fts5_tokenizer)) +#define PtrGet_Fts5Context(OBJ) NativePointerHolder_get(OBJ,S3JniNph(Fts5Context)) +#define PtrGet_Fts5Tokenizer(OBJ) NativePointerHolder_get(OBJ,S3JniNph(Fts5Tokenizer)) +#define s3jni_ftsext() &sFts5Api/*singleton from sqlite3.c*/ +#define Fts5ExtDecl Fts5ExtensionApi const * const ext = s3jni_ftsext() + +/** + State for binding Java-side FTS5 auxiliary functions. +*/ +typedef struct { + jobject jObj /* functor instance */; + jobject jUserData /* 2nd arg to JNI binding of + xCreateFunction(), ostensibly the 3rd arg + to the lib-level xCreateFunction(), except + that we necessarily use that slot for a + Fts5JniAux instance. */; + char * zFuncName /* Only for error reporting and debug logging */; + jmethodID jmid /* callback member's method ID */; +} Fts5JniAux; + +static void Fts5JniAux_free(Fts5JniAux * const s){ + S3JniDeclLocal_env; + if( env ){ + /*MARKER(("FTS5 aux function cleanup: %s\n", s->zFuncName));*/ + s3jni_call_xDestroy(s->jObj); + S3JniUnrefGlobal(s->jObj); + S3JniUnrefGlobal(s->jUserData); + } + sqlite3_free(s->zFuncName); + sqlite3_free(s); +} + +static void Fts5JniAux_xDestroy(void *p){ + if( p ) Fts5JniAux_free(p); +} + +static Fts5JniAux * Fts5JniAux_alloc(JNIEnv * const env, jobject jObj){ + Fts5JniAux * s = s3jni_malloc( sizeof(Fts5JniAux)); + + if( s ){ + jclass klazz; + memset(s, 0, sizeof(Fts5JniAux)); + s->jObj = S3JniRefGlobal(jObj); + klazz = (*env)->GetObjectClass(env, jObj); + s->jmid = (*env)->GetMethodID(env, klazz, "call", + "(Lorg/sqlite/jni/fts5/Fts5ExtensionApi;" + "Lorg/sqlite/jni/fts5/Fts5Context;" + "Lorg/sqlite/jni/capi/sqlite3_context;" + "[Lorg/sqlite/jni/capi/sqlite3_value;)V"); + S3JniUnrefLocal(klazz); + S3JniIfThrew{ + S3JniExceptionReport; + S3JniExceptionClear; + Fts5JniAux_free(s); + s = 0; + } + } + return s; +} + +static inline jobject new_java_Fts5Context(JNIEnv * const env, Fts5Context *sv){ + return NativePointerHolder_new(env, S3JniNph(Fts5Context), sv); +} +static inline jobject new_java_fts5_api(JNIEnv * const env, fts5_api *sv){ + return NativePointerHolder_new(env, S3JniNph(fts5_api), sv); +} + +/* +** Returns a per-JNIEnv global ref to the Fts5ExtensionApi singleton +** instance, or NULL on OOM. +*/ +static jobject s3jni_getFts5ExtensionApi(JNIEnv * const env){ + if( !SJG.fts5.jExt ){ + S3JniGlobal_mutex_enter; + if( !SJG.fts5.jExt ){ + jobject const pNPH = NativePointerHolder_new( + env, S3JniNph(Fts5ExtensionApi), s3jni_ftsext() + ); + if( pNPH ){ + SJG.fts5.jExt = S3JniRefGlobal(pNPH); + S3JniUnrefLocal(pNPH); + } + } + S3JniGlobal_mutex_leave; + } + return SJG.fts5.jExt; +} + +/* +** Returns a pointer to the fts5_api instance for database connection +** db. If an error occurs, returns NULL and leaves an error in the +** database handle (accessible using sqlite3_errcode()/errmsg()). 
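+** This uses the pointer-passing interface documented by FTS5: prepare
+** "SELECT fts5(?1)", bind an output pointer of type "fts5_api_ptr" with
+** sqlite3_bind_pointer(), and step the statement once so that the fts5()
+** SQL function can write the fts5_api pointer into it.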
+*/ +static fts5_api *s3jni_fts5_api_from_db(sqlite3 *db){ + fts5_api *pRet = 0; + sqlite3_stmt *pStmt = 0; + if( SQLITE_OK==sqlite3_prepare(db, "SELECT fts5(?1)", -1, &pStmt, 0) ){ + sqlite3_bind_pointer(pStmt, 1, (void*)&pRet, "fts5_api_ptr", NULL); + sqlite3_step(pStmt); + } + sqlite3_finalize(pStmt); + return pRet; +} + +JniDeclFtsApi(jobject,getInstanceForDb)(JniArgsEnvClass,jobject jDb){ + S3JniDb * const ps = S3JniDb_from_java(jDb); +#if 0 + jobject rv = 0; + if( !ps ) return 0; + else if( ps->fts.jApi ){ + rv = ps->fts.jApi; + }else{ + fts5_api * const pApi = s3jni_fts5_api_from_db(ps->pDb); + if( pApi ){ + rv = new_java_fts5_api(env, pApi); + ps->fts.jApi = rv ? S3JniRefGlobal(rv) : 0; + } + } + return rv; +#else + if( ps && !ps->fts.jApi ){ + S3JniDb_mutex_enter; + if( !ps->fts.jApi ){ + fts5_api * const pApi = s3jni_fts5_api_from_db(ps->pDb); + if( pApi ){ + jobject const rv = new_java_fts5_api(env, pApi); + ps->fts.jApi = rv ? S3JniRefGlobal(rv) : 0; + } + } + S3JniDb_mutex_leave; + } + return ps ? ps->fts.jApi : 0; +#endif +} + + +JniDeclFtsXA(jobject,getInstance)(JniArgsEnvClass){ + return s3jni_getFts5ExtensionApi(env); +} + +JniDeclFtsXA(jint,xColumnCount)(JniArgsEnvObj,jobject jCtx){ + Fts5ExtDecl; + return (jint)ext->xColumnCount(PtrGet_Fts5Context(jCtx)); +} + +JniDeclFtsXA(jint,xColumnSize)(JniArgsEnvObj,jobject jCtx, jint iIdx, jobject jOut32){ + Fts5ExtDecl; + int n1 = 0; + int const rc = ext->xColumnSize(PtrGet_Fts5Context(jCtx), (int)iIdx, &n1); + if( 0==rc ) OutputPointer_set_Int32(env, jOut32, n1); + return rc; +} + +JniDeclFtsXA(jint,xColumnText)(JniArgsEnvObj,jobject jCtx, jint iCol, + jobject jOut){ + Fts5ExtDecl; + const char *pz = 0; + int pn = 0; + int rc = ext->xColumnText(PtrGet_Fts5Context(jCtx), (int)iCol, + &pz, &pn); + if( 0==rc ){ + jstring jstr = pz ? s3jni_utf8_to_jstring( pz, pn) : 0; + if( pz ){ + if( jstr ){ + OutputPointer_set_String(env, jOut, jstr); + S3JniUnrefLocal(jstr)/*jOut has a reference*/; + }else{ + rc = SQLITE_NOMEM; + } + } + } + return (jint)rc; +} + +JniDeclFtsXA(jint,xColumnTotalSize)(JniArgsEnvObj,jobject jCtx, jint iCol, jobject jOut64){ + Fts5ExtDecl; + sqlite3_int64 nOut = 0; + int const rc = ext->xColumnTotalSize(PtrGet_Fts5Context(jCtx), (int)iCol, &nOut); + if( 0==rc && jOut64 ) OutputPointer_set_Int64(env, jOut64, (jlong)nOut); + return (jint)rc; +} + +/* +** Proxy for fts5_extension_function instances plugged in via +** fts5_api::xCreateFunction(). 
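+** The proxy looks up the Fts5JniAux wrapper via xUserData() and invokes
+** the Java-side call(Fts5ExtensionApi, Fts5Context, sqlite3_context,
+** sqlite3_value[]) method resolved in Fts5JniAux_alloc(), reporting any
+** exception thrown by the callback through udf_report_exception().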
+*/ +static void s3jni_fts5_extension_function(Fts5ExtensionApi const *pApi, + Fts5Context *pFts, + sqlite3_context *pCx, + int argc, + sqlite3_value **argv){ + Fts5JniAux * const pAux = pApi->xUserData(pFts); + jobject jpCx = 0; + jobjectArray jArgv = 0; + jobject jpFts = 0; + jobject jFXA; + int rc; + S3JniDeclLocal_env; + + assert(pAux); + jFXA = s3jni_getFts5ExtensionApi(env); + if( !jFXA ) goto error_oom; + jpFts = new_java_Fts5Context(env, pFts); + if( !jpFts ) goto error_oom; + rc = udf_args(env, pCx, argc, argv, &jpCx, &jArgv); + if( rc ) goto error_oom; + (*env)->CallVoidMethod(env, pAux->jObj, pAux->jmid, + jFXA, jpFts, jpCx, jArgv); + S3JniIfThrew{ + udf_report_exception(env, 1, pCx, pAux->zFuncName, "call"); + } + udf_unargs(env, jpCx, argc, jArgv); + S3JniUnrefLocal(jpFts); + S3JniUnrefLocal(jpCx); + S3JniUnrefLocal(jArgv); + return; +error_oom: + s3jni_db_oom( sqlite3_context_db_handle(pCx) ); + assert( !jArgv ); + assert( !jpCx ); + S3JniUnrefLocal(jpFts); + sqlite3_result_error_nomem(pCx); + return; +} + +JniDeclFtsApi(jint,xCreateFunction)(JniArgsEnvObj, jstring jName, + jobject jUserData, jobject jFunc){ + fts5_api * const pApi = PtrGet_fts5_api(jSelf); + int rc; + char * zName; + Fts5JniAux * pAux; + + assert(pApi); + zName = s3jni_jstring_to_utf8( jName, 0); + if(!zName) return SQLITE_NOMEM; + pAux = Fts5JniAux_alloc(env, jFunc); + if( pAux ){ + rc = pApi->xCreateFunction(pApi, zName, pAux, + s3jni_fts5_extension_function, + Fts5JniAux_xDestroy); + }else{ + rc = SQLITE_NOMEM; + } + if( 0==rc ){ + pAux->jUserData = jUserData ? S3JniRefGlobal(jUserData) : 0; + pAux->zFuncName = zName; + }else{ + sqlite3_free(zName); + } + return (jint)rc; +} + + +typedef struct S3JniFts5AuxData S3JniFts5AuxData; +/* +** TODO: this middle-man struct is no longer necessary. Consider +** removing it and passing around jObj itself instead. +*/ +struct S3JniFts5AuxData { + jobject jObj; +}; + +static void S3JniFts5AuxData_xDestroy(void *x){ + if( x ){ + S3JniFts5AuxData * const p = x; + if( p->jObj ){ + S3JniDeclLocal_env; + s3jni_call_xDestroy(p->jObj); + S3JniUnrefGlobal(p->jObj); + } + sqlite3_free(x); + } +} + +JniDeclFtsXA(jobject,xGetAuxdata)(JniArgsEnvObj,jobject jCtx, jboolean bClear){ + Fts5ExtDecl; + jobject rv = 0; + S3JniFts5AuxData * const pAux = ext->xGetAuxdata(PtrGet_Fts5Context(jCtx), bClear); + if( pAux ){ + if( bClear ){ + if( pAux->jObj ){ + rv = S3JniRefLocal(pAux->jObj); + S3JniUnrefGlobal(pAux->jObj); + } + /* Note that we do not call xDestroy() in this case. */ + sqlite3_free(pAux); + }else{ + rv = pAux->jObj; + } + } + return rv; +} + +JniDeclFtsXA(jint,xInst)(JniArgsEnvObj,jobject jCtx, jint iIdx, jobject jOutPhrase, + jobject jOutCol, jobject jOutOff){ + Fts5ExtDecl; + int n1 = 0, n2 = 2, n3 = 0; + int const rc = ext->xInst(PtrGet_Fts5Context(jCtx), (int)iIdx, &n1, &n2, &n3); + if( 0==rc ){ + OutputPointer_set_Int32(env, jOutPhrase, n1); + OutputPointer_set_Int32(env, jOutCol, n2); + OutputPointer_set_Int32(env, jOutOff, n3); + } + return rc; +} + +JniDeclFtsXA(jint,xInstCount)(JniArgsEnvObj,jobject jCtx, jobject jOut32){ + Fts5ExtDecl; + int nOut = 0; + int const rc = ext->xInstCount(PtrGet_Fts5Context(jCtx), &nOut); + if( 0==rc && jOut32 ) OutputPointer_set_Int32(env, jOut32, nOut); + return (jint)rc; +} + +JniDeclFtsXA(jint,xPhraseCount)(JniArgsEnvObj,jobject jCtx){ + Fts5ExtDecl; + return (jint)ext->xPhraseCount(PtrGet_Fts5Context(jCtx)); +} + +/* Copy the 'a' and 'b' fields from pSrc to Fts5PhraseIter object jIter. 
*/ +static void s3jni_phraseIter_NToJ(JNIEnv *const env, + Fts5PhraseIter const * const pSrc, + jobject jIter){ + S3JniGlobalType * const g = &S3JniGlobal; + assert(g->fts5.jPhraseIter.fidA); + (*env)->SetLongField(env, jIter, g->fts5.jPhraseIter.fidA, + S3JniCast_P2L(pSrc->a)); + S3JniExceptionIsFatal("Cannot set Fts5PhraseIter.a field."); + (*env)->SetLongField(env, jIter, g->fts5.jPhraseIter.fidB, + S3JniCast_P2L(pSrc->b)); + S3JniExceptionIsFatal("Cannot set Fts5PhraseIter.b field."); +} + +/* Copy the 'a' and 'b' fields from Fts5PhraseIter object jIter to pDest. */ +static void s3jni_phraseIter_JToN(JNIEnv *const env, jobject jIter, + Fts5PhraseIter * const pDest){ + S3JniGlobalType * const g = &S3JniGlobal; + assert(g->fts5.jPhraseIter.fidA); + pDest->a = S3JniCast_L2P( + (*env)->GetLongField(env, jIter, g->fts5.jPhraseIter.fidA) + ); + S3JniExceptionIsFatal("Cannot get Fts5PhraseIter.a field."); + pDest->b = S3JniCast_L2P( + (*env)->GetLongField(env, jIter, g->fts5.jPhraseIter.fidB) + ); + S3JniExceptionIsFatal("Cannot get Fts5PhraseIter.b field."); +} + +JniDeclFtsXA(jint,xPhraseFirst)(JniArgsEnvObj,jobject jCtx, jint iPhrase, + jobject jIter, jobject jOutCol, + jobject jOutOff){ + Fts5ExtDecl; + Fts5PhraseIter iter; + int rc, iCol = 0, iOff = 0; + rc = ext->xPhraseFirst(PtrGet_Fts5Context(jCtx), (int)iPhrase, + &iter, &iCol, &iOff); + if( 0==rc ){ + OutputPointer_set_Int32(env, jOutCol, iCol); + OutputPointer_set_Int32(env, jOutOff, iOff); + s3jni_phraseIter_NToJ(env, &iter, jIter); + } + return rc; +} + +JniDeclFtsXA(jint,xPhraseFirstColumn)(JniArgsEnvObj,jobject jCtx, jint iPhrase, + jobject jIter, jobject jOutCol){ + Fts5ExtDecl; + Fts5PhraseIter iter; + int rc, iCol = 0; + rc = ext->xPhraseFirstColumn(PtrGet_Fts5Context(jCtx), (int)iPhrase, + &iter, &iCol); + if( 0==rc ){ + OutputPointer_set_Int32(env, jOutCol, iCol); + s3jni_phraseIter_NToJ(env, &iter, jIter); + } + return rc; +} + +JniDeclFtsXA(void,xPhraseNext)(JniArgsEnvObj,jobject jCtx, jobject jIter, + jobject jOutCol, jobject jOutOff){ + Fts5ExtDecl; + Fts5PhraseIter iter; + int iCol = 0, iOff = 0; + s3jni_phraseIter_JToN(env, jIter, &iter); + ext->xPhraseNext(PtrGet_Fts5Context(jCtx), &iter, &iCol, &iOff); + OutputPointer_set_Int32(env, jOutCol, iCol); + OutputPointer_set_Int32(env, jOutOff, iOff); + s3jni_phraseIter_NToJ(env, &iter, jIter); +} + +JniDeclFtsXA(void,xPhraseNextColumn)(JniArgsEnvObj,jobject jCtx, jobject jIter, + jobject jOutCol){ + Fts5ExtDecl; + Fts5PhraseIter iter; + int iCol = 0; + s3jni_phraseIter_JToN(env, jIter, &iter); + ext->xPhraseNextColumn(PtrGet_Fts5Context(jCtx), &iter, &iCol); + OutputPointer_set_Int32(env, jOutCol, iCol); + s3jni_phraseIter_NToJ(env, &iter, jIter); +} + + +JniDeclFtsXA(jint,xPhraseSize)(JniArgsEnvObj,jobject jCtx, jint iPhrase){ + Fts5ExtDecl; + return (jint)ext->xPhraseSize(PtrGet_Fts5Context(jCtx), (int)iPhrase); +} + +/* State for use with xQueryPhrase() and xTokenize(). */ +struct s3jni_xQueryPhraseState { + Fts5ExtensionApi const * ext; + jmethodID midCallback; /* jCallback->call() method */ + jobject jCallback; /* Fts5ExtensionApi.XQueryPhraseCallback instance */ + jobject jFcx; /* (Fts5Context*) for xQueryPhrase() + callback. This is NOT the instance that is + passed to xQueryPhrase(), it's the one + created by xQueryPhrase() for use by its + callback. 
*/ + /* State for xTokenize() */ + struct { + const char * zPrev; + int nPrev; + jbyteArray jba; + } tok; +}; + +static int s3jni_xQueryPhrase(const Fts5ExtensionApi *xapi, + Fts5Context * pFcx, void *pData){ + struct s3jni_xQueryPhraseState * const s = pData; + S3JniDeclLocal_env; + + if( !s->jFcx ){ + s->jFcx = new_java_Fts5Context(env, pFcx); + if( !s->jFcx ) return SQLITE_NOMEM; + } + int rc = (int)(*env)->CallIntMethod(env, s->jCallback, s->midCallback, + SJG.fts5.jExt, s->jFcx); + S3JniIfThrew{ + S3JniExceptionWarnCallbackThrew("xQueryPhrase() callback"); + S3JniExceptionClear; + rc = SQLITE_ERROR; + } + return rc; +} + +JniDeclFtsXA(jint,xQueryPhrase)(JniArgsEnvObj,jobject jFcx, jint iPhrase, + jobject jCallback){ + Fts5ExtDecl; + int rc; + struct s3jni_xQueryPhraseState s; + jclass klazz = jCallback ? (*env)->GetObjectClass(env, jCallback) : NULL; + + if( !klazz ) return SQLITE_MISUSE; + s.jCallback = jCallback; + s.jFcx = 0; + s.ext = ext; + s.midCallback = (*env)->GetMethodID(env, klazz, "call", + "(Lorg/sqlite/jni/fts5/Fts5ExtensionApi;" + "Lorg/sqlite/jni/fts5/Fts5Context;)I"); + S3JniUnrefLocal(klazz); + S3JniExceptionIsFatal("Could not extract xQueryPhraseCallback.call() method."); + rc = ext->xQueryPhrase(PtrGet_Fts5Context(jFcx), iPhrase, &s, + s3jni_xQueryPhrase); + S3JniUnrefLocal(s.jFcx); + return (jint)rc; +} + + +JniDeclFtsXA(jint,xRowCount)(JniArgsEnvObj,jobject jCtx, jobject jOut64){ + Fts5ExtDecl; + sqlite3_int64 nOut = 0; + int const rc = ext->xRowCount(PtrGet_Fts5Context(jCtx), &nOut); + if( 0==rc && jOut64 ) OutputPointer_set_Int64(env, jOut64, (jlong)nOut); + return (jint)rc; +} + +JniDeclFtsXA(jlong,xRowid)(JniArgsEnvObj,jobject jCtx){ + Fts5ExtDecl; + return (jlong)ext->xRowid(PtrGet_Fts5Context(jCtx)); +} + +JniDeclFtsXA(jint,xSetAuxdata)(JniArgsEnvObj,jobject jCtx, jobject jAux){ + Fts5ExtDecl; + int rc; + S3JniFts5AuxData * pAux; + + pAux = s3jni_malloc( sizeof(*pAux)); + if( !pAux ){ + if( jAux ){ + /* Emulate how xSetAuxdata() behaves when it cannot alloc + ** its auxdata wrapper. */ + s3jni_call_xDestroy(jAux); + } + return SQLITE_NOMEM; + } + pAux->jObj = S3JniRefGlobal(jAux); + rc = ext->xSetAuxdata(PtrGet_Fts5Context(jCtx), pAux, + S3JniFts5AuxData_xDestroy); + return rc; +} + +/* xToken() impl for xTokenize(). */ +static int s3jni_xTokenize_xToken(void *p, int tFlags, const char* z, + int nZ, int iStart, int iEnd){ + int rc; + S3JniDeclLocal_env; + struct s3jni_xQueryPhraseState * const s = p; + jbyteArray jba; + + S3JniUnrefLocal(s->tok.jba); + s->tok.zPrev = z; + s->tok.nPrev = nZ; + s->tok.jba = s3jni_new_jbyteArray(z, nZ); + if( !s->tok.jba ) return SQLITE_NOMEM; + jba = s->tok.jba; + rc = (int)(*env)->CallIntMethod(env, s->jCallback, s->midCallback, + (jint)tFlags, jba, (jint)iStart, + (jint)iEnd); + S3JniIfThrew { + S3JniExceptionWarnCallbackThrew("xTokenize() callback"); + rc = SQLITE_ERROR; + } + return rc; +} + +/* +** Proxy for Fts5ExtensionApi.xTokenize() and +** fts5_tokenizer.xTokenize() +*/ +static jint s3jni_fts5_xTokenize(JniArgsEnvObj, S3JniNphOp const *pRef, + jint tokFlags, jobject jFcx, + jbyteArray jbaText, jobject jCallback){ + Fts5ExtDecl; + struct s3jni_xQueryPhraseState s; + int rc = 0; + jbyte * const pText = jCallback ? s3jni_jbyteArray_bytes(jbaText) : 0; + jsize nText = pText ? (*env)->GetArrayLength(env, jbaText) : 0; + jclass const klazz = jCallback ? 
(*env)->GetObjectClass(env, jCallback) : NULL; + + if( !klazz ) return SQLITE_MISUSE; + memset(&s, 0, sizeof(s)); + s.jCallback = jCallback; + s.jFcx = jFcx; + s.ext = ext; + s.midCallback = (*env)->GetMethodID(env, klazz, "call", "(I[BII)I"); + S3JniUnrefLocal(klazz); + S3JniIfThrew { + S3JniExceptionReport; + S3JniExceptionClear; + s3jni_jbyteArray_release(jbaText, pText); + return SQLITE_ERROR; + } + s.tok.jba = S3JniRefLocal(jbaText); + s.tok.zPrev = (const char *)pText; + s.tok.nPrev = (int)nText; + if( pRef == S3JniNph(Fts5ExtensionApi) ){ + rc = ext->xTokenize(PtrGet_Fts5Context(jFcx), + (const char *)pText, (int)nText, + &s, s3jni_xTokenize_xToken); + }else if( pRef == S3JniNph(fts5_tokenizer) ){ + fts5_tokenizer * const pTok = PtrGet_fts5_tokenizer(jSelf); + rc = pTok->xTokenize(PtrGet_Fts5Tokenizer(jFcx), &s, tokFlags, + (const char *)pText, (int)nText, + s3jni_xTokenize_xToken); + }else{ + (*env)->FatalError(env, "This cannot happen. Maintenance required."); + } + if( s.tok.jba ){ + assert( s.tok.zPrev ); + S3JniUnrefLocal(s.tok.jba); + } + s3jni_jbyteArray_release(jbaText, pText); + return (jint)rc; +} + +JniDeclFtsXA(jint,xTokenize)(JniArgsEnvObj,jobject jFcx, jbyteArray jbaText, + jobject jCallback){ + return s3jni_fts5_xTokenize(env, jSelf, S3JniNph(Fts5ExtensionApi), + 0, jFcx, jbaText, jCallback); +} + +JniDeclFtsTok(jint,xTokenize)(JniArgsEnvObj,jobject jFcx, jint tokFlags, + jbyteArray jbaText, jobject jCallback){ + return s3jni_fts5_xTokenize(env, jSelf, S3JniNph(Fts5Tokenizer), + tokFlags, jFcx, jbaText, jCallback); +} + + +JniDeclFtsXA(jobject,xUserData)(JniArgsEnvObj,jobject jFcx){ + Fts5ExtDecl; + Fts5JniAux * const pAux = ext->xUserData(PtrGet_Fts5Context(jFcx)); + return pAux ? pAux->jUserData : 0; +} + +#endif /* SQLITE_ENABLE_FTS5 */ + +//////////////////////////////////////////////////////////////////////// +// End of the main API bindings. Start of SQLTester bits... +//////////////////////////////////////////////////////////////////////// + +#ifdef SQLITE_JNI_ENABLE_SQLTester +typedef struct SQLTesterJni SQLTesterJni; +struct SQLTesterJni { + sqlite3_int64 nDup; +}; +static SQLTesterJni SQLTester = { + 0 +}; + +static void SQLTester_dup_destructor(void*pToFree){ + u64 *p = (u64*)pToFree; + assert( p!=0 ); + p--; + assert( p[0]==0x2bbf4b7c ); + p[0] = 0; + p[1] = 0; + sqlite3_free(p); +} + +/* +** Implementation of +** +** dup(TEXT) +** +** This SQL function simply makes a copy of its text argument. But it +** returns the result using a custom destructor, in order to provide +** tests for the use of Mem.xDel() in the SQLite VDBE. +*/ +static void SQLTester_dup_func( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + u64 *pOut; + char *z; + int n = sqlite3_value_bytes(argv[0]); + SQLTesterJni * const p = (SQLTesterJni *)sqlite3_user_data(context); + S3JniDeclLocal_env; + + ++p->nDup; + if( n>0 && (pOut = s3jni_malloc( (n+16)&~7 ))!=0 ){ + pOut[0] = 0x2bbf4b7c; + z = (char*)&pOut[1]; + memcpy(z, sqlite3_value_text(argv[0]), n); + z[n] = 0; + sqlite3_result_text(context, z, n, SQLTester_dup_destructor); + } + return; +} + +/* +** Return the number of calls to the dup() SQL function since the +** SQLTester context was opened or since the last dup_count() call. 
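+** E.g. (hypothetical table t, column x) running "SELECT dup(x) FROM t"
+** and then "SELECT dup_count()" reports one dup() call per row of t,
+** and the counter resets to zero as a side effect of the dup_count()
+** call.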
+*/ +static void SQLTester_dup_count_func( + sqlite3_context *context, + int argc, + sqlite3_value **argv +){ + SQLTesterJni * const p = (SQLTesterJni *)sqlite3_user_data(context); + sqlite3_result_int64(context, p->nDup); + p->nDup = 0; +} + +/* +** Return non-zero if string z matches glob pattern zGlob and zero if the +** pattern does not match. +** +** To repeat: +** +** zero == no match +** non-zero == match +** +** Globbing rules: +** +** '*' Matches any sequence of zero or more characters. +** +** '?' Matches exactly one character. +** +** [...] Matches one character from the enclosed list of +** characters. +** +** [^...] Matches one character not in the enclosed list. +** +** '#' Matches any sequence of one or more digits with an +** optional + or - sign in front, or a hexadecimal +** literal of the form 0x... +*/ +static int SQLTester_strnotglob(const char *zGlob, const char *z){ + int c, c2; + int invert; + int seen; + + while( (c = (*(zGlob++)))!=0 ){ + if( c=='*' ){ + while( (c=(*(zGlob++))) == '*' || c=='?' ){ + if( c=='?' && (*(z++))==0 ) return 0; + } + if( c==0 ){ + return 1; + }else if( c=='[' ){ + while( *z && SQLTester_strnotglob(zGlob-1,z)==0 ){ + z++; + } + return (*z)!=0; + } + while( (c2 = (*(z++)))!=0 ){ + while( c2!=c ){ + c2 = *(z++); + if( c2==0 ) return 0; + } + if( SQLTester_strnotglob(zGlob,z) ) return 1; + } + return 0; + }else if( c=='?' ){ + if( (*(z++))==0 ) return 0; + }else if( c=='[' ){ + int prior_c = 0; + seen = 0; + invert = 0; + c = *(z++); + if( c==0 ) return 0; + c2 = *(zGlob++); + if( c2=='^' ){ + invert = 1; + c2 = *(zGlob++); + } + if( c2==']' ){ + if( c==']' ) seen = 1; + c2 = *(zGlob++); + } + while( c2 && c2!=']' ){ + if( c2=='-' && zGlob[0]!=']' && zGlob[0]!=0 && prior_c>0 ){ + c2 = *(zGlob++); + if( c>=prior_c && c<=c2 ) seen = 1; + prior_c = 0; + }else{ + if( c==c2 ){ + seen = 1; + } + prior_c = c2; + } + c2 = *(zGlob++); + } + if( c2==0 || (seen ^ invert)==0 ) return 0; + }else if( c=='#' ){ + if( z[0]=='0' + && (z[1]=='x' || z[1]=='X') + && sqlite3Isxdigit(z[2]) + ){ + z += 3; + while( sqlite3Isxdigit(z[0]) ){ z++; } + }else{ + if( (z[0]=='-' || z[0]=='+') && sqlite3Isdigit(z[1]) ) z++; + if( !sqlite3Isdigit(z[0]) ) return 0; + z++; + while( sqlite3Isdigit(z[0]) ){ z++; } + } + }else{ + if( c!=(*(z++)) ) return 0; + } + } + return *z==0; +} + +JNIEXPORT jint JNICALL +Java_org_sqlite_jni_capi_SQLTester_strglob( + JniArgsEnvClass, jbyteArray baG, jbyteArray baT +){ + int rc = 0; + jbyte * const pG = s3jni_jbyteArray_bytes(baG); + jbyte * const pT = pG ? s3jni_jbyteArray_bytes(baT) : 0; + + s3jni_oom_fatal(pT); + /* Note that we're relying on the byte arrays having been + NUL-terminated on the Java side. */ + rc = !SQLTester_strnotglob((const char *)pG, (const char *)pT); + s3jni_jbyteArray_release(baG, pG); + s3jni_jbyteArray_release(baT, pT); + return rc; +} + + +static int SQLTester_auto_extension(sqlite3 *pDb, const char **pzErr, + const struct sqlite3_api_routines *ignored){ + sqlite3_create_function(pDb, "dup", 1, SQLITE_UTF8, &SQLTester, + SQLTester_dup_func, 0, 0); + sqlite3_create_function(pDb, "dup_count", 0, SQLITE_UTF8, &SQLTester, + SQLTester_dup_count_func, 0, 0); + return 0; +} + +JNIEXPORT void JNICALL +Java_org_sqlite_jni_capi_SQLTester_installCustomExtensions(JniArgsEnvClass){ + sqlite3_auto_extension( (void(*)(void))SQLTester_auto_extension ); +} + +#endif /* SQLITE_JNI_ENABLE_SQLTester */ +//////////////////////////////////////////////////////////////////////// +// End of SQLTester bindings. Start of lower-level bits. 
+//////////////////////////////////////////////////////////////////////// + +/* +** Called during static init of the CApi class to set up global +** state. +*/ +JNIEXPORT void JNICALL +Java_org_sqlite_jni_capi_CApi_init(JniArgsEnvClass){ + jclass klazz; + + memset(&S3JniGlobal, 0, sizeof(S3JniGlobal)); + if( (*env)->GetJavaVM(env, &SJG.jvm) ){ + (*env)->FatalError(env, "GetJavaVM() failure shouldn't be possible."); + return; + } + + /* Grab references to various global classes and objects... */ + SJG.g.cLong = S3JniRefGlobal((*env)->FindClass(env,"java/lang/Long")); + S3JniExceptionIsFatal("Error getting reference to Long class."); + SJG.g.ctorLong1 = (*env)->GetMethodID(env, SJG.g.cLong, + "", "(J)V"); + S3JniExceptionIsFatal("Error getting reference to Long constructor."); + + SJG.g.cString = S3JniRefGlobal((*env)->FindClass(env,"java/lang/String")); + S3JniExceptionIsFatal("Error getting reference to String class."); + SJG.g.ctorStringBA = + (*env)->GetMethodID(env, SJG.g.cString, + "", "([BLjava/nio/charset/Charset;)V"); + S3JniExceptionIsFatal("Error getting reference to String(byte[],Charset) ctor."); + SJG.g.stringGetBytes = + (*env)->GetMethodID(env, SJG.g.cString, + "getBytes", "(Ljava/nio/charset/Charset;)[B"); + S3JniExceptionIsFatal("Error getting reference to String.getBytes(Charset)."); + + { /* java.nio.charset.StandardCharsets.UTF_8 */ + jfieldID fUtf8; + klazz = (*env)->FindClass(env,"java/nio/charset/StandardCharsets"); + S3JniExceptionIsFatal("Error getting reference to StandardCharsets class."); + fUtf8 = (*env)->GetStaticFieldID(env, klazz, "UTF_8", + "Ljava/nio/charset/Charset;"); + S3JniExceptionIsFatal("Error getting StandardCharsets.UTF_8 field."); + SJG.g.oCharsetUtf8 = + S3JniRefGlobal((*env)->GetStaticObjectField(env, klazz, fUtf8)); + S3JniExceptionIsFatal("Error getting reference to StandardCharsets.UTF_8."); + S3JniUnrefLocal(klazz); + } + +#ifdef SQLITE_ENABLE_FTS5 + klazz = (*env)->FindClass(env, "org/sqlite/jni/fts5/Fts5PhraseIter"); + S3JniExceptionIsFatal("Error getting reference to org.sqlite.jni.fts5.Fts5PhraseIter."); + SJG.fts5.jPhraseIter.fidA = (*env)->GetFieldID(env, klazz, "a", "J"); + S3JniExceptionIsFatal("Cannot get Fts5PhraseIter.a field."); + SJG.fts5.jPhraseIter.fidB = (*env)->GetFieldID(env, klazz, "b", "J"); + S3JniExceptionIsFatal("Cannot get Fts5PhraseIter.b field."); + S3JniUnrefLocal(klazz); +#endif + + SJG.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.mutex ); + SJG.hook.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.hook.mutex ); + SJG.nph.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.nph.mutex ); + SJG.envCache.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.envCache.mutex ); + SJG.perDb.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.perDb.mutex ); + SJG.autoExt.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.autoExt.mutex ); + +#if S3JNI_METRICS_MUTEX + SJG.metrics.mutex = sqlite3_mutex_alloc(SQLITE_MUTEX_FAST); + s3jni_oom_fatal( SJG.metrics.mutex ); +#endif + + { + /* Test whether this JVM supports direct memory access via + ByteBuffer. 
*/ + unsigned char buf[16] = {0}; + jobject bb = (*env)->NewDirectByteBuffer(env, buf, 16); + if( bb ){ + SJG.g.byteBuffer.klazz = S3JniRefGlobal((*env)->GetObjectClass(env, bb)); + SJG.g.byteBuffer.midAlloc = (*env)->GetStaticMethodID( + env, SJG.g.byteBuffer.klazz, "allocateDirect", "(I)Ljava/nio/ByteBuffer;" + ); + S3JniExceptionIsFatal("Error getting ByteBuffer.allocateDirect() method."); + SJG.g.byteBuffer.midLimit = (*env)->GetMethodID( + env, SJG.g.byteBuffer.klazz, "limit", "()I" + ); + S3JniExceptionIsFatal("Error getting ByteBuffer.limit() method."); + S3JniUnrefLocal(bb); + }else{ + SJG.g.byteBuffer.klazz = 0; + SJG.g.byteBuffer.midAlloc = 0; + } + } + + sqlite3_shutdown() + /* So that it becomes legal for Java-level code to call + ** sqlite3_config(). */; +} diff --git a/ext/jni/src/c/sqlite3-jni.h b/ext/jni/src/c/sqlite3-jni.h new file mode 100644 index 0000000000..81af5cbde1 --- /dev/null +++ b/ext/jni/src/c/sqlite3-jni.h @@ -0,0 +1,2469 @@ +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_sqlite_jni_capi_CApi */ + +#ifndef _Included_org_sqlite_jni_capi_CApi +#define _Included_org_sqlite_jni_capi_CApi +#ifdef __cplusplus +extern "C" { +#endif +#undef org_sqlite_jni_capi_CApi_SQLITE_ACCESS_EXISTS +#define org_sqlite_jni_capi_CApi_SQLITE_ACCESS_EXISTS 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_ACCESS_READWRITE +#define org_sqlite_jni_capi_CApi_SQLITE_ACCESS_READWRITE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_ACCESS_READ +#define org_sqlite_jni_capi_CApi_SQLITE_ACCESS_READ 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_DENY +#define org_sqlite_jni_capi_CApi_SQLITE_DENY 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_IGNORE +#define org_sqlite_jni_capi_CApi_SQLITE_IGNORE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_INDEX +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_INDEX 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TABLE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_INDEX +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_INDEX 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_TABLE 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_TRIGGER +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_TRIGGER 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_VIEW +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TEMP_VIEW 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_TRIGGER +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_TRIGGER 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_VIEW +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_VIEW 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_DELETE +#define org_sqlite_jni_capi_CApi_SQLITE_DELETE 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_INDEX +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_INDEX 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_TABLE 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_INDEX +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_INDEX 12L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_TABLE 13L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_TRIGGER +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_TRIGGER 14L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_VIEW +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_TEMP_VIEW 15L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_TRIGGER +#define 
org_sqlite_jni_capi_CApi_SQLITE_DROP_TRIGGER 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_VIEW +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_VIEW 17L +#undef org_sqlite_jni_capi_CApi_SQLITE_INSERT +#define org_sqlite_jni_capi_CApi_SQLITE_INSERT 18L +#undef org_sqlite_jni_capi_CApi_SQLITE_PRAGMA +#define org_sqlite_jni_capi_CApi_SQLITE_PRAGMA 19L +#undef org_sqlite_jni_capi_CApi_SQLITE_READ +#define org_sqlite_jni_capi_CApi_SQLITE_READ 20L +#undef org_sqlite_jni_capi_CApi_SQLITE_SELECT +#define org_sqlite_jni_capi_CApi_SQLITE_SELECT 21L +#undef org_sqlite_jni_capi_CApi_SQLITE_TRANSACTION +#define org_sqlite_jni_capi_CApi_SQLITE_TRANSACTION 22L +#undef org_sqlite_jni_capi_CApi_SQLITE_UPDATE +#define org_sqlite_jni_capi_CApi_SQLITE_UPDATE 23L +#undef org_sqlite_jni_capi_CApi_SQLITE_ATTACH +#define org_sqlite_jni_capi_CApi_SQLITE_ATTACH 24L +#undef org_sqlite_jni_capi_CApi_SQLITE_DETACH +#define org_sqlite_jni_capi_CApi_SQLITE_DETACH 25L +#undef org_sqlite_jni_capi_CApi_SQLITE_ALTER_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_ALTER_TABLE 26L +#undef org_sqlite_jni_capi_CApi_SQLITE_REINDEX +#define org_sqlite_jni_capi_CApi_SQLITE_REINDEX 27L +#undef org_sqlite_jni_capi_CApi_SQLITE_ANALYZE +#define org_sqlite_jni_capi_CApi_SQLITE_ANALYZE 28L +#undef org_sqlite_jni_capi_CApi_SQLITE_CREATE_VTABLE +#define org_sqlite_jni_capi_CApi_SQLITE_CREATE_VTABLE 29L +#undef org_sqlite_jni_capi_CApi_SQLITE_DROP_VTABLE +#define org_sqlite_jni_capi_CApi_SQLITE_DROP_VTABLE 30L +#undef org_sqlite_jni_capi_CApi_SQLITE_FUNCTION +#define org_sqlite_jni_capi_CApi_SQLITE_FUNCTION 31L +#undef org_sqlite_jni_capi_CApi_SQLITE_SAVEPOINT +#define org_sqlite_jni_capi_CApi_SQLITE_SAVEPOINT 32L +#undef org_sqlite_jni_capi_CApi_SQLITE_RECURSIVE +#define org_sqlite_jni_capi_CApi_SQLITE_RECURSIVE 33L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATIC +#define org_sqlite_jni_capi_CApi_SQLITE_STATIC 0LL +#undef org_sqlite_jni_capi_CApi_SQLITE_TRANSIENT +#define org_sqlite_jni_capi_CApi_SQLITE_TRANSIENT -1LL +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESETSTART_INVERT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESETSTART_INVERT 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_NOSAVEPOINT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_NOSAVEPOINT 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_INVERT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_INVERT 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_IGNORENOOP +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESETAPPLY_IGNORENOOP 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_DATA +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_DATA 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_NOTFOUND +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_NOTFOUND 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_CONFLICT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_CONFLICT 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_CONSTRAINT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_CONSTRAINT 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_FOREIGN_KEY +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_FOREIGN_KEY 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_OMIT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_OMIT 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_REPLACE +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_REPLACE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_ABORT +#define org_sqlite_jni_capi_CApi_SQLITE_CHANGESET_ABORT 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SINGLETHREAD 
+#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SINGLETHREAD 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MULTITHREAD +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MULTITHREAD 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SERIALIZED +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SERIALIZED 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MALLOC +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MALLOC 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETMALLOC +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETMALLOC 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SCRATCH +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SCRATCH 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PAGECACHE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PAGECACHE 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_HEAP +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_HEAP 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MEMSTATUS +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MEMSTATUS 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MUTEX +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MUTEX 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETMUTEX +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETMUTEX 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_LOOKASIDE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_LOOKASIDE 13L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE 14L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETPCACHE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETPCACHE 15L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_LOG +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_LOG 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_URI +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_URI 17L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE2 +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE2 18L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETPCACHE2 +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_GETPCACHE2 19L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_COVERING_INDEX_SCAN +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_COVERING_INDEX_SCAN 20L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SQLLOG +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SQLLOG 21L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MMAP_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MMAP_SIZE 22L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_WIN32_HEAPSIZE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_WIN32_HEAPSIZE 23L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE_HDRSZ +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PCACHE_HDRSZ 24L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PMASZ +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_PMASZ 25L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_STMTJRNL_SPILL +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_STMTJRNL_SPILL 26L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SMALL_MALLOC +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SMALL_MALLOC 27L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SORTERREF_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_SORTERREF_SIZE 28L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MEMDB_MAXSIZE +#define org_sqlite_jni_capi_CApi_SQLITE_CONFIG_MEMDB_MAXSIZE 29L +#undef org_sqlite_jni_capi_CApi_SQLITE_INTEGER +#define org_sqlite_jni_capi_CApi_SQLITE_INTEGER 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_FLOAT +#define org_sqlite_jni_capi_CApi_SQLITE_FLOAT 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_TEXT +#define 
org_sqlite_jni_capi_CApi_SQLITE_TEXT 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_BLOB +#define org_sqlite_jni_capi_CApi_SQLITE_BLOB 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_NULL +#define org_sqlite_jni_capi_CApi_SQLITE_NULL 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_MAINDBNAME +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_MAINDBNAME 1000L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LOOKASIDE +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LOOKASIDE 1001L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_FKEY +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_FKEY 1002L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_TRIGGER +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_TRIGGER 1003L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_QPSG +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_QPSG 1007L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_TRIGGER_EQP +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_TRIGGER_EQP 1008L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_RESET_DATABASE +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_RESET_DATABASE 1009L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DEFENSIVE +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DEFENSIVE 1010L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_WRITABLE_SCHEMA +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_WRITABLE_SCHEMA 1011L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LEGACY_ALTER_TABLE +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LEGACY_ALTER_TABLE 1012L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DQS_DML +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DQS_DML 1013L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DQS_DDL +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_DQS_DDL 1014L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_VIEW +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_ENABLE_VIEW 1015L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LEGACY_FILE_FORMAT +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_LEGACY_FILE_FORMAT 1016L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_TRUSTED_SCHEMA +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_TRUSTED_SCHEMA 1017L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_STMT_SCANSTATUS +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_STMT_SCANSTATUS 1018L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_REVERSE_SCANORDER +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_REVERSE_SCANORDER 1019L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_MAX +#define org_sqlite_jni_capi_CApi_SQLITE_DBCONFIG_MAX 1019L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_USED +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_USED 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_USED +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_USED 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_SCHEMA_USED +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_SCHEMA_USED 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_STMT_USED +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_STMT_USED 3L +#undef 
org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_HIT +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_HIT 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_HIT +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_HIT 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_MISS +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_MISS 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_WRITE 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_DEFERRED_FKS +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_DEFERRED_FKS 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_USED_SHARED +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_USED_SHARED 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_SPILL +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_CACHE_SPILL 12L +#undef org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_MAX +#define org_sqlite_jni_capi_CApi_SQLITE_DBSTATUS_MAX 12L +#undef org_sqlite_jni_capi_CApi_SQLITE_UTF8 +#define org_sqlite_jni_capi_CApi_SQLITE_UTF8 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_UTF16LE +#define org_sqlite_jni_capi_CApi_SQLITE_UTF16LE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_UTF16BE +#define org_sqlite_jni_capi_CApi_SQLITE_UTF16BE 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_UTF16 +#define org_sqlite_jni_capi_CApi_SQLITE_UTF16 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_UTF16_ALIGNED +#define org_sqlite_jni_capi_CApi_SQLITE_UTF16_ALIGNED 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LOCKSTATE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LOCKSTATE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_GET_LOCKPROXYFILE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_GET_LOCKPROXYFILE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SET_LOCKPROXYFILE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SET_LOCKPROXYFILE 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LAST_ERRNO +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LAST_ERRNO 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SIZE_HINT +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SIZE_HINT 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CHUNK_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CHUNK_SIZE 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_FILE_POINTER +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_FILE_POINTER 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SYNC_OMITTED +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SYNC_OMITTED 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_AV_RETRY +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_AV_RETRY 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PERSIST_WAL +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PERSIST_WAL 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_OVERWRITE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_OVERWRITE 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_VFSNAME +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_VFSNAME 12L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_POWERSAFE_OVERWRITE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_POWERSAFE_OVERWRITE 13L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PRAGMA +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PRAGMA 14L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_BUSYHANDLER +#define 
org_sqlite_jni_capi_CApi_SQLITE_FCNTL_BUSYHANDLER 15L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_TEMPFILENAME +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_TEMPFILENAME 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_MMAP_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_MMAP_SIZE 18L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_TRACE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_TRACE 19L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_HAS_MOVED +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_HAS_MOVED 20L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SYNC +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SYNC 21L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_COMMIT_PHASETWO +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_COMMIT_PHASETWO 22L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_SET_HANDLE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_SET_HANDLE 23L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WAL_BLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WAL_BLOCK 24L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_ZIPVFS +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_ZIPVFS 25L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RBU +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RBU 26L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_VFS_POINTER +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_VFS_POINTER 27L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_JOURNAL_POINTER +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_JOURNAL_POINTER 28L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_GET_HANDLE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_WIN32_GET_HANDLE 29L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PDB +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_PDB 30L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_BEGIN_ATOMIC_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_BEGIN_ATOMIC_WRITE 31L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_COMMIT_ATOMIC_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_COMMIT_ATOMIC_WRITE 32L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE 33L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LOCK_TIMEOUT +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_LOCK_TIMEOUT 34L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_DATA_VERSION +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_DATA_VERSION 35L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SIZE_LIMIT +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_SIZE_LIMIT 36L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKPT_DONE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKPT_DONE 37L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RESERVE_BYTES +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RESERVE_BYTES 38L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKPT_START +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKPT_START 39L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_EXTERNAL_READER +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_EXTERNAL_READER 40L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKSM_FILE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_CKSM_FILE 41L +#undef org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RESET_CACHE +#define org_sqlite_jni_capi_CApi_SQLITE_FCNTL_RESET_CACHE 42L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCK_NONE +#define org_sqlite_jni_capi_CApi_SQLITE_LOCK_NONE 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCK_SHARED +#define org_sqlite_jni_capi_CApi_SQLITE_LOCK_SHARED 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCK_RESERVED +#define org_sqlite_jni_capi_CApi_SQLITE_LOCK_RESERVED 2L +#undef 
org_sqlite_jni_capi_CApi_SQLITE_LOCK_PENDING +#define org_sqlite_jni_capi_CApi_SQLITE_LOCK_PENDING 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCK_EXCLUSIVE +#define org_sqlite_jni_capi_CApi_SQLITE_LOCK_EXCLUSIVE 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC512 +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC512 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC1K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC1K 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC2K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC2K 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC4K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC4K 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC8K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC8K 32L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC16K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC16K 64L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC32K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC32K 128L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC64K +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_ATOMIC64K 256L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_SAFE_APPEND +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_SAFE_APPEND 512L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_SEQUENTIAL +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_SEQUENTIAL 1024L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN 2048L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_POWERSAFE_OVERWRITE +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_POWERSAFE_OVERWRITE 4096L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_IMMUTABLE +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_IMMUTABLE 8192L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOCAP_BATCH_ATOMIC +#define org_sqlite_jni_capi_CApi_SQLITE_IOCAP_BATCH_ATOMIC 16384L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_LENGTH +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_LENGTH 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_SQL_LENGTH +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_SQL_LENGTH 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_COLUMN +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_COLUMN 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_EXPR_DEPTH +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_EXPR_DEPTH 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_COMPOUND_SELECT +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_COMPOUND_SELECT 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_VDBE_OP +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_VDBE_OP 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_FUNCTION_ARG +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_FUNCTION_ARG 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_ATTACHED +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_ATTACHED 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_LIKE_PATTERN_LENGTH +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_LIKE_PATTERN_LENGTH 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_VARIABLE_NUMBER +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_VARIABLE_NUMBER 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_TRIGGER_DEPTH +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_TRIGGER_DEPTH 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_LIMIT_WORKER_THREADS +#define org_sqlite_jni_capi_CApi_SQLITE_LIMIT_WORKER_THREADS 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_READONLY +#define 
org_sqlite_jni_capi_CApi_SQLITE_OPEN_READONLY 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_READWRITE +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_READWRITE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_CREATE +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_CREATE 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_URI +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_URI 64L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_MEMORY +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_MEMORY 128L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_NOMUTEX +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_NOMUTEX 32768L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_FULLMUTEX +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_FULLMUTEX 65536L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_SHAREDCACHE +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_SHAREDCACHE 131072L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_PRIVATECACHE +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_PRIVATECACHE 262144L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_NOFOLLOW +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_NOFOLLOW 16777216L +#undef org_sqlite_jni_capi_CApi_SQLITE_OPEN_EXRESCODE +#define org_sqlite_jni_capi_CApi_SQLITE_OPEN_EXRESCODE 33554432L +#undef org_sqlite_jni_capi_CApi_SQLITE_PREPARE_PERSISTENT +#define org_sqlite_jni_capi_CApi_SQLITE_PREPARE_PERSISTENT 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NO_VTAB +#define org_sqlite_jni_capi_CApi_SQLITE_PREPARE_NO_VTAB 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_OK +#define org_sqlite_jni_capi_CApi_SQLITE_OK 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_ERROR +#define org_sqlite_jni_capi_CApi_SQLITE_ERROR 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_INTERNAL +#define org_sqlite_jni_capi_CApi_SQLITE_INTERNAL 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_PERM +#define org_sqlite_jni_capi_CApi_SQLITE_PERM 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_ABORT +#define org_sqlite_jni_capi_CApi_SQLITE_ABORT 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_BUSY +#define org_sqlite_jni_capi_CApi_SQLITE_BUSY 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCKED +#define org_sqlite_jni_capi_CApi_SQLITE_LOCKED 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOMEM +#define org_sqlite_jni_capi_CApi_SQLITE_NOMEM 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_INTERRUPT +#define org_sqlite_jni_capi_CApi_SQLITE_INTERRUPT 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR 10L +#undef org_sqlite_jni_capi_CApi_SQLITE_CORRUPT +#define org_sqlite_jni_capi_CApi_SQLITE_CORRUPT 11L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOTFOUND +#define org_sqlite_jni_capi_CApi_SQLITE_NOTFOUND 12L +#undef org_sqlite_jni_capi_CApi_SQLITE_FULL +#define org_sqlite_jni_capi_CApi_SQLITE_FULL 13L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN 14L +#undef org_sqlite_jni_capi_CApi_SQLITE_PROTOCOL +#define org_sqlite_jni_capi_CApi_SQLITE_PROTOCOL 15L +#undef org_sqlite_jni_capi_CApi_SQLITE_EMPTY +#define org_sqlite_jni_capi_CApi_SQLITE_EMPTY 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_SCHEMA +#define org_sqlite_jni_capi_CApi_SQLITE_SCHEMA 17L +#undef org_sqlite_jni_capi_CApi_SQLITE_TOOBIG +#define org_sqlite_jni_capi_CApi_SQLITE_TOOBIG 18L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT 19L +#undef org_sqlite_jni_capi_CApi_SQLITE_MISMATCH +#define org_sqlite_jni_capi_CApi_SQLITE_MISMATCH 20L +#undef 
org_sqlite_jni_capi_CApi_SQLITE_MISUSE +#define org_sqlite_jni_capi_CApi_SQLITE_MISUSE 21L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOLFS +#define org_sqlite_jni_capi_CApi_SQLITE_NOLFS 22L +#undef org_sqlite_jni_capi_CApi_SQLITE_AUTH +#define org_sqlite_jni_capi_CApi_SQLITE_AUTH 23L +#undef org_sqlite_jni_capi_CApi_SQLITE_FORMAT +#define org_sqlite_jni_capi_CApi_SQLITE_FORMAT 24L +#undef org_sqlite_jni_capi_CApi_SQLITE_RANGE +#define org_sqlite_jni_capi_CApi_SQLITE_RANGE 25L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOTADB +#define org_sqlite_jni_capi_CApi_SQLITE_NOTADB 26L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOTICE +#define org_sqlite_jni_capi_CApi_SQLITE_NOTICE 27L +#undef org_sqlite_jni_capi_CApi_SQLITE_WARNING +#define org_sqlite_jni_capi_CApi_SQLITE_WARNING 28L +#undef org_sqlite_jni_capi_CApi_SQLITE_ROW +#define org_sqlite_jni_capi_CApi_SQLITE_ROW 100L +#undef org_sqlite_jni_capi_CApi_SQLITE_DONE +#define org_sqlite_jni_capi_CApi_SQLITE_DONE 101L +#undef org_sqlite_jni_capi_CApi_SQLITE_ERROR_MISSING_COLLSEQ +#define org_sqlite_jni_capi_CApi_SQLITE_ERROR_MISSING_COLLSEQ 257L +#undef org_sqlite_jni_capi_CApi_SQLITE_ERROR_RETRY +#define org_sqlite_jni_capi_CApi_SQLITE_ERROR_RETRY 513L +#undef org_sqlite_jni_capi_CApi_SQLITE_ERROR_SNAPSHOT +#define org_sqlite_jni_capi_CApi_SQLITE_ERROR_SNAPSHOT 769L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_READ +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_READ 266L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHORT_READ +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHORT_READ 522L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_WRITE 778L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_FSYNC +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_FSYNC 1034L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_DIR_FSYNC +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_DIR_FSYNC 1290L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_TRUNCATE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_TRUNCATE 1546L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_FSTAT +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_FSTAT 1802L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_UNLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_UNLOCK 2058L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_RDLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_RDLOCK 2314L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_DELETE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_DELETE 2570L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_BLOCKED +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_BLOCKED 2826L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_NOMEM +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_NOMEM 3082L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_ACCESS +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_ACCESS 3338L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_CHECKRESERVEDLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_CHECKRESERVEDLOCK 3594L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_LOCK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_LOCK 3850L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_CLOSE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_CLOSE 4106L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_DIR_CLOSE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_DIR_CLOSE 4362L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMOPEN +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMOPEN 4618L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMSIZE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMSIZE 4874L +#undef 
org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMLOCK 5130L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMMAP +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SHMMAP 5386L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_SEEK +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_SEEK 5642L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_DELETE_NOENT +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_DELETE_NOENT 5898L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_MMAP +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_MMAP 6154L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_GETTEMPPATH +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_GETTEMPPATH 6410L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_CONVPATH +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_CONVPATH 6666L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_VNODE +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_VNODE 6922L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_AUTH +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_AUTH 7178L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_BEGIN_ATOMIC +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_BEGIN_ATOMIC 7434L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_COMMIT_ATOMIC +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_COMMIT_ATOMIC 7690L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_ROLLBACK_ATOMIC +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_ROLLBACK_ATOMIC 7946L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_DATA +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_DATA 8202L +#undef org_sqlite_jni_capi_CApi_SQLITE_IOERR_CORRUPTFS +#define org_sqlite_jni_capi_CApi_SQLITE_IOERR_CORRUPTFS 8458L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCKED_SHAREDCACHE +#define org_sqlite_jni_capi_CApi_SQLITE_LOCKED_SHAREDCACHE 262L +#undef org_sqlite_jni_capi_CApi_SQLITE_LOCKED_VTAB +#define org_sqlite_jni_capi_CApi_SQLITE_LOCKED_VTAB 518L +#undef org_sqlite_jni_capi_CApi_SQLITE_BUSY_RECOVERY +#define org_sqlite_jni_capi_CApi_SQLITE_BUSY_RECOVERY 261L +#undef org_sqlite_jni_capi_CApi_SQLITE_BUSY_SNAPSHOT +#define org_sqlite_jni_capi_CApi_SQLITE_BUSY_SNAPSHOT 517L +#undef org_sqlite_jni_capi_CApi_SQLITE_BUSY_TIMEOUT +#define org_sqlite_jni_capi_CApi_SQLITE_BUSY_TIMEOUT 773L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_NOTEMPDIR +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_NOTEMPDIR 270L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_ISDIR +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_ISDIR 526L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_FULLPATH +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_FULLPATH 782L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_CONVPATH +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_CONVPATH 1038L +#undef org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_SYMLINK +#define org_sqlite_jni_capi_CApi_SQLITE_CANTOPEN_SYMLINK 1550L +#undef org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_VTAB +#define org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_VTAB 267L +#undef org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_SEQUENCE +#define org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_SEQUENCE 523L +#undef org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_INDEX +#define org_sqlite_jni_capi_CApi_SQLITE_CORRUPT_INDEX 779L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY_RECOVERY +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_RECOVERY 264L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY_CANTLOCK +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_CANTLOCK 520L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY_ROLLBACK +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_ROLLBACK 776L +#undef 
org_sqlite_jni_capi_CApi_SQLITE_READONLY_DBMOVED +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_DBMOVED 1032L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY_CANTINIT +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_CANTINIT 1288L +#undef org_sqlite_jni_capi_CApi_SQLITE_READONLY_DIRECTORY +#define org_sqlite_jni_capi_CApi_SQLITE_READONLY_DIRECTORY 1544L +#undef org_sqlite_jni_capi_CApi_SQLITE_ABORT_ROLLBACK +#define org_sqlite_jni_capi_CApi_SQLITE_ABORT_ROLLBACK 516L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_CHECK +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_CHECK 275L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_COMMITHOOK +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_COMMITHOOK 531L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_FOREIGNKEY +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_FOREIGNKEY 787L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_FUNCTION +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_FUNCTION 1043L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_NOTNULL +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_NOTNULL 1299L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_PRIMARYKEY +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_PRIMARYKEY 1555L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_TRIGGER +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_TRIGGER 1811L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_UNIQUE +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_UNIQUE 2067L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_VTAB +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_VTAB 2323L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_ROWID +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_ROWID 2579L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_PINNED +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_PINNED 2835L +#undef org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_DATATYPE +#define org_sqlite_jni_capi_CApi_SQLITE_CONSTRAINT_DATATYPE 3091L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOTICE_RECOVER_WAL +#define org_sqlite_jni_capi_CApi_SQLITE_NOTICE_RECOVER_WAL 283L +#undef org_sqlite_jni_capi_CApi_SQLITE_NOTICE_RECOVER_ROLLBACK +#define org_sqlite_jni_capi_CApi_SQLITE_NOTICE_RECOVER_ROLLBACK 539L +#undef org_sqlite_jni_capi_CApi_SQLITE_WARNING_AUTOINDEX +#define org_sqlite_jni_capi_CApi_SQLITE_WARNING_AUTOINDEX 284L +#undef org_sqlite_jni_capi_CApi_SQLITE_AUTH_USER +#define org_sqlite_jni_capi_CApi_SQLITE_AUTH_USER 279L +#undef org_sqlite_jni_capi_CApi_SQLITE_OK_LOAD_PERMANENTLY +#define org_sqlite_jni_capi_CApi_SQLITE_OK_LOAD_PERMANENTLY 256L +#undef org_sqlite_jni_capi_CApi_SQLITE_SERIALIZE_NOCOPY +#define org_sqlite_jni_capi_CApi_SQLITE_SERIALIZE_NOCOPY 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_FREEONCLOSE +#define org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_FREEONCLOSE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_READONLY +#define org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_READONLY 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_RESIZEABLE +#define org_sqlite_jni_capi_CApi_SQLITE_DESERIALIZE_RESIZEABLE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_SESSION_CONFIG_STRMSIZE +#define org_sqlite_jni_capi_CApi_SQLITE_SESSION_CONFIG_STRMSIZE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_SESSION_OBJCONFIG_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_SESSION_OBJCONFIG_SIZE 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_MEMORY_USED +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_MEMORY_USED 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_USED +#define 
org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_USED 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_OVERFLOW +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_OVERFLOW 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_MALLOC_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_MALLOC_SIZE 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_PARSER_STACK +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_PARSER_STACK 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_SIZE +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_PAGECACHE_SIZE 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_STATUS_MALLOC_COUNT +#define org_sqlite_jni_capi_CApi_SQLITE_STATUS_MALLOC_COUNT 9L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FULLSCAN_STEP +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FULLSCAN_STEP 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_SORT +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_SORT 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_AUTOINDEX +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_AUTOINDEX 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_VM_STEP +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_VM_STEP 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_REPREPARE +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_REPREPARE 5L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_RUN +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_RUN 6L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FILTER_MISS +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FILTER_MISS 7L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FILTER_HIT +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_FILTER_HIT 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_MEMUSED +#define org_sqlite_jni_capi_CApi_SQLITE_STMTSTATUS_MEMUSED 99L +#undef org_sqlite_jni_capi_CApi_SQLITE_SYNC_NORMAL +#define org_sqlite_jni_capi_CApi_SQLITE_SYNC_NORMAL 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_SYNC_FULL +#define org_sqlite_jni_capi_CApi_SQLITE_SYNC_FULL 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_SYNC_DATAONLY +#define org_sqlite_jni_capi_CApi_SQLITE_SYNC_DATAONLY 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_TRACE_STMT +#define org_sqlite_jni_capi_CApi_SQLITE_TRACE_STMT 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_TRACE_PROFILE +#define org_sqlite_jni_capi_CApi_SQLITE_TRACE_PROFILE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_TRACE_ROW +#define org_sqlite_jni_capi_CApi_SQLITE_TRACE_ROW 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_TRACE_CLOSE +#define org_sqlite_jni_capi_CApi_SQLITE_TRACE_CLOSE 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_TXN_NONE +#define org_sqlite_jni_capi_CApi_SQLITE_TXN_NONE 0L +#undef org_sqlite_jni_capi_CApi_SQLITE_TXN_READ +#define org_sqlite_jni_capi_CApi_SQLITE_TXN_READ 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_TXN_WRITE +#define org_sqlite_jni_capi_CApi_SQLITE_TXN_WRITE 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_DETERMINISTIC +#define org_sqlite_jni_capi_CApi_SQLITE_DETERMINISTIC 2048L +#undef org_sqlite_jni_capi_CApi_SQLITE_DIRECTONLY +#define org_sqlite_jni_capi_CApi_SQLITE_DIRECTONLY 524288L +#undef org_sqlite_jni_capi_CApi_SQLITE_SUBTYPE +#define org_sqlite_jni_capi_CApi_SQLITE_SUBTYPE 1048576L +#undef org_sqlite_jni_capi_CApi_SQLITE_INNOCUOUS +#define org_sqlite_jni_capi_CApi_SQLITE_INNOCUOUS 2097152L +#undef org_sqlite_jni_capi_CApi_SQLITE_RESULT_SUBTYPE +#define org_sqlite_jni_capi_CApi_SQLITE_RESULT_SUBTYPE 16777216L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_SCAN_UNIQUE +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_SCAN_UNIQUE 
1L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_EQ +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_EQ 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GT +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GT 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LE +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LE 8L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LT +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LT 16L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GE +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GE 32L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_MATCH +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_MATCH 64L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LIKE +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LIKE 65L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GLOB +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_GLOB 66L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_REGEXP +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_REGEXP 67L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_NE +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_NE 68L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNOT +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNOT 69L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNOTNULL +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNOTNULL 70L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNULL +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_ISNULL 71L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_IS +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_IS 72L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LIMIT +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_LIMIT 73L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_OFFSET +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_OFFSET 74L +#undef org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_FUNCTION +#define org_sqlite_jni_capi_CApi_SQLITE_INDEX_CONSTRAINT_FUNCTION 150L +#undef org_sqlite_jni_capi_CApi_SQLITE_VTAB_CONSTRAINT_SUPPORT +#define org_sqlite_jni_capi_CApi_SQLITE_VTAB_CONSTRAINT_SUPPORT 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_VTAB_INNOCUOUS +#define org_sqlite_jni_capi_CApi_SQLITE_VTAB_INNOCUOUS 2L +#undef org_sqlite_jni_capi_CApi_SQLITE_VTAB_DIRECTONLY +#define org_sqlite_jni_capi_CApi_SQLITE_VTAB_DIRECTONLY 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_VTAB_USES_ALL_SCHEMAS +#define org_sqlite_jni_capi_CApi_SQLITE_VTAB_USES_ALL_SCHEMAS 4L +#undef org_sqlite_jni_capi_CApi_SQLITE_ROLLBACK +#define org_sqlite_jni_capi_CApi_SQLITE_ROLLBACK 1L +#undef org_sqlite_jni_capi_CApi_SQLITE_FAIL +#define org_sqlite_jni_capi_CApi_SQLITE_FAIL 3L +#undef org_sqlite_jni_capi_CApi_SQLITE_REPLACE +#define org_sqlite_jni_capi_CApi_SQLITE_REPLACE 5L +/* + * Class: org_sqlite_jni_capi_CApi + * Method: init + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_init + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_java_uncache_thread + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1java_1uncache_1thread + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_jni_supports_nio + * Signature: ()Z + */ +JNIEXPORT jboolean JNICALL 
Java_org_sqlite_jni_capi_CApi_sqlite3_1jni_1supports_1nio + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_jni_db_error + * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1jni_1db_1error + (JNIEnv *, jclass, jobject, jint, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_aggregate_context + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Z)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1aggregate_1context + (JNIEnv *, jclass, jobject, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_auto_extension + * Signature: (Lorg/sqlite/jni/capi/AutoExtensionCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1auto_1extension + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_backup_finish + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1backup_1finish + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_backup_init + * Signature: (JLjava/lang/String;JLjava/lang/String;)Lorg/sqlite/jni/capi/sqlite3_backup; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1backup_1init + (JNIEnv *, jclass, jlong, jstring, jlong, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_backup_pagecount + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1backup_1pagecount + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_backup_remaining + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1backup_1remaining + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_backup_step + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1backup_1step + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_blob + * Signature: (JI[BI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1blob + (JNIEnv *, jclass, jlong, jint, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_double + * Signature: (JID)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1double + (JNIEnv *, jclass, jlong, jint, jdouble); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_int + * Signature: (JII)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1int + (JNIEnv *, jclass, jlong, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_int64 + * Signature: (JIJ)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1int64 + (JNIEnv *, jclass, jlong, jint, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_java_object + * Signature: (JILjava/lang/Object;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1java_1object + (JNIEnv *, jclass, jlong, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_nio_buffer + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;ILjava/nio/ByteBuffer;II)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1nio_1buffer + (JNIEnv *, jclass, jobject, jint, jobject, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_null + * Signature: (JI)I + */ 
+JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1null + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_parameter_count + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1parameter_1count + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_parameter_index + * Signature: (J[B)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1parameter_1index + (JNIEnv *, jclass, jlong, jbyteArray); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_parameter_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1parameter_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_text + * Signature: (JI[BI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1text + (JNIEnv *, jclass, jlong, jint, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_text16 + * Signature: (JI[BI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1text16 + (JNIEnv *, jclass, jlong, jint, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_value + * Signature: (JIJ)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1value + (JNIEnv *, jclass, jlong, jint, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_zeroblob + * Signature: (JII)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1zeroblob + (JNIEnv *, jclass, jlong, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_bind_zeroblob64 + * Signature: (JIJ)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1bind_1zeroblob64 + (JNIEnv *, jclass, jlong, jint, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_bytes + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1bytes + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_close + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1close + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_open + * Signature: (JLjava/lang/String;Ljava/lang/String;Ljava/lang/String;JILorg/sqlite/jni/capi/OutputPointer/sqlite3_blob;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1open + (JNIEnv *, jclass, jlong, jstring, jstring, jstring, jlong, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_read + * Signature: (J[BI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1read + (JNIEnv *, jclass, jlong, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_read_nio_buffer + * Signature: (JILjava/nio/ByteBuffer;II)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1read_1nio_1buffer + (JNIEnv *, jclass, jlong, jint, jobject, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_reopen + * Signature: (JJ)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1reopen + (JNIEnv *, jclass, jlong, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_write + * Signature: (J[BI)I + */ +JNIEXPORT jint JNICALL 
Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1write + (JNIEnv *, jclass, jlong, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_blob_write_nio_buffer + * Signature: (JILjava/nio/ByteBuffer;II)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1blob_1write_1nio_1buffer + (JNIEnv *, jclass, jlong, jint, jobject, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_busy_handler + * Signature: (JLorg/sqlite/jni/capi/BusyHandlerCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1busy_1handler + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_busy_timeout + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1busy_1timeout + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_cancel_auto_extension + * Signature: (Lorg/sqlite/jni/capi/AutoExtensionCallback;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1cancel_1auto_1extension + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_changes + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1changes + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_changes64 + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1changes64 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_clear_bindings + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1clear_1bindings + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_close + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1close + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_close_v2 + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1close_1v2 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_blob + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)[B + */ +JNIEXPORT jbyteArray JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1blob + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_bytes + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1bytes + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_bytes16 + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1bytes16 + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_count + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1count + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_database_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1database_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_decltype + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1decltype + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: 
sqlite3_column_double + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)D + */ +JNIEXPORT jdouble JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1double + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_int + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1int + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_int64 + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1int64 + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_java_object + * Signature: (JI)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1java_1object + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_nio_buffer + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1nio_1buffer + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_origin_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1origin_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_table_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1table_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_text + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)[B + */ +JNIEXPORT jbyteArray JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1text + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_text16 + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1text16 + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_type + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1type + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_column_value + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;I)Lorg/sqlite/jni/capi/sqlite3_value; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1column_1value + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_collation_needed + * Signature: (JLorg/sqlite/jni/capi/CollationNeededCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1collation_1needed + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_commit_hook + * Signature: (JLorg/sqlite/jni/capi/CommitHookCallback;)Lorg/sqlite/jni/capi/CommitHookCallback; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1commit_1hook + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: 
org_sqlite_jni_capi_CApi + * Method: sqlite3_compileoption_get + * Signature: (I)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1compileoption_1get + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_compileoption_used + * Signature: (Ljava/lang/String;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1compileoption_1used + (JNIEnv *, jclass, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_complete + * Signature: ([B)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1complete + (JNIEnv *, jclass, jbyteArray); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_config__enable + * Signature: (I)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1enable + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_config__CONFIG_LOG + * Signature: (Lorg/sqlite/jni/capi/ConfigLogCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1CONFIG_1LOG + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_config__SQLLOG + * Signature: (Lorg/sqlite/jni/capi/ConfigSqlLogCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1config_1_1SQLLOG + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_context_db_handle + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)Lorg/sqlite/jni/capi/sqlite3; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1context_1db_1handle + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_create_collation + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;ILorg/sqlite/jni/capi/CollationCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1create_1collation + (JNIEnv *, jclass, jobject, jstring, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_create_function + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;IILorg/sqlite/jni/capi/SQLFunction;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1create_1function + (JNIEnv *, jclass, jobject, jstring, jint, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_data_count + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1data_1count + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_config + * Signature: (Lorg/sqlite/jni/capi/sqlite3;IILorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1config__Lorg_sqlite_jni_capi_sqlite3_2IILorg_sqlite_jni_capi_OutputPointer_Int32_2 + (JNIEnv *, jclass, jobject, jint, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_config + * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1config__Lorg_sqlite_jni_capi_sqlite3_2ILjava_lang_String_2 + (JNIEnv *, jclass, jobject, jint, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_name + * Signature: (JI)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1name + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_filename + * Signature: 
(Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1filename + (JNIEnv *, jclass, jobject, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_handle + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Lorg/sqlite/jni/capi/sqlite3; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1handle + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_readonly + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1readonly + (JNIEnv *, jclass, jobject, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_release_memory + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1release_1memory + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_db_status + * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;Z)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1db_1status + (JNIEnv *, jclass, jobject, jint, jobject, jobject, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_errcode + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1errcode + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_errmsg + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1errmsg + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_set_errmsg + * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILjava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1set_1errmsg + (JNIEnv *, jclass, jobject, jint, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_error_offset + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1error_1offset + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_errstr + * Signature: (I)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1errstr + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_expanded_sql + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1expanded_1sql + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_extended_errcode + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1extended_1errcode + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_extended_result_codes + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Z)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1extended_1result_1codes + (JNIEnv *, jclass, jobject, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_get_autocommit + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1get_1autocommit + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_get_auxdata + * Signature: 
(Lorg/sqlite/jni/capi/sqlite3_context;I)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1get_1auxdata + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_finalize + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1finalize + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_initialize + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1initialize + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_interrupt + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1interrupt + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_is_interrupted + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1is_1interrupted + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_keyword_check + * Signature: (Ljava/lang/String;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1keyword_1check + (JNIEnv *, jclass, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_keyword_count + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1keyword_1count + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_keyword_name + * Signature: (I)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1keyword_1name + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_last_insert_rowid + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1last_1insert_1rowid + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_libversion + * Signature: ()Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1libversion + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_libversion_number + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1libversion_1number + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_limit + * Signature: (Lorg/sqlite/jni/capi/sqlite3;II)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1limit + (JNIEnv *, jclass, jobject, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_normalized_sql + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1normalized_1sql + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_open + * Signature: (Ljava/lang/String;Lorg/sqlite/jni/capi/OutputPointer/sqlite3;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1open + (JNIEnv *, jclass, jstring, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_open_v2 + * Signature: (Ljava/lang/String;Lorg/sqlite/jni/capi/OutputPointer/sqlite3;ILjava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1open_1v2 + (JNIEnv *, jclass, jstring, jobject, jint, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_prepare + * Signature: 
(J[BILorg/sqlite/jni/capi/OutputPointer/sqlite3_stmt;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1prepare + (JNIEnv *, jclass, jlong, jbyteArray, jint, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_prepare_v2 + * Signature: (J[BILorg/sqlite/jni/capi/OutputPointer/sqlite3_stmt;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1prepare_1v2 + (JNIEnv *, jclass, jlong, jbyteArray, jint, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_prepare_v3 + * Signature: (J[BIILorg/sqlite/jni/capi/OutputPointer/sqlite3_stmt;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1prepare_1v3 + (JNIEnv *, jclass, jlong, jbyteArray, jint, jint, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_blobwrite + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1blobwrite + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_count + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1count + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_depth + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1depth + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_hook + * Signature: (JLorg/sqlite/jni/capi/PreupdateHookCallback;)Lorg/sqlite/jni/capi/PreupdateHookCallback; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1hook + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_new + * Signature: (JILorg/sqlite/jni/capi/OutputPointer/sqlite3_value;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1new + (JNIEnv *, jclass, jlong, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_preupdate_old + * Signature: (JILorg/sqlite/jni/capi/OutputPointer/sqlite3_value;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1preupdate_1old + (JNIEnv *, jclass, jlong, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_progress_handler + * Signature: (Lorg/sqlite/jni/capi/sqlite3;ILorg/sqlite/jni/capi/ProgressHandlerCallback;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1progress_1handler + (JNIEnv *, jclass, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_randomness + * Signature: ([B)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1randomness + (JNIEnv *, jclass, jbyteArray); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_release_memory + * Signature: (I)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1release_1memory + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_reset + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1reset + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_reset_auto_extension + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1reset_1auto_1extension + (JNIEnv *, 
jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_double + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;D)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1double + (JNIEnv *, jclass, jobject, jdouble); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_error + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;[BI)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1error + (JNIEnv *, jclass, jobject, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_error_toobig + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1error_1toobig + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_error_nomem + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1error_1nomem + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_error_code + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1error_1code + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_int + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1int + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_int64 + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;J)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1int64 + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_java_object + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Ljava/lang/Object;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1java_1object + (JNIEnv *, jclass, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_nio_buffer + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Ljava/nio/ByteBuffer;II)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1nio_1buffer + (JNIEnv *, jclass, jobject, jobject, jint, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_null + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1null + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_subtype + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1subtype + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_value + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;Lorg/sqlite/jni/capi/sqlite3_value;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1value + (JNIEnv *, jclass, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_zeroblob + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;I)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1zeroblob + (JNIEnv *, jclass, jobject, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_zeroblob64 + * Signature: 
(Lorg/sqlite/jni/capi/sqlite3_context;J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1zeroblob64 + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_blob + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;[BI)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1blob + (JNIEnv *, jclass, jobject, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_blob64 + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;[BJ)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1blob64 + (JNIEnv *, jclass, jobject, jbyteArray, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_text + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;[BI)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1text + (JNIEnv *, jclass, jobject, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_result_text64 + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;[BJI)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1result_1text64 + (JNIEnv *, jclass, jobject, jbyteArray, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_rollback_hook + * Signature: (JLorg/sqlite/jni/capi/RollbackHookCallback;)Lorg/sqlite/jni/capi/RollbackHookCallback; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1rollback_1hook + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_set_authorizer + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Lorg/sqlite/jni/capi/AuthorizerCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1set_1authorizer + (JNIEnv *, jclass, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_set_auxdata + * Signature: (Lorg/sqlite/jni/capi/sqlite3_context;ILjava/lang/Object;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1set_1auxdata + (JNIEnv *, jclass, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_set_last_insert_rowid + * Signature: (Lorg/sqlite/jni/capi/sqlite3;J)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1set_1last_1insert_1rowid + (JNIEnv *, jclass, jobject, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_shutdown + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1shutdown + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_sleep + * Signature: (I)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1sleep + (JNIEnv *, jclass, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_sourceid + * Signature: ()Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1sourceid + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_sql + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1sql + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_status + * Signature: (ILorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;Z)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1status + (JNIEnv *, jclass, jint, jobject, jobject, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: 
sqlite3_status64 + * Signature: (ILorg/sqlite/jni/capi/OutputPointer/Int64;Lorg/sqlite/jni/capi/OutputPointer/Int64;Z)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1status64 + (JNIEnv *, jclass, jint, jobject, jobject, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_step + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1step + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_stmt_busy + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1stmt_1busy + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_stmt_explain + * Signature: (JI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1stmt_1explain + (JNIEnv *, jclass, jlong, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_stmt_isexplain + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1stmt_1isexplain + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_stmt_readonly + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1stmt_1readonly + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_stmt_status + * Signature: (Lorg/sqlite/jni/capi/sqlite3_stmt;IZ)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1stmt_1status + (JNIEnv *, jclass, jobject, jint, jboolean); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_strglob + * Signature: ([B[B)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1strglob + (JNIEnv *, jclass, jbyteArray, jbyteArray); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_strlike + * Signature: ([B[BI)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1strlike + (JNIEnv *, jclass, jbyteArray, jbyteArray, jint); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_system_errno + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1system_1errno + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_table_column_metadata + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;Ljava/lang/String;Ljava/lang/String;Lorg/sqlite/jni/capi/OutputPointer/String;Lorg/sqlite/jni/capi/OutputPointer/String;Lorg/sqlite/jni/capi/OutputPointer/Bool;Lorg/sqlite/jni/capi/OutputPointer/Bool;Lorg/sqlite/jni/capi/OutputPointer/Bool;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1table_1column_1metadata + (JNIEnv *, jclass, jobject, jstring, jstring, jstring, jobject, jobject, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_threadsafe + * Signature: ()I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1threadsafe + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_total_changes + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1total_1changes + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_total_changes64 + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1total_1changes64 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_trace_v2 + * Signature: 
(Lorg/sqlite/jni/capi/sqlite3;ILorg/sqlite/jni/capi/TraceV2Callback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1trace_1v2 + (JNIEnv *, jclass, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_txn_state + * Signature: (Lorg/sqlite/jni/capi/sqlite3;Ljava/lang/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1txn_1state + (JNIEnv *, jclass, jobject, jstring); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_update_hook + * Signature: (JLorg/sqlite/jni/capi/UpdateHookCallback;)Lorg/sqlite/jni/capi/UpdateHookCallback; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1update_1hook + (JNIEnv *, jclass, jlong, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_blob + * Signature: (J)[B + */ +JNIEXPORT jbyteArray JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1blob + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_bytes + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1bytes + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_bytes16 + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1bytes16 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_double + * Signature: (J)D + */ +JNIEXPORT jdouble JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1double + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_dup + * Signature: (J)Lorg/sqlite/jni/capi/sqlite3_value; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1dup + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_encoding + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1encoding + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_free + * Signature: (J)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1free + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_frombind + * Signature: (J)Z + */ +JNIEXPORT jboolean JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1frombind + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_int + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1int + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_int64 + * Signature: (J)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1int64 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_java_object + * Signature: (J)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1java_1object + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_nio_buffer + * Signature: (Lorg/sqlite/jni/capi/sqlite3_value;)Ljava/nio/ByteBuffer; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1nio_1buffer + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_nochange + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1nochange + (JNIEnv *, 
jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_numeric_type + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1numeric_1type + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_subtype + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1subtype + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_text + * Signature: (J)[B + */ +JNIEXPORT jbyteArray JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1text + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_text16 + * Signature: (J)Ljava/lang/String; + */ +JNIEXPORT jstring JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1text16 + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_value_type + * Signature: (J)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1value_1type + (JNIEnv *, jclass, jlong); + +/* + * Class: org_sqlite_jni_capi_CApi + * Method: sqlite3_jni_internal_details + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_CApi_sqlite3_1jni_1internal_1details + (JNIEnv *, jclass); + +#ifdef __cplusplus +} +#endif +#endif +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_sqlite_jni_capi_SQLTester */ + +#ifndef _Included_org_sqlite_jni_capi_SQLTester +#define _Included_org_sqlite_jni_capi_SQLTester +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: org_sqlite_jni_capi_SQLTester + * Method: strglob + * Signature: ([B[B)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_capi_SQLTester_strglob + (JNIEnv *, jclass, jbyteArray, jbyteArray); + +/* + * Class: org_sqlite_jni_capi_SQLTester + * Method: installCustomExtensions + * Signature: ()V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_capi_SQLTester_installCustomExtensions + (JNIEnv *, jclass); + +#ifdef __cplusplus +} +#endif +#endif +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_sqlite_jni_fts5_Fts5ExtensionApi */ + +#ifndef _Included_org_sqlite_jni_fts5_Fts5ExtensionApi +#define _Included_org_sqlite_jni_fts5_Fts5ExtensionApi +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: getInstance + * Signature: ()Lorg/sqlite/jni/fts5/Fts5ExtensionApi; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_getInstance + (JNIEnv *, jclass); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xColumnCount + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xColumnCount + (JNIEnv *, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xColumnSize + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xColumnSize + (JNIEnv *, jobject, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xColumnText + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/capi/OutputPointer/String;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xColumnText + (JNIEnv *, jobject, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xColumnTotalSize + * Signature: 
(Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/capi/OutputPointer/Int64;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xColumnTotalSize + (JNIEnv *, jobject, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xGetAuxdata + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Z)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xGetAuxdata + (JNIEnv *, jobject, jobject, jboolean); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xInst + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xInst + (JNIEnv *, jobject, jobject, jint, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xInstCount + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xInstCount + (JNIEnv *, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseCount + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseCount + (JNIEnv *, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseFirst + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/fts5/Fts5PhraseIter;Lorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseFirst + (JNIEnv *, jobject, jobject, jint, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseFirstColumn + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/fts5/Fts5PhraseIter;Lorg/sqlite/jni/capi/OutputPointer/Int32;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseFirstColumn + (JNIEnv *, jobject, jobject, jint, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseNext + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Lorg/sqlite/jni/fts5/Fts5PhraseIter;Lorg/sqlite/jni/capi/OutputPointer/Int32;Lorg/sqlite/jni/capi/OutputPointer/Int32;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseNext + (JNIEnv *, jobject, jobject, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseNextColumn + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Lorg/sqlite/jni/fts5/Fts5PhraseIter;Lorg/sqlite/jni/capi/OutputPointer/Int32;)V + */ +JNIEXPORT void JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseNextColumn + (JNIEnv *, jobject, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xPhraseSize + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;I)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xPhraseSize + (JNIEnv *, jobject, jobject, jint); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xQueryPhrase + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;ILorg/sqlite/jni/fts5/Fts5ExtensionApi/XQueryPhraseCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xQueryPhrase + (JNIEnv *, jobject, jobject, jint, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * 
Method: xRowCount + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Lorg/sqlite/jni/capi/OutputPointer/Int64;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xRowCount + (JNIEnv *, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xRowid + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;)J + */ +JNIEXPORT jlong JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xRowid + (JNIEnv *, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xSetAuxdata + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;Ljava/lang/Object;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xSetAuxdata + (JNIEnv *, jobject, jobject, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xTokenize + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;[BLorg/sqlite/jni/fts5/XTokenizeCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xTokenize + (JNIEnv *, jobject, jobject, jbyteArray, jobject); + +/* + * Class: org_sqlite_jni_fts5_Fts5ExtensionApi + * Method: xUserData + * Signature: (Lorg/sqlite/jni/fts5/Fts5Context;)Ljava/lang/Object; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_fts5_Fts5ExtensionApi_xUserData + (JNIEnv *, jobject, jobject); + +#ifdef __cplusplus +} +#endif +#endif +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_sqlite_jni_fts5_fts5_api */ + +#ifndef _Included_org_sqlite_jni_fts5_fts5_api +#define _Included_org_sqlite_jni_fts5_fts5_api +#ifdef __cplusplus +extern "C" { +#endif +#undef org_sqlite_jni_fts5_fts5_api_iVersion +#define org_sqlite_jni_fts5_fts5_api_iVersion 2L +/* + * Class: org_sqlite_jni_fts5_fts5_api + * Method: getInstanceForDb + * Signature: (Lorg/sqlite/jni/capi/sqlite3;)Lorg/sqlite/jni/fts5/fts5_api; + */ +JNIEXPORT jobject JNICALL Java_org_sqlite_jni_fts5_fts5_1api_getInstanceForDb + (JNIEnv *, jclass, jobject); + +/* + * Class: org_sqlite_jni_fts5_fts5_api + * Method: xCreateFunction + * Signature: (Ljava/lang/String;Ljava/lang/Object;Lorg/sqlite/jni/fts5/fts5_extension_function;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_fts5_1api_xCreateFunction + (JNIEnv *, jobject, jstring, jobject, jobject); + +#ifdef __cplusplus +} +#endif +#endif +/* DO NOT EDIT THIS FILE - it is machine generated */ +#include +/* Header for class org_sqlite_jni_fts5_fts5_tokenizer */ + +#ifndef _Included_org_sqlite_jni_fts5_fts5_tokenizer +#define _Included_org_sqlite_jni_fts5_fts5_tokenizer +#ifdef __cplusplus +extern "C" { +#endif +/* + * Class: org_sqlite_jni_fts5_fts5_tokenizer + * Method: xTokenize + * Signature: (Lorg/sqlite/jni/fts5/Fts5Tokenizer;I[BLorg/sqlite/jni/fts5/XTokenizeCallback;)I + */ +JNIEXPORT jint JNICALL Java_org_sqlite_jni_fts5_fts5_1tokenizer_xTokenize + (JNIEnv *, jobject, jobject, jint, jbyteArray, jobject); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/ext/jni/src/org/sqlite/jni/annotation/Experimental.java b/ext/jni/src/org/sqlite/jni/annotation/Experimental.java new file mode 100644 index 0000000000..190435c858 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/annotation/Experimental.java @@ -0,0 +1,30 @@ +/* +** 2023-09-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** This file houses the Experimental annotation for the sqlite3 C API. +*/ +package org.sqlite.jni.annotation; +import java.lang.annotation.*; + +/** + This annotation is for flagging methods, constructors, and types + which are expressly experimental and subject to any amount of + change or outright removal. Client code should not rely on such + features. +*/ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target({ + ElementType.METHOD, + ElementType.CONSTRUCTOR, + ElementType.TYPE +}) +public @interface Experimental{} diff --git a/ext/jni/src/org/sqlite/jni/annotation/NotNull.java b/ext/jni/src/org/sqlite/jni/annotation/NotNull.java new file mode 100644 index 0000000000..2873082446 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/annotation/NotNull.java @@ -0,0 +1,71 @@ +/* +** 2023-09-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file houses the NotNull annotation for the sqlite3 C API. +*/ +package org.sqlite.jni.annotation; +import java.lang.annotation.*; + +/** + This annotation is for flagging parameters which may not legally be + null or point to closed/finalized C-side resources. + +

+   <p>In the case of Java types which map directly to C struct types (e.g. {@link org.sqlite.jni.capi.sqlite3}, {@link org.sqlite.jni.capi.sqlite3_stmt}, and {@link org.sqlite.jni.capi.sqlite3_context}), a closed/finalized resource is also considered to be null for the purposes of this annotation because the C-side effect of passing such a handle is the same as if null is passed.
+
+   <p>When used in the context of Java interfaces which are called from the C APIs, this annotation communicates that the C API will never pass a null value to the callback for that parameter.
+
+   <p>Passing a null, for this annotation's definition of null, for any parameter marked with this annotation specifically invokes undefined behavior (see below).
+
+   <p>Passing 0 (i.e. C NULL) or a negative value for any long-type parameter marked with this annotation specifically invokes undefined behavior (see below). Such values are treated as C pointers in the JNI layer.
+
+   <p>Undefined behavior: the JNI build uses the {@code SQLITE_ENABLE_API_ARMOR} build flag, meaning that the C code invoked with invalid NULL pointers and the like will not invoke undefined behavior in the conventional C sense, but may, for example, return result codes which are not documented for the affected APIs or may otherwise behave unpredictably. In no known cases will such arguments result in C-level code dereferencing a NULL pointer or accessing out-of-bounds (or otherwise invalid) memory. In other words, they may cause unexpected behavior but should never cause an outright crash or security issue.
+
+   <p>Note that the C-style API does not throw any exceptions on its own because it has a no-throw policy in order to retain its C-style semantics, but it may trigger NullPointerExceptions (or similar) if passed a null for a parameter flagged with this annotation.
+
+   <p>This annotation is informational only. No policy is in place to programmatically ensure that NotNull is conformed to in client code.
+
+   <p>This annotation is solely for use by the classes in the org.sqlite.jni package and subpackages, but is made public so that javadoc will link to it from the annotated functions. It is not part of the public API and client-level code must not rely on it.

    +*/ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target(ElementType.PARAMETER) +public @interface NotNull{} diff --git a/ext/jni/src/org/sqlite/jni/annotation/Nullable.java b/ext/jni/src/org/sqlite/jni/annotation/Nullable.java new file mode 100644 index 0000000000..e3fa30efc9 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/annotation/Nullable.java @@ -0,0 +1,33 @@ +/* +** 2023-09-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file houses the Nullable annotation for the sqlite3 C API. +*/ +package org.sqlite.jni.annotation; +import java.lang.annotation.*; + +/** + This annotation is for flagging parameters which may legally be + null, noting that they may behave differently if passed null but + are prepared to expect null as a value. When used in the context of + callback methods which are called into from the C APIs, this + annotation communicates that the C API may pass a null value to the + callback. + +
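To make the intent of these two annotations concrete, here is a brief, hypothetical illustration (not part of this changeset) of how a callback parameter list reads when annotated in the style described above:

```java
// Hypothetical illustration only: the first argument is documented as never
// null when invoked from the C side, while the second may legally be null.
import org.sqlite.jni.annotation.NotNull;
import org.sqlite.jni.annotation.Nullable;
import org.sqlite.jni.capi.sqlite3;

public interface ExampleCallback {
  int call(@NotNull sqlite3 db, @Nullable String detail);
}
```

Because both annotations have source-only retention, they impose no runtime checks; they exist purely to document the contract between the bindings and client code.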

    This annotation is solely for the use by the classes in this + package but is made public so that javadoc will link to it from the + annotated functions. It is not part of the public API and + client-level code must not rely on it. +*/ +@Documented +@Retention(RetentionPolicy.SOURCE) +@Target(ElementType.PARAMETER) +public @interface Nullable{} diff --git a/ext/jni/src/org/sqlite/jni/annotation/package-info.java b/ext/jni/src/org/sqlite/jni/annotation/package-info.java new file mode 100644 index 0000000000..20ac7a3017 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/annotation/package-info.java @@ -0,0 +1,17 @@ +/* +** 2023-09-27 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +*/ +/** + This package houses annotations specific to the JNI bindings of the + SQLite3 C API. +*/ +package org.sqlite.jni.annotation; diff --git a/ext/jni/src/org/sqlite/jni/capi/AbstractCollationCallback.java b/ext/jni/src/org/sqlite/jni/capi/AbstractCollationCallback.java new file mode 100644 index 0000000000..925536636e --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/AbstractCollationCallback.java @@ -0,0 +1,34 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +import org.sqlite.jni.annotation.NotNull; + +/** + An implementation of {@link CollationCallback} which provides a + no-op xDestroy() method. +*/ +public abstract class AbstractCollationCallback + implements CollationCallback, XDestroyCallback { + /** + Must compare the given byte arrays and return the result using + {@code memcmp()} semantics. + */ + public abstract int call(@NotNull byte[] lhs, @NotNull byte[] rhs); + + /** + Optionally override to be notified when the UDF is finalized by + SQLite. This implementation does nothing. + */ + public void xDestroy(){} +} diff --git a/ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java b/ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java new file mode 100644 index 0000000000..912f6ed5b5 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/AggregateFunction.java @@ -0,0 +1,138 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + + +/** + A SQLFunction implementation for aggregate functions. Its T is the + data type of its "accumulator" state, an instance of which is + intended to be be managed using the getAggregateState() and + takeAggregateState() methods. 
+*/ +public abstract class AggregateFunction implements SQLFunction { + + /** + As for the xStep() argument of the C API's + sqlite3_create_function(). If this function throws, the + exception is not propagated and a warning might be emitted to a + debugging channel. + */ + public abstract void xStep(sqlite3_context cx, sqlite3_value[] args); + + /** + As for the xFinal() argument of the C API's sqlite3_create_function(). + If this function throws, it is translated into an sqlite3_result_error(). + */ + public abstract void xFinal(sqlite3_context cx); + + /** + Optionally override to be notified when the UDF is finalized by + SQLite. + */ + public void xDestroy() {} + + /** + PerContextState assists aggregate and window functions in + managing their accumulator state across calls to the UDF's + callbacks. + +

+     <p>T must be of a type which can be legally stored as a value in java.util.HashMap.
+
+     <p>If a given aggregate or window function is called multiple times in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)..., then the clients need some way of knowing which call is which so that they can map their state between their various UDF callbacks and reset it via xFinal(). This class takes care of such mappings.
+
+     <p>This class works by mapping sqlite3_context.getAggregateContext() to a single piece of state, of a client-defined type (the T part of this class), which persists across a "matching set" of the UDF's callbacks.
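To make the above concrete, here is a minimal, hypothetical sketch (not part of this changeset) of an integer-summing aggregate built on the state-management helpers described above. It assumes the generic AggregateFunction type and the ValueHolder helper referenced elsewhere in this diff:

```java
// Hypothetical sketch: a SUM()-like aggregate over integer arguments,
// using the per-context state helpers documented above.
import static org.sqlite.jni.capi.CApi.*;
import org.sqlite.jni.capi.AggregateFunction;
import org.sqlite.jni.capi.ValueHolder;
import org.sqlite.jni.capi.sqlite3_context;
import org.sqlite.jni.capi.sqlite3_value;

public class IntSum extends AggregateFunction<Integer> {
  @Override public void xStep(sqlite3_context cx, sqlite3_value[] args){
    // Fetch (or lazily create) the accumulator tied to this UDF invocation.
    final ValueHolder<Integer> acc = getAggregateState(cx, 0);
    for( sqlite3_value v : args ) acc.value += sqlite3_value_int(v);
  }
  @Override public void xFinal(sqlite3_context cx){
    // takeAggregateState() clears the mapping; it returns null if xStep()
    // never ran, e.g. when the query produced no result rows.
    final Integer sum = takeAggregateState(cx);
    sqlite3_result_int(cx, null==sum ? 0 : sum);
  }
}
```

Such a class would then be registered against an open database handle via the binding's create-function API.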

    This class is a helper providing commonly-needed functionality + - it is not required for use with aggregate or window functions. + Client UDFs are free to perform such mappings using custom + approaches. The provided {@link AggregateFunction} and {@link + WindowFunction} classes use this. + */ + public static final class PerContextState { + private final java.util.Map> map + = new java.util.HashMap<>(); + + /** + Should be called from a UDF's xStep(), xValue(), and xInverse() + methods, passing it that method's first argument and an initial + value for the persistent state. If there is currently no + mapping for the given context within the map, one is created + using the given initial value, else the existing one is used + and the 2nd argument is ignored. It returns a ValueHolder + which can be used to modify that state directly without + requiring that the client update the underlying map's entry. + +

    The caller is obligated to eventually call + takeAggregateState() to clear the mapping. + */ + public ValueHolder getAggregateState(sqlite3_context cx, T initialValue){ + final Long key = cx.getAggregateContext(true); + ValueHolder rc = null==key ? null : map.get(key); + if( null==rc ){ + map.put(key, rc = new ValueHolder<>(initialValue)); + } + return rc; + } + + /** + Should be called from a UDF's xFinal() method and passed that + method's first argument. This function removes the value + associated with cx.getAggregateContext() from the map and + returns it, returning null if no other UDF method has been + called to set up such a mapping. The latter condition will be + the case if a UDF is used in a statement which has no result + rows. + */ + public T takeAggregateState(sqlite3_context cx){ + final ValueHolder h = map.remove(cx.getAggregateContext(false)); + return null==h ? null : h.value; + } + } + + /** Per-invocation state for the UDF. */ + private final PerContextState map = new PerContextState<>(); + + /** + To be called from the implementation's xStep() method, as well + as the xValue() and xInverse() methods of the {@link WindowFunction} + subclass, to fetch the current per-call UDF state. On the + first call to this method for any given sqlite3_context + argument, the context is set to the given initial value. On all other + calls, the 2nd argument is ignored. + + @see AggregateFunction.PerContextState#getAggregateState + */ + protected final ValueHolder getAggregateState(sqlite3_context cx, T initialValue){ + return map.getAggregateState(cx, initialValue); + } + + /** + To be called from the implementation's xFinal() method to fetch + the final state of the UDF and remove its mapping. + + see AggregateFunction.PerContextState#takeAggregateState + */ + protected final T takeAggregateState(sqlite3_context cx){ + return map.takeAggregateState(cx); + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java b/ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java new file mode 100644 index 0000000000..298e3a5900 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/AuthorizerCallback.java @@ -0,0 +1,29 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +import org.sqlite.jni.annotation.*; + +/** + Callback for use with {@link CApi#sqlite3_set_authorizer}. +*/ +public interface AuthorizerCallback extends CallbackProxy { + /** + Must function as described for the C-level + sqlite3_set_authorizer() callback. If it throws, the error is + converted to a db-level error and the exception is suppressed. + */ + int call(int opId, @Nullable String s1, @Nullable String s2, + @Nullable String s3, @Nullable String s4); + +} diff --git a/ext/jni/src/org/sqlite/jni/capi/AutoExtensionCallback.java b/ext/jni/src/org/sqlite/jni/capi/AutoExtensionCallback.java new file mode 100644 index 0000000000..7a54132d29 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/AutoExtensionCallback.java @@ -0,0 +1,40 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with the {@link CApi#sqlite3_auto_extension} + family of APIs. +*/ +public interface AutoExtensionCallback extends CallbackProxy { + /** + Must function as described for a C-level + sqlite3_auto_extension() callback. + +

+     <p>This callback may throw and the exception's error message will be set as the db's error string.
+
+     <p>Tips for implementations:
+
+     <p>- Opening a database from an auto-extension handler will lead to an endless recursion of the auto-handler triggering itself indirectly for each newly-opened database.
+
+     <p>- If this routine is stateful, it may be useful to make the overridden method synchronized.

    - Results are undefined if the given db is closed by an auto-extension. + */ + int call(sqlite3 db); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/BusyHandlerCallback.java b/ext/jni/src/org/sqlite/jni/capi/BusyHandlerCallback.java new file mode 100644 index 0000000000..00223f0b66 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/BusyHandlerCallback.java @@ -0,0 +1,26 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_busy_handler}. +*/ +public interface BusyHandlerCallback extends CallbackProxy { + /** + Must function as documented for the C-level + sqlite3_busy_handler() callback argument, minus the (void*) + argument the C-level function requires. + */ + int call(int n); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/CApi.java b/ext/jni/src/org/sqlite/jni/capi/CApi.java new file mode 100644 index 0000000000..0b840c3623 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/CApi.java @@ -0,0 +1,2897 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file declares the main JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +import java.util.Arrays; +import java.nio.charset.StandardCharsets; +import org.sqlite.jni.annotation.*; + +/** + This class contains the entire C-style sqlite3 JNI API binding, + minus a few bits and pieces declared in other files. For client-side + use, a static import is recommended: + +

+   {@code
+   import static org.sqlite.jni.capi.CApi.*;
+   }
+
+   <p>The C-side part can be found in sqlite3-jni.c.
+
+   <p>Only functions which materially differ from their C counterparts are documented here, and only those material differences are documented. The C documentation is otherwise applicable for these APIs:
+
+   <p>https://sqlite.org/c3ref/intro.html
+
+   <p>A handful of Java-specific APIs have been added which are documented here. A number of convenience overloads are provided which are not documented but whose semantics map 1-to-1 in an intuitive manner, e.g. {@link #sqlite3_result_set(sqlite3_context,int)} is equivalent to {@link #sqlite3_result_int}, and sqlite3_result_set() has many type-specific overloads.
+
+   <p>Notes regarding Java's Modified UTF-8 vs standard UTF-8:
+
+   <p>SQLite internally uses UTF-8 encoding, whereas Java natively uses UTF-16. Java JNI has routines for converting to and from UTF-8, but JNI uses what its docs call modified UTF-8 (see links below). Care must be taken when converting Java strings to or from standard UTF-8 to ensure that the proper conversion is performed. In short, Java's {@code String.getBytes(StandardCharsets.UTF_8)} performs the proper conversion in Java, and there are no JNI C APIs for that conversion (JNI's {@code NewStringUTF()} requires its input to be in MUTF-8).
+
+   <p>The known consequences and limitations this discrepancy places on the SQLite3 JNI binding include:
+
+   <ul>
+   <li>C functions which take C-style strings without a length argument require special care when taking input from Java. In particular, Java strings converted to byte arrays for encoding purposes are not NUL-terminated, and conversion to a Java byte array must sometimes be careful to add one. Functions which take a length do not require this so long as the length is provided. Search the CApi class for "\0" for examples.</li>
+   </ul>
+
+   <p>Further reading:
+
+   <p>https://stackoverflow.com/questions/57419723
+   https://stackoverflow.com/questions/7921016
+   https://itecnote.com/tecnote/java-getting-true-utf-8-characters-in-java-jni/
+   https://docs.oracle.com/javase/8/docs/api/java/lang/Character.html#unicode

    https://docs.oracle.com/javase/8/docs/api/java/io/DataInput.html#modified-utf-8 + +*/ +public final class CApi { + static { + System.loadLibrary("sqlite3-jni"); + } + //! Not used + private CApi(){} + //! Called from static init code. + private static native void init(); + + /** + Returns a nul-terminated copy of s as a UTF-8-encoded byte array, + or null if s is null. + */ + private static byte[] nulTerminateUtf8(String s){ + return null==s ? null : (s+"\0").getBytes(StandardCharsets.UTF_8); + } + + /** + Each thread which uses the SQLite3 JNI APIs should call + sqlite3_jni_uncache_thread() when it is done with the library - + either right before it terminates or when it finishes using the + SQLite API. This will clean up any cached per-thread info. + +

+     <p>This process does not close any databases or finalize any prepared statements because their ownership does not depend on a given thread. For proper library behavior, and to avoid C-side leaks, be sure to finalize all statements and close all databases before calling this function.
+
+     <p>Calling this from the main application thread is not strictly required. Additional threads must call this before ending or they will leak cache entries in the C heap, which in turn may keep numerous Java-side global references active.
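As a usage sketch of the cleanup pattern described above (hypothetical, not part of this changeset), a short-lived worker thread might be structured roughly like this:

```java
// Hypothetical usage sketch: release the per-thread JNI cache when a
// worker thread is finished with the SQLite API.
public class WorkerExample {
  public static void main(String[] args) throws InterruptedException {
    Thread worker = new Thread(() -> {
      try {
        // ... open a db, prepare/step/finalize statements, close the db ...
      } finally {
        // All statements finalized and databases closed by this point.
        org.sqlite.jni.capi.CApi.sqlite3_java_uncache_thread();
      }
    });
    worker.start();
    worker.join();
  }
}
```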

    This routine returns false without side effects if the current + JNIEnv is not cached, else returns true, but this information is + primarily for testing of the JNI bindings and is not information + which client-level code can use to make any informed + decisions. Its return type and semantics are not considered + stable and may change at any time. + */ + public static native boolean sqlite3_java_uncache_thread(); + + /** + Returns true if this JVM has JNI-level support for C-level direct + memory access using java.nio.ByteBuffer, else returns false. + */ + @Experimental + public static native boolean sqlite3_jni_supports_nio(); + + /** + For internal use only. Sets the given db's error code and + (optionally) string. If rc is 0, it defaults to SQLITE_ERROR. + + On success it returns rc. On error it may return a more serious + code, such as SQLITE_NOMEM. Returns SQLITE_MISUSE if db is null. + */ + static native int sqlite3_jni_db_error(@NotNull sqlite3 db, + int rc, @Nullable String msg); + + /** + Convenience overload which uses e.toString() as the error + message. + */ + static int sqlite3_jni_db_error(@NotNull sqlite3 db, + int rc, @NotNull Exception e){ + return sqlite3_jni_db_error(db, rc, e.toString()); + } + + ////////////////////////////////////////////////////////////////////// + // Maintenance reminder: please keep the sqlite3_.... functions + // alphabetized. The SQLITE_... values. on the other hand, are + // grouped by category. + + /** + Functions exactly like the native form except that (A) the 2nd + argument is a boolean instead of an int and (B) the returned + value is not a pointer address and is only intended for use as a + per-UDF-call lookup key in a higher-level data structure. + +

+     <p>Passing a true second argument is analogous to passing some unspecified small, non-0 positive value to the C API and passing false is equivalent to passing 0 to the C API.
+
+     <p>Like the C API, it returns 0 if allocation fails or if initialize is false and no prior aggregate context was allocated for cx. If initialize is true then it returns 0 only on allocation error. In all cases, 0 is considered the sentinel "not a key" value.
+  */
+  public static native long sqlite3_aggregate_context(sqlite3_context cx, boolean initialize);
+
+  /**
+     Functions almost as documented for the C API, with these exceptions:
+
+     <p>- The callback interface is shorter because of cross-language differences. Specifically, the 3rd argument to the C auto-extension callback interface is unnecessary here.
+
+     <p>The C API docs do not specifically say so, but if the list of auto-extensions is manipulated from an auto-extension, it is undefined which, if any, auto-extensions will subsequently execute for the current database. That is, doing so will result in unpredictable, but not undefined, behavior.
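For illustration, a hypothetical auto-extension (not part of this changeset) which enables extended result codes on each newly opened connection could be shaped like this:

```java
// Hypothetical sketch: an auto-extension which runs against each newly
// opened database handle.
import static org.sqlite.jni.capi.CApi.*;
import org.sqlite.jni.capi.AutoExtensionCallback;
import org.sqlite.jni.capi.sqlite3;

public class ExtendedCodesAutoExt implements AutoExtensionCallback {
  @Override public int call(sqlite3 db){
    // A 0 return indicates success, as with a C-level auto-extension callback.
    return sqlite3_extended_result_codes(db, true);
  }
}
```

It would be registered once, before connections are opened, by passing an instance to sqlite3_auto_extension().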

    See the AutoExtension class docs for more information. + */ + public static native int sqlite3_auto_extension(@NotNull AutoExtensionCallback callback); + + private static native int sqlite3_backup_finish(@NotNull long ptrToBackup); + + public static int sqlite3_backup_finish(@NotNull sqlite3_backup b){ + return null==b ? 0 : sqlite3_backup_finish(b.clearNativePointer()); + } + + private static native sqlite3_backup sqlite3_backup_init( + @NotNull long ptrToDbDest, @NotNull String destSchemaName, + @NotNull long ptrToDbSrc, @NotNull String srcSchemaName + ); + + public static sqlite3_backup sqlite3_backup_init( + @NotNull sqlite3 dbDest, @NotNull String destSchemaName, + @NotNull sqlite3 dbSrc, @NotNull String srcSchemaName + ){ + return sqlite3_backup_init( dbDest.getNativePointer(), destSchemaName, + dbSrc.getNativePointer(), srcSchemaName ); + } + + private static native int sqlite3_backup_pagecount(@NotNull long ptrToBackup); + + public static int sqlite3_backup_pagecount(@NotNull sqlite3_backup b){ + return sqlite3_backup_pagecount(b.getNativePointer()); + } + + private static native int sqlite3_backup_remaining(@NotNull long ptrToBackup); + + public static int sqlite3_backup_remaining(@NotNull sqlite3_backup b){ + return sqlite3_backup_remaining(b.getNativePointer()); + } + + private static native int sqlite3_backup_step(@NotNull long ptrToBackup, int nPage); + + public static int sqlite3_backup_step(@NotNull sqlite3_backup b, int nPage){ + return sqlite3_backup_step(b.getNativePointer(), nPage); + } + + private static native int sqlite3_bind_blob( + @NotNull long ptrToStmt, int ndx, @Nullable byte[] data, int n + ); + + /** + If n is negative, SQLITE_MISUSE is returned. If n>data.length + then n is silently truncated to data.length. + */ + public static int sqlite3_bind_blob( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] data, int n + ){ + return sqlite3_bind_blob(stmt.getNativePointer(), ndx, data, n); + } + + public static int sqlite3_bind_blob( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] data + ){ + return (null==data) + ? sqlite3_bind_null(stmt.getNativePointer(), ndx) + : sqlite3_bind_blob(stmt.getNativePointer(), ndx, data, data.length); + } + + /** + Convenience overload which is a simple proxy for + sqlite3_bind_nio_buffer(). + */ + @Experimental + /*public*/ static int sqlite3_bind_blob( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data, + int begin, int n + ){ + return sqlite3_bind_nio_buffer(stmt, ndx, data, begin, n); + } + + /** + Convenience overload which is equivalent to passing its arguments + to sqlite3_bind_nio_buffer() with the values 0 and -1 for the + final two arguments. 
+ */ + @Experimental + /*public*/ static int sqlite3_bind_blob( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data + ){ + return sqlite3_bind_nio_buffer(stmt, ndx, data, 0, -1); + } + + private static native int sqlite3_bind_double( + @NotNull long ptrToStmt, int ndx, double v + ); + + public static int sqlite3_bind_double( + @NotNull sqlite3_stmt stmt, int ndx, double v + ){ + return sqlite3_bind_double(stmt.getNativePointer(), ndx, v); + } + + private static native int sqlite3_bind_int( + @NotNull long ptrToStmt, int ndx, int v + ); + + public static int sqlite3_bind_int( + @NotNull sqlite3_stmt stmt, int ndx, int v + ){ + return sqlite3_bind_int(stmt.getNativePointer(), ndx, v); + } + + private static native int sqlite3_bind_int64( + @NotNull long ptrToStmt, int ndx, long v + ); + + public static int sqlite3_bind_int64(@NotNull sqlite3_stmt stmt, int ndx, long v){ + return sqlite3_bind_int64( stmt.getNativePointer(), ndx, v ); + } + + private static native int sqlite3_bind_java_object( + @NotNull long ptrToStmt, int ndx, @Nullable Object o + ); + + /** + Binds the contents of the given buffer object as a blob. + + The byte range of the buffer may be restricted by providing a + start index and a number of bytes. beginPos may not be negative. + Negative howMany is interpreted as the remainder of the buffer + past the given start position, up to the buffer's limit() (as + opposed its capacity()). + + If beginPos+howMany would extend past the limit() of the buffer + then SQLITE_ERROR is returned. + + If any of the following are true, this function behaves like + sqlite3_bind_null(): the buffer is null, beginPos is at or past + the end of the buffer, howMany is 0, or the calculated slice of + the blob has a length of 0. + + If ndx is out of range, it returns SQLITE_RANGE, as documented + for sqlite3_bind_blob(). If beginPos is negative or if + sqlite3_jni_supports_nio() returns false then SQLITE_MISUSE is + returned. Note that this function is bound (as it were) by the + SQLITE_LIMIT_LENGTH constraint and SQLITE_TOOBIG is returned if + the resulting slice of the buffer exceeds that limit. + + This function does not modify the buffer's streaming-related + cursors. + + If the buffer is modified in a separate thread while this + operation is running, results are undefined and will likely + result in corruption of the bound data or a segmentation fault. + + Design note: this function should arguably take a java.nio.Buffer + instead of ByteBuffer, but it can only operate on "direct" + buffers and the only such class offered by Java is (apparently) + ByteBuffer. + + @see https://docs.oracle.com/javase/8/docs/api/java/nio/Buffer.html + */ + @Experimental + /*public*/ static native int sqlite3_bind_nio_buffer( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data, + int beginPos, int howMany + ); + + /** + Convenience overload which binds the given buffer's entire + contents, up to its limit() (as opposed to its capacity()). + */ + @Experimental + /*public*/ static int sqlite3_bind_nio_buffer( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable java.nio.ByteBuffer data + ){ + return sqlite3_bind_nio_buffer(stmt, ndx, data, 0, -1); + } + + /** + Binds the given object at the given index. If o is null then this behaves like + sqlite3_bind_null(). 
+ + @see #sqlite3_result_java_object + */ + public static int sqlite3_bind_java_object( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable Object o + ){ + return sqlite3_bind_java_object(stmt.getNativePointer(), ndx, o); + } + + private static native int sqlite3_bind_null(@NotNull long ptrToStmt, int ndx); + + public static int sqlite3_bind_null(@NotNull sqlite3_stmt stmt, int ndx){ + return sqlite3_bind_null(stmt.getNativePointer(), ndx); + } + + private static native int sqlite3_bind_parameter_count(@NotNull long ptrToStmt); + + public static int sqlite3_bind_parameter_count(@NotNull sqlite3_stmt stmt){ + return sqlite3_bind_parameter_count(stmt.getNativePointer()); + } + + /** + Requires that paramName be a NUL-terminated UTF-8 string. + + This overload is private because: (A) to keep users from + inadvertently passing non-NUL-terminated byte arrays (an easy + thing to do). (B) it is cheaper to NUL-terminate the + String-to-byte-array conversion in the public-facing Java-side + overload than to do that in C, so that signature is the + public-facing one. + */ + private static native int sqlite3_bind_parameter_index( + @NotNull long ptrToStmt, @NotNull byte[] paramName + ); + + public static int sqlite3_bind_parameter_index( + @NotNull sqlite3_stmt stmt, @NotNull String paramName + ){ + final byte[] utf8 = nulTerminateUtf8(paramName); + return null==utf8 ? 0 : sqlite3_bind_parameter_index(stmt.getNativePointer(), utf8); + } + + private static native String sqlite3_bind_parameter_name( + @NotNull long ptrToStmt, int index + ); + + public static String sqlite3_bind_parameter_name(@NotNull sqlite3_stmt stmt, int index){ + return sqlite3_bind_parameter_name(stmt.getNativePointer(), index); + } + + private static native int sqlite3_bind_text( + @NotNull long ptrToStmt, int ndx, @Nullable byte[] utf8, int maxBytes + ); + + /** + Works like the C-level sqlite3_bind_text() but assumes + SQLITE_TRANSIENT for the final C API parameter. The byte array is + assumed to be in UTF-8 encoding. + +
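    For example (a sketch; stmt is assumed to have been prepared from SQL
    containing a :name parameter):

      int ndx = sqlite3_bind_parameter_index(stmt, ":name"); // 0 if not found
      int rc = ndx>0 ? sqlite3_bind_text(stmt, ndx, "Alice") : SQLITE_MISUSE;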

    If data is not null and maxBytes>utf8.length then maxBytes is + silently truncated to utf8.length. If maxBytes is negative then + results are undefined if data is not null and does not contain a + NUL byte. + */ + static int sqlite3_bind_text( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] utf8, int maxBytes + ){ + return sqlite3_bind_text(stmt.getNativePointer(), ndx, utf8, maxBytes); + } + + /** + Converts data, if not null, to a UTF-8-encoded byte array and + binds it as such, returning the result of the C-level + sqlite3_bind_null() or sqlite3_bind_text(). + */ + public static int sqlite3_bind_text( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable String data + ){ + if( null==data ) return sqlite3_bind_null(stmt.getNativePointer(), ndx); + final byte[] utf8 = data.getBytes(StandardCharsets.UTF_8); + return sqlite3_bind_text(stmt.getNativePointer(), ndx, utf8, utf8.length); + } + + /** + Requires that utf8 be null or in UTF-8 encoding. + */ + public static int sqlite3_bind_text( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] utf8 + ){ + return ( null==utf8 ) + ? sqlite3_bind_null(stmt.getNativePointer(), ndx) + : sqlite3_bind_text(stmt.getNativePointer(), ndx, utf8, utf8.length); + } + + private static native int sqlite3_bind_text16( + @NotNull long ptrToStmt, int ndx, @Nullable byte[] data, int maxBytes + ); + + /** + Identical to the sqlite3_bind_text() overload with the same + signature but requires that its input be encoded in UTF-16 in + platform byte order. + */ + static int sqlite3_bind_text16( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] data, int maxBytes + ){ + return sqlite3_bind_text16(stmt.getNativePointer(), ndx, data, maxBytes); + } + + /** + Converts its string argument to UTF-16 and binds it as such, returning + the result of the C-side function of the same name. The 3rd argument + may be null. + */ + public static int sqlite3_bind_text16( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable String data + ){ + if(null == data) return sqlite3_bind_null(stmt, ndx); + final byte[] bytes = data.getBytes(StandardCharsets.UTF_16); + return sqlite3_bind_text16(stmt.getNativePointer(), ndx, bytes, bytes.length); + } + + /** + Requires that data be null or in UTF-16 encoding in platform byte + order. Returns the result of the C-level sqlite3_bind_null() or + sqlite3_bind_text16(). + */ + public static int sqlite3_bind_text16( + @NotNull sqlite3_stmt stmt, int ndx, @Nullable byte[] data + ){ + return (null == data) + ? sqlite3_bind_null(stmt.getNativePointer(), ndx) + : sqlite3_bind_text16(stmt.getNativePointer(), ndx, data, data.length); + } + + private static native int sqlite3_bind_value(@NotNull long ptrToStmt, int ndx, long ptrToValue); + + /** + Functions like the C-level sqlite3_bind_value(), or + sqlite3_bind_null() if val is null. + */ + public static int sqlite3_bind_value(@NotNull sqlite3_stmt stmt, int ndx, sqlite3_value val){ + return sqlite3_bind_value(stmt.getNativePointer(), ndx, + null==val ? 
0L : val.getNativePointer()); + } + + private static native int sqlite3_bind_zeroblob(@NotNull long ptrToStmt, int ndx, int n); + + public static int sqlite3_bind_zeroblob(@NotNull sqlite3_stmt stmt, int ndx, int n){ + return sqlite3_bind_zeroblob(stmt.getNativePointer(), ndx, n); + } + + private static native int sqlite3_bind_zeroblob64( + @NotNull long ptrToStmt, int ndx, long n + ); + + public static int sqlite3_bind_zeroblob64(@NotNull sqlite3_stmt stmt, int ndx, long n){ + return sqlite3_bind_zeroblob64(stmt.getNativePointer(), ndx, n); + } + + private static native int sqlite3_blob_bytes(@NotNull long ptrToBlob); + + public static int sqlite3_blob_bytes(@NotNull sqlite3_blob blob){ + return sqlite3_blob_bytes(blob.getNativePointer()); + } + + private static native int sqlite3_blob_close(@Nullable long ptrToBlob); + + public static int sqlite3_blob_close(@Nullable sqlite3_blob blob){ + return null==blob ? 0 : sqlite3_blob_close(blob.clearNativePointer()); + } + + private static native int sqlite3_blob_open( + @NotNull long ptrToDb, @NotNull String dbName, + @NotNull String tableName, @NotNull String columnName, + long iRow, int flags, @NotNull OutputPointer.sqlite3_blob out + ); + + public static int sqlite3_blob_open( + @NotNull sqlite3 db, @NotNull String dbName, + @NotNull String tableName, @NotNull String columnName, + long iRow, int flags, @NotNull OutputPointer.sqlite3_blob out + ){ + return sqlite3_blob_open(db.getNativePointer(), dbName, tableName, + columnName, iRow, flags, out); + } + + /** + Convenience overload. + */ + public static sqlite3_blob sqlite3_blob_open( + @NotNull sqlite3 db, @NotNull String dbName, + @NotNull String tableName, @NotNull String columnName, + long iRow, int flags ){ + final OutputPointer.sqlite3_blob out = new OutputPointer.sqlite3_blob(); + sqlite3_blob_open(db.getNativePointer(), dbName, tableName, columnName, + iRow, flags, out); + return out.take(); + } + + private static native int sqlite3_blob_read( + @NotNull long ptrToBlob, @NotNull byte[] target, int srcOffset + ); + + /** + As per C's sqlite3_blob_read(), but writes its output to the + given byte array. Note that the final argument is the offset of + the source buffer, not the target array. + */ + public static int sqlite3_blob_read( + @NotNull sqlite3_blob src, @NotNull byte[] target, int srcOffset + ){ + return sqlite3_blob_read(src.getNativePointer(), target, srcOffset); + } + + /** + An internal level of indirection. + */ + @Experimental + private static native int sqlite3_blob_read_nio_buffer( + @NotNull long ptrToBlob, int srcOffset, + @NotNull java.nio.ByteBuffer tgt, int tgtOffset, int howMany + ); + + /** + Reads howMany bytes from offset srcOffset of src into position + tgtOffset of tgt. + + Returns SQLITE_MISUSE if src is null, tgt is null, or + sqlite3_jni_supports_nio() returns false. Returns SQLITE_ERROR if + howMany or either offset are negative. If argument validation + succeeds, it returns the result of the underlying call to + sqlite3_blob_read() (0 on success). + */ + @Experimental + /*public*/ static int sqlite3_blob_read_nio_buffer( + @NotNull sqlite3_blob src, int srcOffset, + @NotNull java.nio.ByteBuffer tgt, int tgtOffset, int howMany + ){ + return (JNI_SUPPORTS_NIO && src!=null && tgt!=null) + ? sqlite3_blob_read_nio_buffer( + src.getNativePointer(), srcOffset, tgt, tgtOffset, howMany + ) + : SQLITE_MISUSE; + } + + /** + Convenience overload which reads howMany bytes from position + srcOffset of src and returns the result as a new ByteBuffer. 
+ + srcOffset may not be negative. If howMany is negative, it is + treated as all bytes following srcOffset. + + Returns null if sqlite3_jni_supports_nio(), any arguments are + invalid, if the number of bytes to read is 0 or is larger than + the src blob, or the underlying call to sqlite3_blob_read() fails + for any reason. + */ + @Experimental + /*public*/ static java.nio.ByteBuffer sqlite3_blob_read_nio_buffer( + @NotNull sqlite3_blob src, int srcOffset, int howMany + ){ + if( !JNI_SUPPORTS_NIO || src==null ) return null; + else if( srcOffset<0 ) return null; + final int nB = sqlite3_blob_bytes(src); + if( srcOffset>=nB ) return null; + else if( howMany<0 ) howMany = nB - srcOffset; + if( srcOffset + howMany > nB ) return null; + final java.nio.ByteBuffer tgt = + java.nio.ByteBuffer.allocateDirect(howMany); + final int rc = sqlite3_blob_read_nio_buffer( + src.getNativePointer(), srcOffset, tgt, 0, howMany + ); + return 0==rc ? tgt : null; + } + + /** + Overload alias for sqlite3_blob_read_nio_buffer(). + */ + @Experimental + /*public*/ static int sqlite3_blob_read( + @NotNull sqlite3_blob src, int srcOffset, + @NotNull java.nio.ByteBuffer tgt, + int tgtOffset, int howMany + ){ + return sqlite3_blob_read_nio_buffer( + src, srcOffset, tgt, tgtOffset, howMany + ); + } + + /** + Convenience overload which uses 0 for both src and tgt offsets + and reads a number of bytes equal to the smaller of + sqlite3_blob_bytes(src) and tgt.limit(). + + On success it sets tgt.limit() to the number of bytes read. On + error, tgt.limit() is not modified. + + Returns 0 on success. Returns SQLITE_MISUSE is either argument is + null or sqlite3_jni_supports_nio() returns false. Else it returns + the result of the underlying call to sqlite3_blob_read(). + */ + @Experimental + /*public*/ static int sqlite3_blob_read( + @NotNull sqlite3_blob src, + @NotNull java.nio.ByteBuffer tgt + ){ + if(!JNI_SUPPORTS_NIO || src==null || tgt==null) return SQLITE_MISUSE; + final int nSrc = sqlite3_blob_bytes(src); + final int nTgt = tgt.limit(); + final int nRead = nTgt T sqlite3_column_java_object( + @NotNull sqlite3_stmt stmt, int ndx, @NotNull Class type + ){ + final Object o = sqlite3_column_java_object(stmt, ndx); + return type.isInstance(o) ? (T)o : null; + } + + private static native String sqlite3_column_name(@NotNull long ptrToStmt, int ndx); + + public static String sqlite3_column_name(@NotNull sqlite3_stmt stmt, int ndx){ + return sqlite3_column_name(stmt.getNativePointer(), ndx); + } + + /** + A variant of sqlite3_column_blob() which returns the blob as a + ByteBuffer object. Returns null if its argument is null, if + sqlite3_jni_supports_nio() is false, or if sqlite3_column_blob() + would return null for the same inputs. + */ + @Experimental + /*public*/ static native java.nio.ByteBuffer sqlite3_column_nio_buffer( + @NotNull sqlite3_stmt stmt, int ndx + ); + + private static native String sqlite3_column_origin_name(@NotNull long ptrToStmt, int ndx); + + /** + Only available if built with SQLITE_ENABLE_COLUMN_METADATA. + */ + public static String sqlite3_column_origin_name(@NotNull sqlite3_stmt stmt, int ndx){ + return sqlite3_column_origin_name(stmt.getNativePointer(), ndx); + } + + private static native String sqlite3_column_table_name(@NotNull long ptrToStmt, int ndx); + + /** + Only available if built with SQLITE_ENABLE_COLUMN_METADATA. 
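    For example, a sketch assuming a build with SQLITE_ENABLE_COLUMN_METADATA,
    an open db handle, and an existing table t(a):

      sqlite3_stmt q = sqlite3_prepare_v2(db, "SELECT a FROM t");
      String col = sqlite3_column_name(q, 0);       // "a"
      String tbl = sqlite3_column_table_name(q, 0); // "t", or null without metadata support
      sqlite3_finalize(q);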
+ */ + public static String sqlite3_column_table_name(@NotNull sqlite3_stmt stmt, int ndx){ + return sqlite3_column_table_name(stmt.getNativePointer(), ndx); + } + + /** + Functions identially to the C API, and this note is just to + stress that the returned bytes are encoded as UTF-8. It returns + null if the underlying C-level sqlite3_column_text() returns NULL + or on allocation error. + + @see #sqlite3_column_text16(sqlite3_stmt,int) + */ + public static native byte[] sqlite3_column_text( + @NotNull sqlite3_stmt stmt, int ndx + ); + + public static native String sqlite3_column_text16( + @NotNull sqlite3_stmt stmt, int ndx + ); + + // The real utility of this function is questionable. + // /** + // Returns a Java value representation based on the value of + // sqlite_value_type(). For integer types it returns either Integer + // or Long, depending on whether the value will fit in an + // Integer. For floating-point values it always returns type Double. + + // If the column was bound using sqlite3_result_java_object() then + // that value, as an Object, is returned. + // */ + // public static Object sqlite3_column_to_java(@NotNull sqlite3_stmt stmt, + // int ndx){ + // sqlite3_value v = sqlite3_column_value(stmt, ndx); + // Object rv = null; + // if(null == v) return v; + // v = sqlite3_value_dup(v)/*need a protected value*/; + // if(null == v) return v /* OOM error in C */; + // if(112/* 'p' */ == sqlite3_value_subtype(v)){ + // rv = sqlite3_value_java_object(v); + // }else{ + // switch(sqlite3_value_type(v)){ + // case SQLITE_INTEGER: { + // final long i = sqlite3_value_int64(v); + // rv = (i<=0x7fffffff && i>=-0x7fffffff-1) + // ? new Integer((int)i) : new Long(i); + // break; + // } + // case SQLITE_FLOAT: rv = new Double(sqlite3_value_double(v)); break; + // case SQLITE_BLOB: rv = sqlite3_value_blob(v); break; + // case SQLITE_TEXT: rv = sqlite3_value_text16(v); break; + // default: break; + // } + // } + // sqlite3_value_free(v); + // return rv; + // } + + private static native int sqlite3_column_type(@NotNull long ptrToStmt, int ndx); + + public static int sqlite3_column_type(@NotNull sqlite3_stmt stmt, int ndx){ + return sqlite3_column_type(stmt.getNativePointer(), ndx); + } + + public static native sqlite3_value sqlite3_column_value( + @NotNull sqlite3_stmt stmt, int ndx + ); + + private static native int sqlite3_collation_needed( + @NotNull long ptrToDb, @Nullable CollationNeededCallback callback + ); + + /** + This functions like C's sqlite3_collation_needed16() because + Java's string type is inherently compatible with that interface. + */ + public static int sqlite3_collation_needed( + @NotNull sqlite3 db, @Nullable CollationNeededCallback callback + ){ + return sqlite3_collation_needed(db.getNativePointer(), callback); + } + + private static native CommitHookCallback sqlite3_commit_hook( + @NotNull long ptrToDb, @Nullable CommitHookCallback hook + ); + + public static CommitHookCallback sqlite3_commit_hook( + @NotNull sqlite3 db, @Nullable CommitHookCallback hook + ){ + return sqlite3_commit_hook(db.getNativePointer(), hook); + } + + public static native String sqlite3_compileoption_get(int n); + + public static native boolean sqlite3_compileoption_used(String optName); + + /** + This implementation is private because it's too easy to pass it + non-NUL-terminated byte arrays from client code. 
+ */ + private static native int sqlite3_complete( + @NotNull byte[] nulTerminatedUtf8Sql + ); + + /** + Unlike the C API, this returns SQLITE_MISUSE if its argument is + null (as opposed to invoking UB). + */ + public static int sqlite3_complete(@NotNull String sql){ + return sqlite3_complete( nulTerminateUtf8(sql) ); + } + + /** + Internal level of indirection for sqlite3_config(int). + */ + private static native int sqlite3_config__enable(int op); + + /** + Internal level of indirection for sqlite3_config(ConfigLogCallback). + */ + private static native int sqlite3_config__CONFIG_LOG( + @Nullable ConfigLogCallback logger + ); + + /** + Internal level of indirection for sqlite3_config(ConfigSqlLogCallback). + */ + private static native int sqlite3_config__SQLLOG( + @Nullable ConfigSqlLogCallback logger + ); + + /** +

    Works like in the C API with the exception that it only supports + the following subset of configuration flags: + +

    SQLITE_CONFIG_SINGLETHREAD
    SQLITE_CONFIG_MULTITHREAD
    SQLITE_CONFIG_SERIALIZED

    Others may be added in the future. It returns SQLITE_MISUSE if + given an argument it does not handle. + +
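    For example (a sketch; this must run before the library is initialized,
    and assumes the SQLITE_CONFIG_MULTITHREAD constant exposed by this class):

      int rc = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
      if( 0==rc ) rc = sqlite3_initialize();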

    Note that sqlite3_config() is not threadsafe with regards to + the rest of the library. This must not be called when any other + library APIs are being called. + */ + public static int sqlite3_config(int op){ + return sqlite3_config__enable(op); + } + + /** + If the native library was built with SQLITE_ENABLE_SQLLOG defined + then this acts as a proxy for C's + sqlite3_config(SQLITE_CONFIG_SQLLOG,...). This sets or clears the + logger. If installation of a logger fails, any previous logger is + retained. + +

    If not built with SQLITE_ENABLE_SQLLOG defined, this returns + SQLITE_MISUSE. + +

    Note that sqlite3_config() is not threadsafe with regards to + the rest of the library. This must not be called when any other + library APIs are being called. + */ + public static int sqlite3_config( @Nullable ConfigSqlLogCallback logger ){ + return sqlite3_config__SQLLOG(logger); + } + + /** + The sqlite3_config() overload for handling the SQLITE_CONFIG_LOG + option. + */ + public static int sqlite3_config( @Nullable ConfigLogCallback logger ){ + return sqlite3_config__CONFIG_LOG(logger); + } + + /** + Unlike the C API, this returns null if its argument is + null (as opposed to invoking UB). + */ + public static native sqlite3 sqlite3_context_db_handle( + @NotNull sqlite3_context cx + ); + + public static native int sqlite3_create_collation( + @NotNull sqlite3 db, @NotNull String name, int eTextRep, + @NotNull CollationCallback col + ); + + /** + The Java counterpart to the C-native sqlite3_create_function(), + sqlite3_create_function_v2(), and + sqlite3_create_window_function(). Which one it behaves like + depends on which methods the final argument implements. See + SQLFunction's subclasses (ScalarFunction, AggregateFunction, + and WindowFunction) for details. + +
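    For example, a minimal scalar UDF sketch (assuming ScalarFunction's
    callback method is xFunc(sqlite3_context, sqlite3_value[]), as described
    in its class docs):

      int rc = sqlite3_create_function(db, "half", 1, SQLITE_UTF8,
        new ScalarFunction(){
          @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){
            sqlite3_result_double(cx, sqlite3_value_double(args[0]) / 2.0);
          }
        });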

    Unlike the C API, this returns SQLITE_MISUSE if its db or + functionName arguments are null (as opposed to invoking UB). + */ + public static native int sqlite3_create_function( + @NotNull sqlite3 db, @NotNull String functionName, + int nArg, int eTextRep, @NotNull SQLFunction func + ); + + private static native int sqlite3_data_count(@NotNull long ptrToStmt); + + public static int sqlite3_data_count(@NotNull sqlite3_stmt stmt){ + return sqlite3_data_count(stmt.getNativePointer()); + } + + /** + Overload for sqlite3_db_config() calls which take (int,int*) + variadic arguments. Returns SQLITE_MISUSE if op is not one of the + SQLITE_DBCONFIG_... options which use this call form. + +

    Unlike the C API, this returns SQLITE_MISUSE if its db argument + is null (as opposed to invoking UB). + */ + public static native int sqlite3_db_config( + @NotNull sqlite3 db, int op, int onOff, @Nullable OutputPointer.Int32 out + ); + + /** + Overload for sqlite3_db_config() calls which take a (const char*) + variadic argument. As of SQLite3 v3.43 the only such option is + SQLITE_DBCONFIG_MAINDBNAME. Returns SQLITE_MISUSE if op is not + SQLITE_DBCONFIG_MAINDBNAME, but that set of options may be + extended in future versions. + */ + public static native int sqlite3_db_config( + @NotNull sqlite3 db, int op, @NotNull String val + ); + + private static native String sqlite3_db_name(@NotNull long ptrToDb, int ndx); + + public static String sqlite3_db_name(@NotNull sqlite3 db, int ndx){ + return null==db ? null : sqlite3_db_name(db.getNativePointer(), ndx); + } + + public static native String sqlite3_db_filename( + @NotNull sqlite3 db, @NotNull String dbName + ); + + public static native sqlite3 sqlite3_db_handle(@NotNull sqlite3_stmt stmt); + + public static native int sqlite3_db_readonly(@NotNull sqlite3 db, String dbName); + + public static native int sqlite3_db_release_memory(sqlite3 db); + + public static native int sqlite3_db_status( + @NotNull sqlite3 db, int op, @NotNull OutputPointer.Int32 pCurrent, + @NotNull OutputPointer.Int32 pHighwater, boolean reset + ); + + public static native int sqlite3_errcode(@NotNull sqlite3 db); + + public static native String sqlite3_errmsg(@NotNull sqlite3 db); + + /** Added in 3.51.0. */ + public static native int sqlite3_set_errmsg(@NotNull sqlite3 db, + int resultCode, + String msg); + + private static native int sqlite3_error_offset(@NotNull long ptrToDb); + + /** + Caveat: the returned byte offset values assume UTF-8-encoded + inputs, so won't always match character offsets in Java Strings. + */ + public static int sqlite3_error_offset(@NotNull sqlite3 db){ + return sqlite3_error_offset(db.getNativePointer()); + } + + public static native String sqlite3_errstr(int resultCode); + + public static native String sqlite3_expanded_sql(@NotNull sqlite3_stmt stmt); + + private static native int sqlite3_extended_errcode(@NotNull long ptrToDb); + + public static int sqlite3_extended_errcode(@NotNull sqlite3 db){ + return sqlite3_extended_errcode(db.getNativePointer()); + } + + public static native int sqlite3_extended_result_codes( + @NotNull sqlite3 db, boolean on + ); + + private static native boolean sqlite3_get_autocommit(@NotNull long ptrToDb); + + public static boolean sqlite3_get_autocommit(@NotNull sqlite3 db){ + return sqlite3_get_autocommit(db.getNativePointer()); + } + + public static native Object sqlite3_get_auxdata( + @NotNull sqlite3_context cx, int n + ); + + private static native int sqlite3_finalize(long ptrToStmt); + + public static int sqlite3_finalize(@NotNull sqlite3_stmt stmt){ + return null==stmt ? 
0 : sqlite3_finalize(stmt.clearNativePointer()); + } + + public static native int sqlite3_initialize(); + + public static native void sqlite3_interrupt(@NotNull sqlite3 db); + + public static native boolean sqlite3_is_interrupted(@NotNull sqlite3 db); + + public static native boolean sqlite3_keyword_check(@NotNull String word); + + public static native int sqlite3_keyword_count(); + + public static native String sqlite3_keyword_name(int index); + + + public static native long sqlite3_last_insert_rowid(@NotNull sqlite3 db); + + public static native String sqlite3_libversion(); + + public static native int sqlite3_libversion_number(); + + public static native int sqlite3_limit(@NotNull sqlite3 db, int id, int newVal); + + /** + Only available if built with SQLITE_ENABLE_NORMALIZE. If not, it always + returns null. + */ + public static native String sqlite3_normalized_sql(@NotNull sqlite3_stmt stmt); + + /** + Works like its C counterpart and makes the native pointer of the + underlying (sqlite3*) object available via + ppDb.getNativePointer(). That pointer is necessary for looking up + the JNI-side native, but clients need not pay it any + heed. Passing the object to sqlite3_close() or sqlite3_close_v2() + will clear that pointer mapping. + +

    Recall that even if opening fails, the output pointer might be + non-null. Any error message about the failure will be in that + object and it is up to the caller to sqlite3_close() that + db handle. + */ + public static native int sqlite3_open( + @Nullable String filename, @NotNull OutputPointer.sqlite3 ppDb + ); + + /** + Convenience overload which returns its db handle directly. The returned + object might not have been successfully opened: use sqlite3_errcode() to + check whether it is in an error state. + +
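    For example (a sketch; the filename is a placeholder):

      sqlite3 db = sqlite3_open("/path/to/my.db");
      if( 0 != sqlite3_errcode(db) ){
        // Open failed but the handle still carries the error state.
        final String msg = sqlite3_errmsg(db);
        sqlite3_close_v2(db);
      }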

    Ownership of the returned value is passed to the caller, who must eventually + pass it to sqlite3_close() or sqlite3_close_v2(). + */ + public static sqlite3 sqlite3_open(@Nullable String filename){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + sqlite3_open(filename, out); + return out.take(); + } + + public static native int sqlite3_open_v2( + @Nullable String filename, @NotNull OutputPointer.sqlite3 ppDb, + int flags, @Nullable String zVfs + ); + + /** + Has the same semantics as the sqlite3-returning sqlite3_open() + but uses sqlite3_open_v2() instead of sqlite3_open(). + */ + public static sqlite3 sqlite3_open_v2(@Nullable String filename, int flags, + @Nullable String zVfs){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + sqlite3_open_v2(filename, out, flags, zVfs); + return out.take(); + } + + /** + The sqlite3_prepare() family of functions require slightly + different signatures than their native counterparts, but (A) they + retain functionally equivalent semantics and (B) overloading + allows us to install several convenience forms. + +

    All of them which take their SQL in the form of a byte[] require + that it be in UTF-8 encoding unless explicitly noted otherwise. + +

    The forms which take a "tail" output pointer return (via that + output object) the index into their SQL byte array at which the + end of the first SQL statement processed by the call was + found. That's fundamentally how the C APIs work but making use of + that value requires more copying of the input SQL into + consecutively smaller arrays in order to consume all of + it. (There is an example of doing that in this project's Tester1 + class.) For the vast majority of uses, that capability is not + necessary, however, and overloads are provided which gloss over + that. + +
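    A sketch of that consumption loop (sqlite3_prepare_multi(), defined later
    in this class, wraps the same idea; db is assumed to be an open handle):

      byte[] sql = "CREATE TABLE t(a); INSERT INTO t VALUES(1);"
        .getBytes(StandardCharsets.UTF_8);
      final OutputPointer.Int32 tail = new OutputPointer.Int32();
      final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt();
      while( sql.length>0 && 0==sqlite3_prepare(db, sql, out, tail) ){
        final sqlite3_stmt st = out.take();
        if( null==st ) break; // only whitespace/comments remained
        sqlite3_step(st);
        sqlite3_finalize(st);
        sql = Arrays.copyOfRange(sql, tail.value, sql.length);
      }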

    Results are undefined if maxBytes>sqlUtf8.length. + +

    This routine is private because its maxBytes value is not + strictly necessary in the Java interface, as sqlUtf8.length tells + us the information we need. Making this public would give clients + more ways to shoot themselves in the foot without providing any + real utility. + */ + private static native int sqlite3_prepare( + @NotNull long ptrToDb, @NotNull byte[] sqlUtf8, int maxBytes, + @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ); + + /** + Works like the canonical sqlite3_prepare() but its "tail" output + argument is returned as the index offset into the given + UTF-8-encoded byte array at which SQL parsing stopped. The + semantics are otherwise identical to the C API counterpart. + +

    Several overloads provided simplified call signatures. + */ + public static int sqlite3_prepare( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ){ + return sqlite3_prepare(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + outStmt, pTailOffset); + } + + public static int sqlite3_prepare( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + return sqlite3_prepare(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + outStmt, null); + } + + public static int sqlite3_prepare( + @NotNull sqlite3 db, @NotNull String sql, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + final byte[] utf8 = sql.getBytes(StandardCharsets.UTF_8); + return sqlite3_prepare(db.getNativePointer(), utf8, utf8.length, + outStmt, null); + } + + /** + Convenience overload which returns its statement handle directly, + or null on error or when reading only whitespace or + comments. sqlite3_errcode() can be used to determine whether + there was an error or the input was empty. Ownership of the + returned object is passed to the caller, who must eventually pass + it to sqlite3_finalize(). + */ + public static sqlite3_stmt sqlite3_prepare( + @NotNull sqlite3 db, @NotNull String sql + ){ + final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt(); + sqlite3_prepare(db, sql, out); + return out.take(); + } + /** + @see #sqlite3_prepare + */ + private static native int sqlite3_prepare_v2( + @NotNull long ptrToDb, @NotNull byte[] sqlUtf8, int maxBytes, + @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ); + + /** + Works like the canonical sqlite3_prepare_v2() but its "tail" + output parameter is returned as the index offset into the given + byte array at which SQL parsing stopped. + */ + public static int sqlite3_prepare_v2( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ){ + return sqlite3_prepare_v2(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + outStmt, pTailOffset); + } + + public static int sqlite3_prepare_v2( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + return sqlite3_prepare_v2(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + outStmt, null); + } + + public static int sqlite3_prepare_v2( + @NotNull sqlite3 db, @NotNull String sql, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + final byte[] utf8 = sql.getBytes(StandardCharsets.UTF_8); + return sqlite3_prepare_v2(db.getNativePointer(), utf8, utf8.length, + outStmt, null); + } + + /** + Works identically to the sqlite3_stmt-returning sqlite3_prepare() + but uses sqlite3_prepare_v2(). + */ + public static sqlite3_stmt sqlite3_prepare_v2( + @NotNull sqlite3 db, @NotNull String sql + ){ + final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt(); + sqlite3_prepare_v2(db, sql, out); + return out.take(); + } + + /** + @see #sqlite3_prepare + */ + private static native int sqlite3_prepare_v3( + @NotNull long ptrToDb, @NotNull byte[] sqlUtf8, int maxBytes, + int prepFlags, @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ); + + /** + Works like the canonical sqlite3_prepare_v2() but its "tail" + output parameter is returned as the index offset into the given + byte array at which SQL parsing stopped. 
+ */ + public static int sqlite3_prepare_v3( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, int prepFlags, + @NotNull OutputPointer.sqlite3_stmt outStmt, + @Nullable OutputPointer.Int32 pTailOffset + ){ + return sqlite3_prepare_v3(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + prepFlags, outStmt, pTailOffset); + } + + /** + Convenience overload which elides the seldom-used pTailOffset + parameter. + */ + public static int sqlite3_prepare_v3( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, int prepFlags, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + return sqlite3_prepare_v3(db.getNativePointer(), sqlUtf8, sqlUtf8.length, + prepFlags, outStmt, null); + } + + /** + Convenience overload which elides the seldom-used pTailOffset + parameter and converts the given string to UTF-8 before passing + it on. + */ + public static int sqlite3_prepare_v3( + @NotNull sqlite3 db, @NotNull String sql, int prepFlags, + @NotNull OutputPointer.sqlite3_stmt outStmt + ){ + final byte[] utf8 = sql.getBytes(StandardCharsets.UTF_8); + return sqlite3_prepare_v3(db.getNativePointer(), utf8, utf8.length, + prepFlags, outStmt, null); + } + + /** + Works identically to the sqlite3_stmt-returning sqlite3_prepare() + but uses sqlite3_prepare_v3(). + */ + public static sqlite3_stmt sqlite3_prepare_v3( + @NotNull sqlite3 db, @NotNull String sql, int prepFlags + ){ + final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt(); + sqlite3_prepare_v3(db, sql, prepFlags, out); + return out.take(); + } + + /** + A convenience wrapper around sqlite3_prepare_v3() which accepts + an arbitrary amount of input provided as a UTF-8-encoded byte + array. It loops over the input bytes looking for + statements. Each one it finds is passed to p.call(), passing + ownership of it to that function. If p.call() returns 0, looping + continues, else the loop stops and p.call()'s result code is + returned. If preparation of any given segment fails, looping + stops and that result code is returned. + +

    If p.call() throws, the exception is converted to a db-level + error and a non-0 code is returned, in order to retain the + C-style error semantics of the API. + +
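    For example, a sketch which steps and finalizes each statement (assuming
    PrepareMultiCallback is a single-method interface, so a lambda suffices,
    and the usual SQLITE_DONE result-code constant):

      int rc = sqlite3_prepare_multi(db,
        "CREATE TABLE IF NOT EXISTS t(a); INSERT INTO t(a) VALUES(1);",
        (stmt) -> {
          final int stepRc = sqlite3_step(stmt);
          sqlite3_finalize(stmt);
          return SQLITE_DONE==stepRc ? 0 : stepRc;
        });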

    How each statement is handled, including whether it is finalized + or not, is up to the callback object. e.g. the callback might + collect them for later use. If it does not collect them then it + must finalize them. See PrepareMultiCallback.Finalize for a + simple proxy which does that. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + int prepFlags, + @NotNull PrepareMultiCallback p){ + final OutputPointer.Int32 oTail = new OutputPointer.Int32(); + int pos = 0, n = 1; + byte[] sqlChunk = sqlUtf8; + int rc = 0; + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + while( 0==rc && pos<sqlChunk.length ){ + sqlite3_stmt stmt = null; + if( pos>0 ){ + sqlChunk = Arrays.copyOfRange(sqlChunk, pos, + sqlChunk.length); + } + if( 0==sqlChunk.length ) break; + rc = sqlite3_prepare_v3(db, sqlChunk, prepFlags, outStmt, oTail); + if( 0!=rc ) break; + pos = oTail.value; + stmt = outStmt.take(); + if( null==stmt ){ + // empty statement (whitespace/comments) + continue; + } + try{ + rc = p.call(stmt); + }catch(Exception e){ + rc = sqlite3_jni_db_error( db, SQLITE_ERROR, e ); + } + } + return rc; + } + + /** + Convenience overload which accepts its SQL as UTF-8-encoded bytes and uses + no statement-preparation flags. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull byte[] sqlUtf8, + @NotNull PrepareMultiCallback p){ + return sqlite3_prepare_multi(db, sqlUtf8, 0, p); + } + + /** + Convenience overload which accepts its SQL as a String. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull String sql, int prepFlags, + @NotNull PrepareMultiCallback p){ + return sqlite3_prepare_multi( + db, sql.getBytes(StandardCharsets.UTF_8), prepFlags, p + ); + } + + /** + Convenience overload which accepts its SQL as a String and uses + no statement-preparation flags. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull String sql, + @NotNull PrepareMultiCallback p){ + return sqlite3_prepare_multi(db, sql, 0, p); + } + + /** + Convenience overload which accepts its SQL as a String + array. They will be concatenated together as-is, with no + separator, and passed on to one of the other overloads. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull String[] sql, int prepFlags, + @NotNull PrepareMultiCallback p){ + return sqlite3_prepare_multi(db, String.join("",sql), prepFlags, p); + } + + /** + Convenience overload which uses no statement-preparation flags. + */ + public static int sqlite3_prepare_multi( + @NotNull sqlite3 db, @NotNull String[] sql, + @NotNull PrepareMultiCallback p){ + return sqlite3_prepare_multi(db, sql, 0, p); + } + + private static native int sqlite3_preupdate_blobwrite(@NotNull long ptrToDb); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this + acts as a proxy for C's sqlite3_preupdate_blobwrite(), else it returns + SQLITE_MISUSE with no side effects. + */ + public static int sqlite3_preupdate_blobwrite(@NotNull sqlite3 db){ + return sqlite3_preupdate_blobwrite(db.getNativePointer()); + } + + private static native int sqlite3_preupdate_count(@NotNull long ptrToDb); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this + acts as a proxy for C's sqlite3_preupdate_count(), else it returns + SQLITE_MISUSE with no side effects. 
+ */ + public static int sqlite3_preupdate_count(@NotNull sqlite3 db){ + return sqlite3_preupdate_count(db.getNativePointer()); + } + + private static native int sqlite3_preupdate_depth(@NotNull long ptrToDb); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this + acts as a proxy for C's sqlite3_preupdate_depth(), else it returns + SQLITE_MISUSE with no side effects. + */ + public static int sqlite3_preupdate_depth(@NotNull sqlite3 db){ + return sqlite3_preupdate_depth(db.getNativePointer()); + } + + private static native PreupdateHookCallback sqlite3_preupdate_hook( + @NotNull long ptrToDb, @Nullable PreupdateHookCallback hook + ); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, this + acts as a proxy for C's sqlite3_preupdate_hook(), else it returns null + with no side effects. + */ + public static PreupdateHookCallback sqlite3_preupdate_hook( + @NotNull sqlite3 db, @Nullable PreupdateHookCallback hook + ){ + return sqlite3_preupdate_hook(db.getNativePointer(), hook); + } + + private static native int sqlite3_preupdate_new(@NotNull long ptrToDb, int col, + @NotNull OutputPointer.sqlite3_value out); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, + this acts as a proxy for C's sqlite3_preupdate_new(), else it + returns SQLITE_MISUSE with no side effects. + + WARNING: client code _must not_ hold a reference to the returned + sqlite3_value object beyond the scope of the preupdate hook in + which this function is called. Doing so will leave the client + holding a stale pointer, the address of which could point to + anything at all after the pre-update hook is complete. This API + has no way to record such objects and clear/invalidate them at + the end of a pre-update hook. We "could" add infrastructure to do + so, but would require significant levels of bookkeeping. + */ + public static int sqlite3_preupdate_new(@NotNull sqlite3 db, int col, + @NotNull OutputPointer.sqlite3_value out){ + return sqlite3_preupdate_new(db.getNativePointer(), col, out); + } + + /** + Convenience wrapper for the 3-arg sqlite3_preupdate_new() which returns + null on error. + */ + public static sqlite3_value sqlite3_preupdate_new(@NotNull sqlite3 db, int col){ + final OutputPointer.sqlite3_value out = new OutputPointer.sqlite3_value(); + sqlite3_preupdate_new(db.getNativePointer(), col, out); + return out.take(); + } + + private static native int sqlite3_preupdate_old(@NotNull long ptrToDb, int col, + @NotNull OutputPointer.sqlite3_value out); + + /** + If the C API was built with SQLITE_ENABLE_PREUPDATE_HOOK defined, + this acts as a proxy for C's sqlite3_preupdate_old(), else it + returns SQLITE_MISUSE with no side effects. + + WARNING: see warning in sqlite3_preupdate_new() regarding the + potential for stale sqlite3_value handles. + */ + public static int sqlite3_preupdate_old(@NotNull sqlite3 db, int col, + @NotNull OutputPointer.sqlite3_value out){ + return sqlite3_preupdate_old(db.getNativePointer(), col, out); + } + + /** + Convenience wrapper for the 3-arg sqlite3_preupdate_old() which returns + null on error. 
+ */ + public static sqlite3_value sqlite3_preupdate_old(@NotNull sqlite3 db, int col){ + final OutputPointer.sqlite3_value out = new OutputPointer.sqlite3_value(); + sqlite3_preupdate_old(db.getNativePointer(), col, out); + return out.take(); + } + + public static native void sqlite3_progress_handler( + @NotNull sqlite3 db, int n, @Nullable ProgressHandlerCallback h + ); + + public static native void sqlite3_randomness(byte[] target); + + public static native int sqlite3_release_memory(int n); + + public static native int sqlite3_reset(@NotNull sqlite3_stmt stmt); + + /** + Works like the C API except that it has no side effects if auto + extensions are currently running. (The JNI-level list of + extensions cannot be manipulated while it is being traversed.) + */ + public static native void sqlite3_reset_auto_extension(); + + public static native void sqlite3_result_double( + @NotNull sqlite3_context cx, double v + ); + + /** + The main sqlite3_result_error() impl of which all others are + proxies. eTextRep must be one of SQLITE_UTF8 or SQLITE_UTF16 and + msg must be encoded correspondingly. Any other eTextRep value + results in the C-level sqlite3_result_error() being called with a + complaint about the invalid argument. + */ + private static native void sqlite3_result_error( + @NotNull sqlite3_context cx, @NotNull byte[] msg, int eTextRep + ); + + public static void sqlite3_result_error( + @NotNull sqlite3_context cx, @NotNull byte[] utf8 + ){ + sqlite3_result_error(cx, utf8, SQLITE_UTF8); + } + + public static void sqlite3_result_error( + @NotNull sqlite3_context cx, @NotNull String msg + ){ + final byte[] utf8 = msg.getBytes(StandardCharsets.UTF_8); + sqlite3_result_error(cx, utf8, SQLITE_UTF8); + } + + public static void sqlite3_result_error16( + @NotNull sqlite3_context cx, @NotNull byte[] utf16 + ){ + sqlite3_result_error(cx, utf16, SQLITE_UTF16); + } + + public static void sqlite3_result_error16( + @NotNull sqlite3_context cx, @NotNull String msg + ){ + final byte[] utf16 = msg.getBytes(StandardCharsets.UTF_16); + sqlite3_result_error(cx, utf16, SQLITE_UTF16); + } + + /** + Equivalent to passing e.toString() to {@link + #sqlite3_result_error(sqlite3_context,String)}. Note that + toString() is used instead of getMessage() because the former + prepends the exception type name to the message. + */ + public static void sqlite3_result_error( + @NotNull sqlite3_context cx, @NotNull Exception e + ){ + sqlite3_result_error(cx, e.toString()); + } + + public static native void sqlite3_result_error_toobig( + @NotNull sqlite3_context cx + ); + + public static native void sqlite3_result_error_nomem( + @NotNull sqlite3_context cx + ); + + public static native void sqlite3_result_error_code( + @NotNull sqlite3_context cx, int c + ); + + public static native void sqlite3_result_int( + @NotNull sqlite3_context cx, int v + ); + + public static native void sqlite3_result_int64( + @NotNull sqlite3_context cx, long v + ); + + /** + Binds the SQL result to the given object, or {@link + #sqlite3_result_null} if {@code o} is null. Use {@link + #sqlite3_value_java_object} to fetch it. + +
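    For example, a round-trip sketch (cx is a sqlite3_context inside a UDF
    callback and val an sqlite3_value fetched elsewhere; java.time.Instant is
    just an arbitrary host object):

      // Inside some ScalarFunction.xFunc(cx, args) body:
      sqlite3_result_java_object(cx, java.time.Instant.now());

      // When consuming that value later:
      java.time.Instant when =
        sqlite3_value_java_object(val, java.time.Instant.class);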

    This is implemented in terms of C's sqlite3_result_pointer(), + but that function is not exposed to JNI because (A) + cross-language semantic mismatch and (B) Java doesn't need that + argument for its intended purpose (type safety). + + @see #sqlite3_value_java_object + @see #sqlite3_bind_java_object + */ + public static native void sqlite3_result_java_object( + @NotNull sqlite3_context cx, @NotNull Object o + ); + + /** + Similar to sqlite3_bind_nio_buffer(), this works like + sqlite3_result_blob() but accepts a java.nio.ByteBuffer as its + input source. See sqlite3_bind_nio_buffer() for the semantics of + the second and subsequent arguments. + + If cx is null then this function will silently fail. If + sqlite3_jni_supports_nio() returns false or iBegin is negative, + an error result is set. If (begin+n) extends beyond the end of + the buffer, it is silently truncated to fit. + + If any of the following apply, this function behaves like + sqlite3_result_null(): the blob is null, the resulting slice of + the blob is empty. + + If the resulting slice of the buffer exceeds SQLITE_LIMIT_LENGTH + then this function behaves like sqlite3_result_error_toobig(). + */ + @Experimental + /*public*/ static native void sqlite3_result_nio_buffer( + @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob, + int begin, int n + ); + + /** + Convenience overload which uses the whole input object + as the result blob content. + */ + @Experimental + /*public*/ static void sqlite3_result_nio_buffer( + @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob + ){ + sqlite3_result_nio_buffer(cx, blob, 0, -1); + } + + public static native void sqlite3_result_null( + @NotNull sqlite3_context cx + ); + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @NotNull Boolean v + ){ + sqlite3_result_int(cx, v ? 1 : 0); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, boolean v + ){ + sqlite3_result_int(cx, v ? 
1 : 0); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @NotNull Double v + ){ + sqlite3_result_double(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, double v + ){ + sqlite3_result_double(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @NotNull Integer v + ){ + sqlite3_result_int(cx, v); + } + + public static void sqlite3_result_set(@NotNull sqlite3_context cx, int v){ + sqlite3_result_int(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @NotNull Long v + ){ + sqlite3_result_int64(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, long v + ){ + sqlite3_result_int64(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @Nullable String v + ){ + if( null==v ) sqlite3_result_null(cx); + else sqlite3_result_text(cx, v); + } + + public static void sqlite3_result_set( + @NotNull sqlite3_context cx, @Nullable byte[] blob + ){ + if( null==blob ) sqlite3_result_null(cx); + else sqlite3_result_blob(cx, blob, blob.length); + } + + public static native void sqlite3_result_subtype( + @NotNull sqlite3_context cx, int val + ); + + public static native void sqlite3_result_value( + @NotNull sqlite3_context cx, @NotNull sqlite3_value v + ); + + public static native void sqlite3_result_zeroblob( + @NotNull sqlite3_context cx, int n + ); + + public static native int sqlite3_result_zeroblob64( + @NotNull sqlite3_context cx, long n + ); + + /** + This overload is private because its final parameter is arguably + unnecessary in Java. + */ + private static native void sqlite3_result_blob( + @NotNull sqlite3_context cx, @Nullable byte[] blob, int maxLen + ); + + public static void sqlite3_result_blob( + @NotNull sqlite3_context cx, @Nullable byte[] blob + ){ + sqlite3_result_blob(cx, blob, (int)(null==blob ? 0 : blob.length)); + } + + /** + Convenience overload which behaves like + sqlite3_result_nio_buffer(). + */ + @Experimental + /*public*/ static void sqlite3_result_blob( + @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob, + int begin, int n + ){ + sqlite3_result_nio_buffer(cx, blob, begin, n); + } + + /** + Convenience overload which behaves like the two-argument overload of + sqlite3_result_nio_buffer(). + */ + @Experimental + /*public*/ static void sqlite3_result_blob( + @NotNull sqlite3_context cx, @Nullable java.nio.ByteBuffer blob + ){ + sqlite3_result_nio_buffer(cx, blob); + } + + /** + Binds the given text using C's sqlite3_result_blob64() unless: + +

    • @param blob is null: translates to sqlite3_result_null()
    • @param blob is too large: translates to sqlite3_result_error_toobig()

    If @param maxLen is larger than blob.length, it is truncated + to that value. If it is negative, results are undefined.

    + +

    This overload is private because its final parameter is + arguably unnecessary in Java.

    + */ + private static native void sqlite3_result_blob64( + @NotNull sqlite3_context cx, @Nullable byte[] blob, long maxLen + ); + + public static void sqlite3_result_blob64( + @NotNull sqlite3_context cx, @Nullable byte[] blob + ){ + sqlite3_result_blob64(cx, blob, (long)(null==blob ? 0 : blob.length)); + } + + /** + This overload is private because its final parameter is + arguably unnecessary in Java. + */ + private static native void sqlite3_result_text( + @NotNull sqlite3_context cx, @Nullable byte[] utf8, int maxLen + ); + + public static void sqlite3_result_text( + @NotNull sqlite3_context cx, @Nullable byte[] utf8 + ){ + sqlite3_result_text(cx, utf8, null==utf8 ? 0 : utf8.length); + } + + public static void sqlite3_result_text( + @NotNull sqlite3_context cx, @Nullable String text + ){ + if(null == text) sqlite3_result_null(cx); + else{ + final byte[] utf8 = text.getBytes(StandardCharsets.UTF_8); + sqlite3_result_text(cx, utf8, utf8.length); + } + } + + /** + Binds the given text using C's sqlite3_result_text64() unless: + +
    • text is null: translates to a call to {@link #sqlite3_result_null}
    • text is too large: translates to a call to {@link #sqlite3_result_error_toobig}
    • The @param encoding argument has an invalid value: translates to {@link #sqlite3_result_error_code} with code SQLITE_FORMAT.
    + + If maxLength (in bytes, not characters) is larger than + text.length, it is silently truncated to text.length. If it is + negative, results are undefined. If text is null, the subsequent + arguments are ignored. + + This overload is private because its maxLength parameter is + arguably unnecessary in Java. + */ + private static native void sqlite3_result_text64( + @NotNull sqlite3_context cx, @Nullable byte[] text, + long maxLength, int encoding + ); + + /** + Sets the current UDF result to the given bytes, which are assumed + be encoded in UTF-16 using the platform's byte order. + */ + public static void sqlite3_result_text16( + @NotNull sqlite3_context cx, @Nullable byte[] utf16 + ){ + if(null == utf16) sqlite3_result_null(cx); + else sqlite3_result_text64(cx, utf16, utf16.length, SQLITE_UTF16); + } + + public static void sqlite3_result_text16( + @NotNull sqlite3_context cx, @Nullable String text + ){ + if(null == text) sqlite3_result_null(cx); + else{ + final byte[] b = text.getBytes(StandardCharsets.UTF_16); + sqlite3_result_text64(cx, b, b.length, SQLITE_UTF16); + } + } + + private static native RollbackHookCallback sqlite3_rollback_hook( + @NotNull long ptrToDb, @Nullable RollbackHookCallback hook + ); + + public static RollbackHookCallback sqlite3_rollback_hook( + @NotNull sqlite3 db, @Nullable RollbackHookCallback hook + ){ + return sqlite3_rollback_hook(db.getNativePointer(), hook); + } + + public static native int sqlite3_set_authorizer( + @NotNull sqlite3 db, @Nullable AuthorizerCallback auth + ); + + public static native void sqlite3_set_auxdata( + @NotNull sqlite3_context cx, int n, @Nullable Object data + ); + + public static native void sqlite3_set_last_insert_rowid( + @NotNull sqlite3 db, long rowid + ); + + + /** + In addition to calling the C-level sqlite3_shutdown(), the JNI + binding also cleans up all stale per-thread state managed by the + library, as well as any registered auto-extensions, and frees up + various bits of memory. Calling this while database handles or + prepared statements are still active will leak resources. Trying + to use those objects after this routine is called invoked + undefined behavior. + */ + public static synchronized native int sqlite3_shutdown(); + + public static native int sqlite3_sleep(int ms); + + public static native String sqlite3_sourceid(); + + public static native String sqlite3_sql(@NotNull sqlite3_stmt stmt); + + //! Consider removing this. We can use sqlite3_status64() instead, + // or use that one's impl with this one's name. + public static native int sqlite3_status( + int op, @NotNull OutputPointer.Int32 pCurrent, + @NotNull OutputPointer.Int32 pHighwater, boolean reset + ); + + public static native int sqlite3_status64( + int op, @NotNull OutputPointer.Int64 pCurrent, + @NotNull OutputPointer.Int64 pHighwater, boolean reset + ); + + private static native int sqlite3_step(@NotNull long ptrToStmt); + + public static int sqlite3_step(@NotNull sqlite3_stmt stmt){ + return null==stmt ? SQLITE_MISUSE : sqlite3_step(stmt.getNativePointer()); + } + + public static native boolean sqlite3_stmt_busy(@NotNull sqlite3_stmt stmt); + + private static native int sqlite3_stmt_explain(@NotNull long ptrToStmt, int op); + + public static int sqlite3_stmt_explain(@NotNull sqlite3_stmt stmt, int op){ + return null==stmt ? 
SQLITE_MISUSE : sqlite3_stmt_explain(stmt.getNativePointer(), op); + } + + private static native int sqlite3_stmt_isexplain(@NotNull long ptrToStmt); + + public static int sqlite3_stmt_isexplain(@NotNull sqlite3_stmt stmt){ + return null==stmt ? 0 : sqlite3_stmt_isexplain(stmt.getNativePointer()); + } + + public static native boolean sqlite3_stmt_readonly(@NotNull sqlite3_stmt stmt); + + public static native int sqlite3_stmt_status( + @NotNull sqlite3_stmt stmt, int op, boolean reset + ); + + /** + Internal impl of the public sqlite3_strglob() method. Neither + argument may be null and both must be NUL-terminated UTF-8. + + This overload is private because: (A) to keep users from + inadvertently passing non-NUL-terminated byte arrays (an easy + thing to do). (B) it is cheaper to NUL-terminate the + String-to-byte-array conversion in the Java implementation + (sqlite3_strglob(String,String)) than to do that in C, so that + signature is the public-facing one. + */ + private static native int sqlite3_strglob( + @NotNull byte[] glob, @NotNull byte[] nulTerminatedUtf8 + ); + + public static int sqlite3_strglob( + @NotNull String glob, @NotNull String txt + ){ + return sqlite3_strglob(nulTerminateUtf8(glob), + nulTerminateUtf8(txt)); + } + + /** + The LIKE counterpart of the private sqlite3_strglob() method. + */ + private static native int sqlite3_strlike( + @NotNull byte[] glob, @NotNull byte[] nulTerminatedUtf8, + int escChar + ); + + public static int sqlite3_strlike( + @NotNull String glob, @NotNull String txt, char escChar + ){ + return sqlite3_strlike(nulTerminateUtf8(glob), + nulTerminateUtf8(txt), + (int)escChar); + } + + private static native int sqlite3_system_errno(@NotNull long ptrToDb); + + public static int sqlite3_system_errno(@NotNull sqlite3 db){ + return sqlite3_system_errno(db.getNativePointer()); + } + + public static native int sqlite3_table_column_metadata( + @NotNull sqlite3 db, @NotNull String zDbName, + @NotNull String zTableName, @NotNull String zColumnName, + @Nullable OutputPointer.String pzDataType, + @Nullable OutputPointer.String pzCollSeq, + @Nullable OutputPointer.Bool pNotNull, + @Nullable OutputPointer.Bool pPrimaryKey, + @Nullable OutputPointer.Bool pAutoinc + ); + + /** + Convenience overload which returns its results via a single + output object. If this function returns non-0 (error), the the + contents of the output object are not modified. + */ + public static int sqlite3_table_column_metadata( + @NotNull sqlite3 db, @NotNull String zDbName, + @NotNull String zTableName, @NotNull String zColumnName, + @NotNull TableColumnMetadata out){ + return sqlite3_table_column_metadata( + db, zDbName, zTableName, zColumnName, + out.pzDataType, out.pzCollSeq, out.pNotNull, + out.pPrimaryKey, out.pAutoinc); + } + + /** + Convenience overload which returns the column metadata object on + success and null on error. + */ + public static TableColumnMetadata sqlite3_table_column_metadata( + @NotNull sqlite3 db, @NotNull String zDbName, + @NotNull String zTableName, @NotNull String zColumnName){ + final TableColumnMetadata out = new TableColumnMetadata(); + return 0==sqlite3_table_column_metadata( + db, zDbName, zTableName, zColumnName, out + ) ? 
out : null; + } + + public static native int sqlite3_threadsafe(); + + private static native int sqlite3_total_changes(@NotNull long ptrToDb); + + public static int sqlite3_total_changes(@NotNull sqlite3 db){ + return sqlite3_total_changes(db.getNativePointer()); + } + + private static native long sqlite3_total_changes64(@NotNull long ptrToDb); + + public static long sqlite3_total_changes64(@NotNull sqlite3 db){ + return sqlite3_total_changes64(db.getNativePointer()); + } + + /** + Works like C's sqlite3_trace_v2() except that the 3rd argument to that + function is elided here because the roles of that function's 3rd and 4th + arguments are encapsulated in the final argument to this function. + +

    Unlike the C API, which is documented as always returning 0, + this implementation returns non-0 if initialization of the tracer + mapping state fails (e.g. on OOM). + */ + public static native int sqlite3_trace_v2( + @NotNull sqlite3 db, int traceMask, @Nullable TraceV2Callback tracer + ); + + public static native int sqlite3_txn_state( + @NotNull sqlite3 db, @Nullable String zSchema + ); + + private static native UpdateHookCallback sqlite3_update_hook( + @NotNull long ptrToDb, @Nullable UpdateHookCallback hook + ); + + public static UpdateHookCallback sqlite3_update_hook( + @NotNull sqlite3 db, @Nullable UpdateHookCallback hook + ){ + return sqlite3_update_hook(db.getNativePointer(), hook); + } + + /* + Note that: + + void * sqlite3_user_data(sqlite3_context*) + + Is not relevant in the JNI binding, as its feature is replaced by + the ability to pass an object, including any relevant state, to + sqlite3_create_function(). + */ + + private static native byte[] sqlite3_value_blob(@NotNull long ptrToValue); + + public static byte[] sqlite3_value_blob(@NotNull sqlite3_value v){ + return sqlite3_value_blob(v.getNativePointer()); + } + + private static native int sqlite3_value_bytes(@NotNull long ptrToValue); + + public static int sqlite3_value_bytes(@NotNull sqlite3_value v){ + return sqlite3_value_bytes(v.getNativePointer()); + } + + private static native int sqlite3_value_bytes16(@NotNull long ptrToValue); + + public static int sqlite3_value_bytes16(@NotNull sqlite3_value v){ + return sqlite3_value_bytes16(v.getNativePointer()); + } + + private static native double sqlite3_value_double(@NotNull long ptrToValue); + + public static double sqlite3_value_double(@NotNull sqlite3_value v){ + return sqlite3_value_double(v.getNativePointer()); + } + + private static native sqlite3_value sqlite3_value_dup(@NotNull long ptrToValue); + + public static sqlite3_value sqlite3_value_dup(@NotNull sqlite3_value v){ + return sqlite3_value_dup(v.getNativePointer()); + } + + private static native int sqlite3_value_encoding(@NotNull long ptrToValue); + + public static int sqlite3_value_encoding(@NotNull sqlite3_value v){ + return sqlite3_value_encoding(v.getNativePointer()); + } + + private static native void sqlite3_value_free(@Nullable long ptrToValue); + + public static void sqlite3_value_free(@Nullable sqlite3_value v){ + if( null!=v ) sqlite3_value_free(v.clearNativePointer()); + } + + private static native boolean sqlite3_value_frombind(@NotNull long ptrToValue); + + public static boolean sqlite3_value_frombind(@NotNull sqlite3_value v){ + return sqlite3_value_frombind(v.getNativePointer()); + } + + private static native int sqlite3_value_int(@NotNull long ptrToValue); + + public static int sqlite3_value_int(@NotNull sqlite3_value v){ + return sqlite3_value_int(v.getNativePointer()); + } + + private static native long sqlite3_value_int64(@NotNull long ptrToValue); + + public static long sqlite3_value_int64(@NotNull sqlite3_value v){ + return sqlite3_value_int64(v.getNativePointer()); + } + + private static native Object sqlite3_value_java_object(@NotNull long ptrToValue); + + /** + If the given value was set using {@link + #sqlite3_result_java_object} then this function returns that + object, else it returns null. + +

    It is up to the caller to inspect the object to determine its + type, and cast it if necessary. + */ + public static Object sqlite3_value_java_object(@NotNull sqlite3_value v){ + return sqlite3_value_java_object(v.getNativePointer()); + } + + /** + A variant of sqlite3_value_java_object() which returns the + fetched object cast to T if the object is an instance of the + given Class, else it returns null. + */ + @SuppressWarnings("unchecked") + public static <T> T sqlite3_value_java_object(@NotNull sqlite3_value v, + @NotNull Class<T> type){ + final Object o = sqlite3_value_java_object(v); + return type.isInstance(o) ? (T)o : null; + } + + /** + A variant of sqlite3_value_blob() which returns the blob as a + ByteBuffer object. Returns null if its argument is null, if + sqlite3_jni_supports_nio() is false, or if sqlite3_value_blob() + would return null for the same input. + */ + @Experimental + /*public*/ static native java.nio.ByteBuffer sqlite3_value_nio_buffer( + @NotNull sqlite3_value v + ); + + private static native int sqlite3_value_nochange(@NotNull long ptrToValue); + + public static int sqlite3_value_nochange(@NotNull sqlite3_value v){ + return sqlite3_value_nochange(v.getNativePointer()); + } + + private static native int sqlite3_value_numeric_type(@NotNull long ptrToValue); + + public static int sqlite3_value_numeric_type(@NotNull sqlite3_value v){ + return sqlite3_value_numeric_type(v.getNativePointer()); + } + + private static native int sqlite3_value_subtype(@NotNull long ptrToValue); + + public static int sqlite3_value_subtype(@NotNull sqlite3_value v){ + return sqlite3_value_subtype(v.getNativePointer()); + } + + private static native byte[] sqlite3_value_text(@NotNull long ptrToValue); + + /** + Functions identically to the C API, and this note is just to + stress that the returned bytes are encoded as UTF-8. It returns + null if the underlying C-level sqlite3_value_text() returns NULL + or on allocation error. + */ + public static byte[] sqlite3_value_text(@NotNull sqlite3_value v){ + return sqlite3_value_text(v.getNativePointer()); + } + + private static native String sqlite3_value_text16(@NotNull long ptrToValue); + + public static String sqlite3_value_text16(@NotNull sqlite3_value v){ + return sqlite3_value_text16(v.getNativePointer()); + } + + private static native int sqlite3_value_type(@NotNull long ptrToValue); + + public static int sqlite3_value_type(@NotNull sqlite3_value v){ + return sqlite3_value_type(v.getNativePointer()); + } + + /** + This is NOT part of the public API. It exists solely as a place + for this code's developers to collect internal metrics and such. + It has no stable interface. It may go away or change behavior at + any time. + */ + public static native void sqlite3_jni_internal_details(); + + ////////////////////////////////////////////////////////////////////// + // SQLITE_... constants follow... 
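The sqlite3_value_* wrappers above compose in the obvious way. The following sketch is illustrative only and is not part of this patch: the `ValueDump` helper is hypothetical, and it relies on the type constants (`SQLITE_INTEGER`, `SQLITE_FLOAT`, and friends) defined immediately below in this class.

```java
import java.nio.charset.StandardCharsets;
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.sqlite3_value;

/** Hypothetical helper: renders a sqlite3_value for logging using only
    the wrappers declared above. */
final class ValueDump {
  static String describe(sqlite3_value v){
    switch( CApi.sqlite3_value_type(v) ){
      case CApi.SQLITE_INTEGER: return Long.toString(CApi.sqlite3_value_int64(v));
      case CApi.SQLITE_FLOAT:   return Double.toString(CApi.sqlite3_value_double(v));
      case CApi.SQLITE_NULL:    return "NULL";
      case CApi.SQLITE_BLOB:    return "blob(" + CApi.sqlite3_value_bytes(v) + " bytes)";
      default: {
        // SQLITE_TEXT: sqlite3_value_text() returns UTF-8 bytes, or null on OOM.
        final byte[] utf8 = CApi.sqlite3_value_text(v);
        return null==utf8 ? "NULL" : new String(utf8, StandardCharsets.UTF_8);
      }
    }
  }
}
```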
+ + // version info + public static final int SQLITE_VERSION_NUMBER = sqlite3_libversion_number(); + public static final String SQLITE_VERSION = sqlite3_libversion(); + public static final String SQLITE_SOURCE_ID = sqlite3_sourceid(); + + // access + public static final int SQLITE_ACCESS_EXISTS = 0; + public static final int SQLITE_ACCESS_READWRITE = 1; + public static final int SQLITE_ACCESS_READ = 2; + + // authorizer + public static final int SQLITE_DENY = 1; + public static final int SQLITE_IGNORE = 2; + public static final int SQLITE_CREATE_INDEX = 1; + public static final int SQLITE_CREATE_TABLE = 2; + public static final int SQLITE_CREATE_TEMP_INDEX = 3; + public static final int SQLITE_CREATE_TEMP_TABLE = 4; + public static final int SQLITE_CREATE_TEMP_TRIGGER = 5; + public static final int SQLITE_CREATE_TEMP_VIEW = 6; + public static final int SQLITE_CREATE_TRIGGER = 7; + public static final int SQLITE_CREATE_VIEW = 8; + public static final int SQLITE_DELETE = 9; + public static final int SQLITE_DROP_INDEX = 10; + public static final int SQLITE_DROP_TABLE = 11; + public static final int SQLITE_DROP_TEMP_INDEX = 12; + public static final int SQLITE_DROP_TEMP_TABLE = 13; + public static final int SQLITE_DROP_TEMP_TRIGGER = 14; + public static final int SQLITE_DROP_TEMP_VIEW = 15; + public static final int SQLITE_DROP_TRIGGER = 16; + public static final int SQLITE_DROP_VIEW = 17; + public static final int SQLITE_INSERT = 18; + public static final int SQLITE_PRAGMA = 19; + public static final int SQLITE_READ = 20; + public static final int SQLITE_SELECT = 21; + public static final int SQLITE_TRANSACTION = 22; + public static final int SQLITE_UPDATE = 23; + public static final int SQLITE_ATTACH = 24; + public static final int SQLITE_DETACH = 25; + public static final int SQLITE_ALTER_TABLE = 26; + public static final int SQLITE_REINDEX = 27; + public static final int SQLITE_ANALYZE = 28; + public static final int SQLITE_CREATE_VTABLE = 29; + public static final int SQLITE_DROP_VTABLE = 30; + public static final int SQLITE_FUNCTION = 31; + public static final int SQLITE_SAVEPOINT = 32; + public static final int SQLITE_RECURSIVE = 33; + + // blob finalizers: these should, because they are treated as + // special pointer values in C, ideally have the same sizeof() as + // the platform's (void*), but we can't know that size from here. 
+ public static final long SQLITE_STATIC = 0; + public static final long SQLITE_TRANSIENT = -1; + + // changeset + public static final int SQLITE_CHANGESETSTART_INVERT = 2; + public static final int SQLITE_CHANGESETAPPLY_NOSAVEPOINT = 1; + public static final int SQLITE_CHANGESETAPPLY_INVERT = 2; + public static final int SQLITE_CHANGESETAPPLY_IGNORENOOP = 4; + public static final int SQLITE_CHANGESET_DATA = 1; + public static final int SQLITE_CHANGESET_NOTFOUND = 2; + public static final int SQLITE_CHANGESET_CONFLICT = 3; + public static final int SQLITE_CHANGESET_CONSTRAINT = 4; + public static final int SQLITE_CHANGESET_FOREIGN_KEY = 5; + public static final int SQLITE_CHANGESET_OMIT = 0; + public static final int SQLITE_CHANGESET_REPLACE = 1; + public static final int SQLITE_CHANGESET_ABORT = 2; + + // config + public static final int SQLITE_CONFIG_SINGLETHREAD = 1; + public static final int SQLITE_CONFIG_MULTITHREAD = 2; + public static final int SQLITE_CONFIG_SERIALIZED = 3; + public static final int SQLITE_CONFIG_MALLOC = 4; + public static final int SQLITE_CONFIG_GETMALLOC = 5; + public static final int SQLITE_CONFIG_SCRATCH = 6; + public static final int SQLITE_CONFIG_PAGECACHE = 7; + public static final int SQLITE_CONFIG_HEAP = 8; + public static final int SQLITE_CONFIG_MEMSTATUS = 9; + public static final int SQLITE_CONFIG_MUTEX = 10; + public static final int SQLITE_CONFIG_GETMUTEX = 11; + public static final int SQLITE_CONFIG_LOOKASIDE = 13; + public static final int SQLITE_CONFIG_PCACHE = 14; + public static final int SQLITE_CONFIG_GETPCACHE = 15; + public static final int SQLITE_CONFIG_LOG = 16; + public static final int SQLITE_CONFIG_URI = 17; + public static final int SQLITE_CONFIG_PCACHE2 = 18; + public static final int SQLITE_CONFIG_GETPCACHE2 = 19; + public static final int SQLITE_CONFIG_COVERING_INDEX_SCAN = 20; + public static final int SQLITE_CONFIG_SQLLOG = 21; + public static final int SQLITE_CONFIG_MMAP_SIZE = 22; + public static final int SQLITE_CONFIG_WIN32_HEAPSIZE = 23; + public static final int SQLITE_CONFIG_PCACHE_HDRSZ = 24; + public static final int SQLITE_CONFIG_PMASZ = 25; + public static final int SQLITE_CONFIG_STMTJRNL_SPILL = 26; + public static final int SQLITE_CONFIG_SMALL_MALLOC = 27; + public static final int SQLITE_CONFIG_SORTERREF_SIZE = 28; + public static final int SQLITE_CONFIG_MEMDB_MAXSIZE = 29; + + // data types + public static final int SQLITE_INTEGER = 1; + public static final int SQLITE_FLOAT = 2; + public static final int SQLITE_TEXT = 3; + public static final int SQLITE_BLOB = 4; + public static final int SQLITE_NULL = 5; + + // db config + public static final int SQLITE_DBCONFIG_MAINDBNAME = 1000; + public static final int SQLITE_DBCONFIG_LOOKASIDE = 1001; + public static final int SQLITE_DBCONFIG_ENABLE_FKEY = 1002; + public static final int SQLITE_DBCONFIG_ENABLE_TRIGGER = 1003; + public static final int SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER = 1004; + public static final int SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION = 1005; + public static final int SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE = 1006; + public static final int SQLITE_DBCONFIG_ENABLE_QPSG = 1007; + public static final int SQLITE_DBCONFIG_TRIGGER_EQP = 1008; + public static final int SQLITE_DBCONFIG_RESET_DATABASE = 1009; + public static final int SQLITE_DBCONFIG_DEFENSIVE = 1010; + public static final int SQLITE_DBCONFIG_WRITABLE_SCHEMA = 1011; + public static final int SQLITE_DBCONFIG_LEGACY_ALTER_TABLE = 1012; + public static final int SQLITE_DBCONFIG_DQS_DML = 1013; + public 
static final int SQLITE_DBCONFIG_DQS_DDL = 1014; + public static final int SQLITE_DBCONFIG_ENABLE_VIEW = 1015; + public static final int SQLITE_DBCONFIG_LEGACY_FILE_FORMAT = 1016; + public static final int SQLITE_DBCONFIG_TRUSTED_SCHEMA = 1017; + public static final int SQLITE_DBCONFIG_STMT_SCANSTATUS = 1018; + public static final int SQLITE_DBCONFIG_REVERSE_SCANORDER = 1019; + public static final int SQLITE_DBCONFIG_MAX = 1019; + + // db status + public static final int SQLITE_DBSTATUS_LOOKASIDE_USED = 0; + public static final int SQLITE_DBSTATUS_CACHE_USED = 1; + public static final int SQLITE_DBSTATUS_SCHEMA_USED = 2; + public static final int SQLITE_DBSTATUS_STMT_USED = 3; + public static final int SQLITE_DBSTATUS_LOOKASIDE_HIT = 4; + public static final int SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE = 5; + public static final int SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL = 6; + public static final int SQLITE_DBSTATUS_CACHE_HIT = 7; + public static final int SQLITE_DBSTATUS_CACHE_MISS = 8; + public static final int SQLITE_DBSTATUS_CACHE_WRITE = 9; + public static final int SQLITE_DBSTATUS_DEFERRED_FKS = 10; + public static final int SQLITE_DBSTATUS_CACHE_USED_SHARED = 11; + public static final int SQLITE_DBSTATUS_CACHE_SPILL = 12; + public static final int SQLITE_DBSTATUS_MAX = 12; + + // encodings + public static final int SQLITE_UTF8 = 1; + public static final int SQLITE_UTF16LE = 2; + public static final int SQLITE_UTF16BE = 3; + public static final int SQLITE_UTF16 = 4; + public static final int SQLITE_UTF16_ALIGNED = 8; + + // fcntl + public static final int SQLITE_FCNTL_LOCKSTATE = 1; + public static final int SQLITE_FCNTL_GET_LOCKPROXYFILE = 2; + public static final int SQLITE_FCNTL_SET_LOCKPROXYFILE = 3; + public static final int SQLITE_FCNTL_LAST_ERRNO = 4; + public static final int SQLITE_FCNTL_SIZE_HINT = 5; + public static final int SQLITE_FCNTL_CHUNK_SIZE = 6; + public static final int SQLITE_FCNTL_FILE_POINTER = 7; + public static final int SQLITE_FCNTL_SYNC_OMITTED = 8; + public static final int SQLITE_FCNTL_WIN32_AV_RETRY = 9; + public static final int SQLITE_FCNTL_PERSIST_WAL = 10; + public static final int SQLITE_FCNTL_OVERWRITE = 11; + public static final int SQLITE_FCNTL_VFSNAME = 12; + public static final int SQLITE_FCNTL_POWERSAFE_OVERWRITE = 13; + public static final int SQLITE_FCNTL_PRAGMA = 14; + public static final int SQLITE_FCNTL_BUSYHANDLER = 15; + public static final int SQLITE_FCNTL_TEMPFILENAME = 16; + public static final int SQLITE_FCNTL_MMAP_SIZE = 18; + public static final int SQLITE_FCNTL_TRACE = 19; + public static final int SQLITE_FCNTL_HAS_MOVED = 20; + public static final int SQLITE_FCNTL_SYNC = 21; + public static final int SQLITE_FCNTL_COMMIT_PHASETWO = 22; + public static final int SQLITE_FCNTL_WIN32_SET_HANDLE = 23; + public static final int SQLITE_FCNTL_WAL_BLOCK = 24; + public static final int SQLITE_FCNTL_ZIPVFS = 25; + public static final int SQLITE_FCNTL_RBU = 26; + public static final int SQLITE_FCNTL_VFS_POINTER = 27; + public static final int SQLITE_FCNTL_JOURNAL_POINTER = 28; + public static final int SQLITE_FCNTL_WIN32_GET_HANDLE = 29; + public static final int SQLITE_FCNTL_PDB = 30; + public static final int SQLITE_FCNTL_BEGIN_ATOMIC_WRITE = 31; + public static final int SQLITE_FCNTL_COMMIT_ATOMIC_WRITE = 32; + public static final int SQLITE_FCNTL_ROLLBACK_ATOMIC_WRITE = 33; + public static final int SQLITE_FCNTL_LOCK_TIMEOUT = 34; + public static final int SQLITE_FCNTL_DATA_VERSION = 35; + public static final int SQLITE_FCNTL_SIZE_LIMIT = 36; 
+ public static final int SQLITE_FCNTL_CKPT_DONE = 37; + public static final int SQLITE_FCNTL_RESERVE_BYTES = 38; + public static final int SQLITE_FCNTL_CKPT_START = 39; + public static final int SQLITE_FCNTL_EXTERNAL_READER = 40; + public static final int SQLITE_FCNTL_CKSM_FILE = 41; + public static final int SQLITE_FCNTL_RESET_CACHE = 42; + + // flock + public static final int SQLITE_LOCK_NONE = 0; + public static final int SQLITE_LOCK_SHARED = 1; + public static final int SQLITE_LOCK_RESERVED = 2; + public static final int SQLITE_LOCK_PENDING = 3; + public static final int SQLITE_LOCK_EXCLUSIVE = 4; + + // iocap + public static final int SQLITE_IOCAP_ATOMIC = 1; + public static final int SQLITE_IOCAP_ATOMIC512 = 2; + public static final int SQLITE_IOCAP_ATOMIC1K = 4; + public static final int SQLITE_IOCAP_ATOMIC2K = 8; + public static final int SQLITE_IOCAP_ATOMIC4K = 16; + public static final int SQLITE_IOCAP_ATOMIC8K = 32; + public static final int SQLITE_IOCAP_ATOMIC16K = 64; + public static final int SQLITE_IOCAP_ATOMIC32K = 128; + public static final int SQLITE_IOCAP_ATOMIC64K = 256; + public static final int SQLITE_IOCAP_SAFE_APPEND = 512; + public static final int SQLITE_IOCAP_SEQUENTIAL = 1024; + public static final int SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN = 2048; + public static final int SQLITE_IOCAP_POWERSAFE_OVERWRITE = 4096; + public static final int SQLITE_IOCAP_IMMUTABLE = 8192; + public static final int SQLITE_IOCAP_BATCH_ATOMIC = 16384; + + // limits + public static final int SQLITE_LIMIT_LENGTH = 0; + public static final int SQLITE_LIMIT_SQL_LENGTH = 1; + public static final int SQLITE_LIMIT_COLUMN = 2; + public static final int SQLITE_LIMIT_EXPR_DEPTH = 3; + public static final int SQLITE_LIMIT_COMPOUND_SELECT = 4; + public static final int SQLITE_LIMIT_VDBE_OP = 5; + public static final int SQLITE_LIMIT_FUNCTION_ARG = 6; + public static final int SQLITE_LIMIT_ATTACHED = 7; + public static final int SQLITE_LIMIT_LIKE_PATTERN_LENGTH = 8; + public static final int SQLITE_LIMIT_VARIABLE_NUMBER = 9; + public static final int SQLITE_LIMIT_TRIGGER_DEPTH = 10; + public static final int SQLITE_LIMIT_WORKER_THREADS = 11; + + // open flags + + public static final int SQLITE_OPEN_READONLY = 0x00000001 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_READWRITE = 0x00000002 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_CREATE = 0x00000004 /* Ok for sqlite3_open_v2() */; + //public static final int SQLITE_OPEN_DELETEONCLOSE = 0x00000008 /* VFS only */; + //public static final int SQLITE_OPEN_EXCLUSIVE = 0x00000010 /* VFS only */; + //public static final int SQLITE_OPEN_AUTOPROXY = 0x00000020 /* VFS only */; + public static final int SQLITE_OPEN_URI = 0x00000040 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_MEMORY = 0x00000080 /* Ok for sqlite3_open_v2() */; + //public static final int SQLITE_OPEN_MAIN_DB = 0x00000100 /* VFS only */; + //public static final int SQLITE_OPEN_TEMP_DB = 0x00000200 /* VFS only */; + //public static final int SQLITE_OPEN_TRANSIENT_DB = 0x00000400 /* VFS only */; + //public static final int SQLITE_OPEN_MAIN_JOURNAL = 0x00000800 /* VFS only */; + //public static final int SQLITE_OPEN_TEMP_JOURNAL = 0x00001000 /* VFS only */; + //public static final int SQLITE_OPEN_SUBJOURNAL = 0x00002000 /* VFS only */; + //public static final int SQLITE_OPEN_SUPER_JOURNAL = 0x00004000 /* VFS only */; + public static final int SQLITE_OPEN_NOMUTEX = 0x00008000 /* Ok for sqlite3_open_v2() */; + public static 
final int SQLITE_OPEN_FULLMUTEX = 0x00010000 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_SHAREDCACHE = 0x00020000 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_PRIVATECACHE = 0x00040000 /* Ok for sqlite3_open_v2() */; + //public static final int SQLITE_OPEN_WAL = 0x00080000 /* VFS only */; + public static final int SQLITE_OPEN_NOFOLLOW = 0x01000000 /* Ok for sqlite3_open_v2() */; + public static final int SQLITE_OPEN_EXRESCODE = 0x02000000 /* Extended result codes */; + + // prepare flags + public static final int SQLITE_PREPARE_PERSISTENT = 1; + public static final int SQLITE_PREPARE_NO_VTAB = 4; + + // result codes + public static final int SQLITE_OK = 0; + public static final int SQLITE_ERROR = 1; + public static final int SQLITE_INTERNAL = 2; + public static final int SQLITE_PERM = 3; + public static final int SQLITE_ABORT = 4; + public static final int SQLITE_BUSY = 5; + public static final int SQLITE_LOCKED = 6; + public static final int SQLITE_NOMEM = 7; + public static final int SQLITE_READONLY = 8; + public static final int SQLITE_INTERRUPT = 9; + public static final int SQLITE_IOERR = 10; + public static final int SQLITE_CORRUPT = 11; + public static final int SQLITE_NOTFOUND = 12; + public static final int SQLITE_FULL = 13; + public static final int SQLITE_CANTOPEN = 14; + public static final int SQLITE_PROTOCOL = 15; + public static final int SQLITE_EMPTY = 16; + public static final int SQLITE_SCHEMA = 17; + public static final int SQLITE_TOOBIG = 18; + public static final int SQLITE_CONSTRAINT = 19; + public static final int SQLITE_MISMATCH = 20; + public static final int SQLITE_MISUSE = 21; + public static final int SQLITE_NOLFS = 22; + public static final int SQLITE_AUTH = 23; + public static final int SQLITE_FORMAT = 24; + public static final int SQLITE_RANGE = 25; + public static final int SQLITE_NOTADB = 26; + public static final int SQLITE_NOTICE = 27; + public static final int SQLITE_WARNING = 28; + public static final int SQLITE_ROW = 100; + public static final int SQLITE_DONE = 101; + public static final int SQLITE_ERROR_MISSING_COLLSEQ = 257; + public static final int SQLITE_ERROR_RETRY = 513; + public static final int SQLITE_ERROR_SNAPSHOT = 769; + public static final int SQLITE_IOERR_READ = 266; + public static final int SQLITE_IOERR_SHORT_READ = 522; + public static final int SQLITE_IOERR_WRITE = 778; + public static final int SQLITE_IOERR_FSYNC = 1034; + public static final int SQLITE_IOERR_DIR_FSYNC = 1290; + public static final int SQLITE_IOERR_TRUNCATE = 1546; + public static final int SQLITE_IOERR_FSTAT = 1802; + public static final int SQLITE_IOERR_UNLOCK = 2058; + public static final int SQLITE_IOERR_RDLOCK = 2314; + public static final int SQLITE_IOERR_DELETE = 2570; + public static final int SQLITE_IOERR_BLOCKED = 2826; + public static final int SQLITE_IOERR_NOMEM = 3082; + public static final int SQLITE_IOERR_ACCESS = 3338; + public static final int SQLITE_IOERR_CHECKRESERVEDLOCK = 3594; + public static final int SQLITE_IOERR_LOCK = 3850; + public static final int SQLITE_IOERR_CLOSE = 4106; + public static final int SQLITE_IOERR_DIR_CLOSE = 4362; + public static final int SQLITE_IOERR_SHMOPEN = 4618; + public static final int SQLITE_IOERR_SHMSIZE = 4874; + public static final int SQLITE_IOERR_SHMLOCK = 5130; + public static final int SQLITE_IOERR_SHMMAP = 5386; + public static final int SQLITE_IOERR_SEEK = 5642; + public static final int SQLITE_IOERR_DELETE_NOENT = 5898; + public static final int 
SQLITE_IOERR_MMAP = 6154; + public static final int SQLITE_IOERR_GETTEMPPATH = 6410; + public static final int SQLITE_IOERR_CONVPATH = 6666; + public static final int SQLITE_IOERR_VNODE = 6922; + public static final int SQLITE_IOERR_AUTH = 7178; + public static final int SQLITE_IOERR_BEGIN_ATOMIC = 7434; + public static final int SQLITE_IOERR_COMMIT_ATOMIC = 7690; + public static final int SQLITE_IOERR_ROLLBACK_ATOMIC = 7946; + public static final int SQLITE_IOERR_DATA = 8202; + public static final int SQLITE_IOERR_CORRUPTFS = 8458; + public static final int SQLITE_LOCKED_SHAREDCACHE = 262; + public static final int SQLITE_LOCKED_VTAB = 518; + public static final int SQLITE_BUSY_RECOVERY = 261; + public static final int SQLITE_BUSY_SNAPSHOT = 517; + public static final int SQLITE_BUSY_TIMEOUT = 773; + public static final int SQLITE_CANTOPEN_NOTEMPDIR = 270; + public static final int SQLITE_CANTOPEN_ISDIR = 526; + public static final int SQLITE_CANTOPEN_FULLPATH = 782; + public static final int SQLITE_CANTOPEN_CONVPATH = 1038; + public static final int SQLITE_CANTOPEN_SYMLINK = 1550; + public static final int SQLITE_CORRUPT_VTAB = 267; + public static final int SQLITE_CORRUPT_SEQUENCE = 523; + public static final int SQLITE_CORRUPT_INDEX = 779; + public static final int SQLITE_READONLY_RECOVERY = 264; + public static final int SQLITE_READONLY_CANTLOCK = 520; + public static final int SQLITE_READONLY_ROLLBACK = 776; + public static final int SQLITE_READONLY_DBMOVED = 1032; + public static final int SQLITE_READONLY_CANTINIT = 1288; + public static final int SQLITE_READONLY_DIRECTORY = 1544; + public static final int SQLITE_ABORT_ROLLBACK = 516; + public static final int SQLITE_CONSTRAINT_CHECK = 275; + public static final int SQLITE_CONSTRAINT_COMMITHOOK = 531; + public static final int SQLITE_CONSTRAINT_FOREIGNKEY = 787; + public static final int SQLITE_CONSTRAINT_FUNCTION = 1043; + public static final int SQLITE_CONSTRAINT_NOTNULL = 1299; + public static final int SQLITE_CONSTRAINT_PRIMARYKEY = 1555; + public static final int SQLITE_CONSTRAINT_TRIGGER = 1811; + public static final int SQLITE_CONSTRAINT_UNIQUE = 2067; + public static final int SQLITE_CONSTRAINT_VTAB = 2323; + public static final int SQLITE_CONSTRAINT_ROWID = 2579; + public static final int SQLITE_CONSTRAINT_PINNED = 2835; + public static final int SQLITE_CONSTRAINT_DATATYPE = 3091; + public static final int SQLITE_NOTICE_RECOVER_WAL = 283; + public static final int SQLITE_NOTICE_RECOVER_ROLLBACK = 539; + public static final int SQLITE_WARNING_AUTOINDEX = 284; + public static final int SQLITE_AUTH_USER = 279; + public static final int SQLITE_OK_LOAD_PERMANENTLY = 256; + + // serialize + public static final int SQLITE_SERIALIZE_NOCOPY = 1; + public static final int SQLITE_DESERIALIZE_FREEONCLOSE = 1; + public static final int SQLITE_DESERIALIZE_READONLY = 4; + public static final int SQLITE_DESERIALIZE_RESIZEABLE = 2; + + // session + public static final int SQLITE_SESSION_CONFIG_STRMSIZE = 1; + public static final int SQLITE_SESSION_OBJCONFIG_SIZE = 1; + + // sqlite3 status + public static final int SQLITE_STATUS_MEMORY_USED = 0; + public static final int SQLITE_STATUS_PAGECACHE_USED = 1; + public static final int SQLITE_STATUS_PAGECACHE_OVERFLOW = 2; + public static final int SQLITE_STATUS_MALLOC_SIZE = 5; + public static final int SQLITE_STATUS_PARSER_STACK = 6; + public static final int SQLITE_STATUS_PAGECACHE_SIZE = 7; + public static final int SQLITE_STATUS_MALLOC_COUNT = 9; + + // stmt status + public static final int 
SQLITE_STMTSTATUS_FULLSCAN_STEP = 1; + public static final int SQLITE_STMTSTATUS_SORT = 2; + public static final int SQLITE_STMTSTATUS_AUTOINDEX = 3; + public static final int SQLITE_STMTSTATUS_VM_STEP = 4; + public static final int SQLITE_STMTSTATUS_REPREPARE = 5; + public static final int SQLITE_STMTSTATUS_RUN = 6; + public static final int SQLITE_STMTSTATUS_FILTER_MISS = 7; + public static final int SQLITE_STMTSTATUS_FILTER_HIT = 8; + public static final int SQLITE_STMTSTATUS_MEMUSED = 99; + + // sync flags + public static final int SQLITE_SYNC_NORMAL = 2; + public static final int SQLITE_SYNC_FULL = 3; + public static final int SQLITE_SYNC_DATAONLY = 16; + + // tracing flags + public static final int SQLITE_TRACE_STMT = 1; + public static final int SQLITE_TRACE_PROFILE = 2; + public static final int SQLITE_TRACE_ROW = 4; + public static final int SQLITE_TRACE_CLOSE = 8; + + // transaction state + public static final int SQLITE_TXN_NONE = 0; + public static final int SQLITE_TXN_READ = 1; + public static final int SQLITE_TXN_WRITE = 2; + + // udf flags + public static final int SQLITE_DETERMINISTIC = 0x000000800; + public static final int SQLITE_DIRECTONLY = 0x000080000; + public static final int SQLITE_SUBTYPE = 0x000100000; + public static final int SQLITE_INNOCUOUS = 0x000200000; + public static final int SQLITE_RESULT_SUBTYPE = 0x001000000; + + // virtual tables + public static final int SQLITE_INDEX_SCAN_UNIQUE = 1; + public static final int SQLITE_INDEX_CONSTRAINT_EQ = 2; + public static final int SQLITE_INDEX_CONSTRAINT_GT = 4; + public static final int SQLITE_INDEX_CONSTRAINT_LE = 8; + public static final int SQLITE_INDEX_CONSTRAINT_LT = 16; + public static final int SQLITE_INDEX_CONSTRAINT_GE = 32; + public static final int SQLITE_INDEX_CONSTRAINT_MATCH = 64; + public static final int SQLITE_INDEX_CONSTRAINT_LIKE = 65; + public static final int SQLITE_INDEX_CONSTRAINT_GLOB = 66; + public static final int SQLITE_INDEX_CONSTRAINT_REGEXP = 67; + public static final int SQLITE_INDEX_CONSTRAINT_NE = 68; + public static final int SQLITE_INDEX_CONSTRAINT_ISNOT = 69; + public static final int SQLITE_INDEX_CONSTRAINT_ISNOTNULL = 70; + public static final int SQLITE_INDEX_CONSTRAINT_ISNULL = 71; + public static final int SQLITE_INDEX_CONSTRAINT_IS = 72; + public static final int SQLITE_INDEX_CONSTRAINT_LIMIT = 73; + public static final int SQLITE_INDEX_CONSTRAINT_OFFSET = 74; + public static final int SQLITE_INDEX_CONSTRAINT_FUNCTION = 150; + public static final int SQLITE_VTAB_CONSTRAINT_SUPPORT = 1; + public static final int SQLITE_VTAB_INNOCUOUS = 2; + public static final int SQLITE_VTAB_DIRECTONLY = 3; + public static final int SQLITE_VTAB_USES_ALL_SCHEMAS = 4; + public static final int SQLITE_ROLLBACK = 1; + public static final int SQLITE_FAIL = 3; + public static final int SQLITE_REPLACE = 5; + static { + init(); + } + /* Must come after static init(). */ + private static final boolean JNI_SUPPORTS_NIO = sqlite3_jni_supports_nio(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java b/ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java new file mode 100644 index 0000000000..04000a3f31 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/CallbackProxy.java @@ -0,0 +1,45 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. 
+** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +/** + This marker interface exists solely for use as a documentation and + class-grouping tool. It should be applied to interfaces or + classes which have a call() method implementing some specific + callback interface on behalf of the C library. + +

    Unless very explicitly documented otherwise, callbacks must + never throw. Any which do throw but should not might trigger debug + output regarding the error, but the exception will not be + propagated. For callback interfaces which support returning error + info to the core, the JNI binding will convert any exceptions to + C-level error information. For callback interfaces which do not + support returning error information, all exceptions will + necessarily be suppressed in order to retain the C-style no-throw + semantics and avoid invoking undefined behavior in the C layer. + +
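As a concrete illustration of that no-throw contract, a callback can catch its own exceptions and report failure through its return value. This sketch uses the `CommitHookCallback` interface added later in this patch; the `SafeCommitHook` class is hypothetical, and the registration call in the trailing comment assumes a `CApi.sqlite3_commit_hook(db, hook)` wrapper shaped like the `sqlite3_update_hook()` wrapper shown earlier.

```java
import org.sqlite.jni.capi.CApi;
import org.sqlite.jni.capi.CommitHookCallback;

/** Hypothetical commit hook which honors the no-throw rule by catching
    its own exceptions and signalling failure via its return value. */
final class SafeCommitHook implements CommitHookCallback {
  @Override public int call(){
    try{
      // ... application-level bookkeeping would go here ...
      return 0;                  // 0 allows the commit to proceed
    }catch(Exception e){
      return CApi.SQLITE_ERROR;  // non-0 converts the commit into a rollback
    }
  }
}
// Registration (assumed signature): CApi.sqlite3_commit_hook(db, new SafeCommitHook());
```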

    Callbacks of this style follow a common naming convention: + +

    1) They use the UpperCamelCase form of the C function they're + proxying for, minus the {@code sqlite3_} prefix, plus a {@code + Callback} suffix. e.g. {@code sqlite3_busy_handler()}'s callback is + named {@code BusyHandlerCallback}. Exceptions are made where that + would potentially be ambiguous, e.g. {@link ConfigSqlLogCallback} + instead of {@code ConfigCallback} because the {@code + sqlite3_config()} interface may need to support more callback types + in the future. + +

    2) They all have a {@code call()} method but its signature is + callback-specific. +*/ +public interface CallbackProxy {} diff --git a/ext/jni/src/org/sqlite/jni/capi/CollationCallback.java b/ext/jni/src/org/sqlite/jni/capi/CollationCallback.java new file mode 100644 index 0000000000..ed8bd09475 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/CollationCallback.java @@ -0,0 +1,35 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +import org.sqlite.jni.annotation.NotNull; + +/** + Callback for use with {@link CApi#sqlite3_create_collation}. + + @see AbstractCollationCallback +*/ +public interface CollationCallback + extends CallbackProxy, XDestroyCallback { + /** + Must compare the given byte arrays and return the result using + {@code memcmp()} semantics. + */ + int call(@NotNull byte[] lhs, @NotNull byte[] rhs); + + /** + Called by SQLite when the collation is destroyed. If a collation + requires custom cleanup, override this method. + */ + void xDestroy(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java b/ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java new file mode 100644 index 0000000000..ffd7fa94ab --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/CollationNeededCallback.java @@ -0,0 +1,29 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_collation_needed}. +*/ +public interface CollationNeededCallback extends CallbackProxy { + /** + Has the same semantics as the C-level sqlite3_create_collation() + callback. + +
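As an example of that convention, the sketch below implements the `CollationCallback` interface introduced above with plain memcmp()-style ordering. The `BytewiseCollation` class is hypothetical; registering it goes through `CApi.sqlite3_create_collation()`, whose exact Java signature is not shown in this excerpt.

```java
import org.sqlite.jni.capi.CollationCallback;

/** Hypothetical collation implementing memcmp()-style byte ordering. */
final class BytewiseCollation implements CollationCallback {
  @Override public int call(byte[] lhs, byte[] rhs){
    final int n = Math.min(lhs.length, rhs.length);
    for( int i = 0; i < n; ++i ){
      // Compare as unsigned bytes, as memcmp() does.
      final int d = (lhs[i] & 0xFF) - (rhs[i] & 0xFF);
      if( 0 != d ) return d;
    }
    return lhs.length - rhs.length;  // shorter input sorts first on a tie
  }
  @Override public void xDestroy(){ /* nothing to clean up */ }
}
```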

    Because the C API has no mechanism for reporting errors + from this callbacks, any exceptions thrown by this callback + are suppressed. + */ + void call(sqlite3 db, int eTextRep, String collationName); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java b/ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java new file mode 100644 index 0000000000..e1e55c78d2 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/CommitHookCallback.java @@ -0,0 +1,26 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_commit_hook}. +*/ +public interface CommitHookCallback extends CallbackProxy { + /** + Works as documented for the C-level sqlite3_commit_hook() + callback. If it throws, the exception is translated into + a db-level error. + */ + int call(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ConfigLogCallback.java b/ext/jni/src/org/sqlite/jni/capi/ConfigLogCallback.java new file mode 100644 index 0000000000..6513b0730d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ConfigLogCallback.java @@ -0,0 +1,25 @@ +/* +** 2023-08-23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A callback for use with sqlite3_config(). +*/ +public interface ConfigLogCallback { + /** + Must function as described for a C-level callback for + {@link CApi#sqlite3_config(ConfigLogCallback)}, with the slight signature change. + */ + void call(int errCode, String msg); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java b/ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java new file mode 100644 index 0000000000..a5530b49a4 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ConfigSqlLogCallback.java @@ -0,0 +1,25 @@ +/* +** 2023-08-23 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A callback for use with sqlite3_config(). +*/ +public interface ConfigSqlLogCallback { + /** + Must function as described for a C-level callback for + {@link CApi#sqlite3_config(ConfigSqlLogCallback)}, with the slight signature change. 
+ */ + void call(sqlite3 db, String msg, int msgType ); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/NativePointerHolder.java b/ext/jni/src/org/sqlite/jni/capi/NativePointerHolder.java new file mode 100644 index 0000000000..e82909e424 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/NativePointerHolder.java @@ -0,0 +1,46 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A helper for passing pointers between JNI C code and Java, in + particular for output pointers of high-level object types in the + sqlite3 C API, e.g. (sqlite3**) and (sqlite3_stmt**). This is + intended to be subclassed and the ContextType is intended to be the + class which is doing the subclassing. The intent of the ContextType + is strictly to provide some level of type safety by ensuring that a + NativePointerHolder is not inadvertently passed to an incompatible + function signature. + + These objects do not own the pointer they refer to. They are + intended simply to communicate that pointer between C and Java. +*/ +public class NativePointerHolder<ContextType> { + //! Only set from JNI, where access permissions don't matter. + private volatile long nativePointer = 0; + /** + For use ONLY by package-level APIs which act as proxies for + close/finalize operations. Such ops must call this to zero out + the pointer so that this object is not carrying a stale + pointer. This function returns the prior value of the pointer and + sets it to 0. + */ + final long clearNativePointer() { + final long rv = nativePointer; + nativePointer = 0; + return rv; + } + + public final long getNativePointer(){ return nativePointer; } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/OutputPointer.java b/ext/jni/src/org/sqlite/jni/capi/OutputPointer.java new file mode 100644 index 0000000000..f50d0c57cb --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/OutputPointer.java @@ -0,0 +1,253 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Helper classes for handling JNI output pointers. + +

    We do not use a generic OutputPointer because working with those + from the native JNI code is unduly quirky due to a lack of + autoboxing at that level. + +

    The usage is similar for all of these types: + +

    {@code
    +   OutputPointer.sqlite3 out = new OutputPointer.sqlite3();
    +   assert( null==out.get() );
    +   int rc = sqlite3_open(":memory:", out);
    +   if( 0!=rc ) ... error;
    +   assert( null!=out.get() );
    +   sqlite3 db = out.take();
    +   assert( null==out.get() );
    +   }
    + +

    The one minor exception is that the primitive types permit direct + access to the object's value via the `value` property, whereas the + JNI-level opaque types do not permit client-level code to set that + property. + +
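A minimal sketch of that difference, using only the primitive holder types defined below (the `OutputPointerDemo` class is hypothetical):

```java
import org.sqlite.jni.capi.OutputPointer;

/** Hypothetical demo of the primitive OutputPointer types. */
public class OutputPointerDemo {
  public static void main(String[] args){
    final OutputPointer.Int32 n = new OutputPointer.Int32(7);
    n.value = 42;              // primitive holders expose the value field directly...
    n.set(n.get() + 1);        // ...as well as get()/set() accessors
    final OutputPointer.Bool b = new OutputPointer.Bool();
    b.set(true);
    System.out.println(n.get() + " " + b.get());  // prints "43 true"
  }
}
```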

    Warning: do not share instances of these classes across + threads. Doing so may lead to corrupting sqlite3-internal state. +*/ +public final class OutputPointer { + + /** + Output pointer for use with routines, such as sqlite3_open(), + which return a database handle via an output pointer. These + pointers can only be set by the JNI layer, not by client-level + code. + */ + public static final class sqlite3 { + private org.sqlite.jni.capi.sqlite3 value; + /** Initializes with a null value. */ + public sqlite3(){value = null;} + /** Sets the current value to null. */ + public void clear(){value = null;} + /** Returns the current value. */ + public org.sqlite.jni.capi.sqlite3 get(){return value;} + /** Equivalent to calling get() then clear(). */ + public org.sqlite.jni.capi.sqlite3 take(){ + final org.sqlite.jni.capi.sqlite3 v = value; + value = null; + return v; + } + } + + /** + Output pointer for sqlite3_blob_open(). These + pointers can only be set by the JNI layer, not by client-level + code. + */ + public static final class sqlite3_blob { + private org.sqlite.jni.capi.sqlite3_blob value; + /** Initializes with a null value. */ + public sqlite3_blob(){value = null;} + /** Sets the current value to null. */ + public void clear(){value = null;} + /** Returns the current value. */ + public org.sqlite.jni.capi.sqlite3_blob get(){return value;} + /** Equivalent to calling get() then clear(). */ + public org.sqlite.jni.capi.sqlite3_blob take(){ + final org.sqlite.jni.capi.sqlite3_blob v = value; + value = null; + return v; + } + } + + /** + Output pointer for use with routines, such as sqlite3_prepare(), + which return a statement handle via an output pointer. These + pointers can only be set by the JNI layer, not by client-level + code. + */ + public static final class sqlite3_stmt { + private org.sqlite.jni.capi.sqlite3_stmt value; + /** Initializes with a null value. */ + public sqlite3_stmt(){value = null;} + /** Sets the current value to null. */ + public void clear(){value = null;} + /** Returns the current value. */ + public org.sqlite.jni.capi.sqlite3_stmt get(){return value;} + /** Equivalent to calling get() then clear(). */ + public org.sqlite.jni.capi.sqlite3_stmt take(){ + final org.sqlite.jni.capi.sqlite3_stmt v = value; + value = null; + return v; + } + } + + /** + Output pointer for use with routines, such as sqlite3_prepupdate_new(), + which return a sqlite3_value handle via an output pointer. These + pointers can only be set by the JNI layer, not by client-level + code. + */ + public static final class sqlite3_value { + private org.sqlite.jni.capi.sqlite3_value value; + /** Initializes with a null value. */ + public sqlite3_value(){value = null;} + /** Sets the current value to null. */ + public void clear(){value = null;} + /** Returns the current value. */ + public org.sqlite.jni.capi.sqlite3_value get(){return value;} + /** Equivalent to calling get() then clear(). */ + public org.sqlite.jni.capi.sqlite3_value take(){ + final org.sqlite.jni.capi.sqlite3_value v = value; + value = null; + return v; + } + } + + /** + Output pointer for use with native routines which return booleans + via integer output pointers. + */ + public static final class Bool { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public boolean value; + /** Initializes with the value 0. */ + public Bool(){this(false);} + /** Initializes with the value v. */ + public Bool(boolean v){value = v;} + /** Returns the current value. 
*/ + public boolean get(){return value;} + /** Sets the current value to v. */ + public void set(boolean v){value = v;} + } + + /** + Output pointer for use with native routines which return integers via + output pointers. + */ + public static final class Int32 { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public int value; + /** Initializes with the value 0. */ + public Int32(){this(0);} + /** Initializes with the value v. */ + public Int32(int v){value = v;} + /** Returns the current value. */ + public int get(){return value;} + /** Sets the current value to v. */ + public void set(int v){value = v;} + } + + /** + Output pointer for use with native routines which return 64-bit integers + via output pointers. + */ + public static final class Int64 { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public long value; + /** Initializes with the value 0. */ + public Int64(){this(0);} + /** Initializes with the value v. */ + public Int64(long v){value = v;} + /** Returns the current value. */ + public long get(){return value;} + /** Sets the current value. */ + public void set(long v){value = v;} + } + + /** + Output pointer for use with native routines which return strings via + output pointers. + */ + public static final class String { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public java.lang.String value; + /** Initializes with a null value. */ + public String(){this(null);} + /** Initializes with the value v. */ + public String(java.lang.String v){value = v;} + /** Returns the current value. */ + public java.lang.String get(){return value;} + /** Sets the current value. */ + public void set(java.lang.String v){value = v;} + } + + /** + Output pointer for use with native routines which return byte + arrays via output pointers. + */ + public static final class ByteArray { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public byte[] value; + /** Initializes with the value null. */ + public ByteArray(){this(null);} + /** Initializes with the value v. */ + public ByteArray(byte[] v){value = v;} + /** Returns the current value. */ + public byte[] get(){return value;} + /** Sets the current value. */ + public void set(byte[] v){value = v;} + } + + /** + Output pointer for use with native routines which return + blobs via java.nio.ByteBuffer. + + See {@link org.sqlite.jni.capi.CApi#sqlite3_jni_supports_nio} + */ + public static final class ByteBuffer { + /** + This is public for ease of use. Accessors are provided for + consistency with the higher-level types. + */ + public java.nio.ByteBuffer value; + /** Initializes with the value null. */ + public ByteBuffer(){this(null);} + /** Initializes with the value v. */ + public ByteBuffer(java.nio.ByteBuffer v){value = v;} + /** Returns the current value. */ + public java.nio.ByteBuffer get(){return value;} + /** Sets the current value. */ + public void set(java.nio.ByteBuffer v){value = v;} + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java b/ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java new file mode 100644 index 0000000000..35bb069c49 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/PrepareMultiCallback.java @@ -0,0 +1,81 @@ +/* +** 2023-09-13 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_prepare_multi}. +*/ +public interface PrepareMultiCallback extends CallbackProxy { + + /** + Gets passed a sqlite3_stmt which it may handle in arbitrary ways, + transferring ownership of it to this function. + + sqlite3_prepare_multi() will _not_ finalize st - it is up + to the call() implementation how st is handled. + + Must return 0 on success or an SQLITE_... code on error. If it + throws, sqlite3_prepare_multi() will transform the exception into + a db-level error in order to retain the C-style error semantics + of the API. + + See the {@link Finalize} class for a wrapper which finalizes the + statement after calling a proxy PrepareMultiCallback. + */ + int call(sqlite3_stmt st); + + /** + A PrepareMultiCallback impl which wraps a separate impl and finalizes + any sqlite3_stmt passed to its callback. + */ + public static final class Finalize implements PrepareMultiCallback { + private final PrepareMultiCallback p; + /** + p is the proxy to call() when this.call() is called. + */ + public Finalize( PrepareMultiCallback p ){ + this.p = p; + } + /** + Calls the call() method of the proxied callback and either returns its + result or propagates an exception. Either way, it passes its argument to + sqlite3_finalize() before returning. + */ + @Override public int call(sqlite3_stmt st){ + try { + return this.p.call(st); + }finally{ + CApi.sqlite3_finalize(st); + } + } + } + + /** + A PrepareMultiCallback impl which steps entirely through a result set, + ignoring all non-error results. + */ + final class StepAll implements PrepareMultiCallback { + public StepAll(){} + /** + Calls sqlite3_step() on st until it returns something other than + SQLITE_ROW. If the final result is SQLITE_DONE then 0 is returned, + else the result of the final step is returned. + */ + @Override public int call(sqlite3_stmt st){ + int rc = CApi.SQLITE_DONE; + while( CApi.SQLITE_ROW == (rc = CApi.sqlite3_step(st)) ){} + return CApi.SQLITE_DONE==rc ? 0 : rc; + } + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java b/ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java new file mode 100644 index 0000000000..38f7c5613e --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/PreupdateHookCallback.java @@ -0,0 +1,27 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_preupdate_hook}. +*/ +public interface PreupdateHookCallback extends CallbackProxy { + /** + Must function as described for the C-level sqlite3_preupdate_hook() + callback. If it throws, the exception is translated to a + db-level error and the exception is suppressed. 
+ */ + void call(sqlite3 db, int op, String dbName, String dbTable, + long iKey1, long iKey2 ); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ProgressHandlerCallback.java b/ext/jni/src/org/sqlite/jni/capi/ProgressHandlerCallback.java new file mode 100644 index 0000000000..464baa2e3d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ProgressHandlerCallback.java @@ -0,0 +1,27 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_progress_handler}. +*/ +public interface ProgressHandlerCallback extends CallbackProxy { + /** + Works as documented for the C-level sqlite3_progress_handler() callback. + +

    If it throws, the exception message is passed on to the db and + the exception is suppressed. + */ + int call(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ResultCode.java b/ext/jni/src/org/sqlite/jni/capi/ResultCode.java new file mode 100644 index 0000000000..5a8b2e6a18 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ResultCode.java @@ -0,0 +1,155 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + This enum contains all of the core and "extended" result codes used + by the sqlite3 library. It is provided not for use with the C-style + API (with which it won't work) but for higher-level code which may + find it useful to map SQLite result codes to human-readable names. +*/ +public enum ResultCode { + SQLITE_OK(CApi.SQLITE_OK), + SQLITE_ERROR(CApi.SQLITE_ERROR), + SQLITE_INTERNAL(CApi.SQLITE_INTERNAL), + SQLITE_PERM(CApi.SQLITE_PERM), + SQLITE_ABORT(CApi.SQLITE_ABORT), + SQLITE_BUSY(CApi.SQLITE_BUSY), + SQLITE_LOCKED(CApi.SQLITE_LOCKED), + SQLITE_NOMEM(CApi.SQLITE_NOMEM), + SQLITE_READONLY(CApi.SQLITE_READONLY), + SQLITE_INTERRUPT(CApi.SQLITE_INTERRUPT), + SQLITE_IOERR(CApi.SQLITE_IOERR), + SQLITE_CORRUPT(CApi.SQLITE_CORRUPT), + SQLITE_NOTFOUND(CApi.SQLITE_NOTFOUND), + SQLITE_FULL(CApi.SQLITE_FULL), + SQLITE_CANTOPEN(CApi.SQLITE_CANTOPEN), + SQLITE_PROTOCOL(CApi.SQLITE_PROTOCOL), + SQLITE_EMPTY(CApi.SQLITE_EMPTY), + SQLITE_SCHEMA(CApi.SQLITE_SCHEMA), + SQLITE_TOOBIG(CApi.SQLITE_TOOBIG), + SQLITE_CONSTRAINT(CApi.SQLITE_CONSTRAINT), + SQLITE_MISMATCH(CApi.SQLITE_MISMATCH), + SQLITE_MISUSE(CApi.SQLITE_MISUSE), + SQLITE_NOLFS(CApi.SQLITE_NOLFS), + SQLITE_AUTH(CApi.SQLITE_AUTH), + SQLITE_FORMAT(CApi.SQLITE_FORMAT), + SQLITE_RANGE(CApi.SQLITE_RANGE), + SQLITE_NOTADB(CApi.SQLITE_NOTADB), + SQLITE_NOTICE(CApi.SQLITE_NOTICE), + SQLITE_WARNING(CApi.SQLITE_WARNING), + SQLITE_ROW(CApi.SQLITE_ROW), + SQLITE_DONE(CApi.SQLITE_DONE), + SQLITE_ERROR_MISSING_COLLSEQ(CApi.SQLITE_ERROR_MISSING_COLLSEQ), + SQLITE_ERROR_RETRY(CApi.SQLITE_ERROR_RETRY), + SQLITE_ERROR_SNAPSHOT(CApi.SQLITE_ERROR_SNAPSHOT), + SQLITE_IOERR_READ(CApi.SQLITE_IOERR_READ), + SQLITE_IOERR_SHORT_READ(CApi.SQLITE_IOERR_SHORT_READ), + SQLITE_IOERR_WRITE(CApi.SQLITE_IOERR_WRITE), + SQLITE_IOERR_FSYNC(CApi.SQLITE_IOERR_FSYNC), + SQLITE_IOERR_DIR_FSYNC(CApi.SQLITE_IOERR_DIR_FSYNC), + SQLITE_IOERR_TRUNCATE(CApi.SQLITE_IOERR_TRUNCATE), + SQLITE_IOERR_FSTAT(CApi.SQLITE_IOERR_FSTAT), + SQLITE_IOERR_UNLOCK(CApi.SQLITE_IOERR_UNLOCK), + SQLITE_IOERR_RDLOCK(CApi.SQLITE_IOERR_RDLOCK), + SQLITE_IOERR_DELETE(CApi.SQLITE_IOERR_DELETE), + SQLITE_IOERR_BLOCKED(CApi.SQLITE_IOERR_BLOCKED), + SQLITE_IOERR_NOMEM(CApi.SQLITE_IOERR_NOMEM), + SQLITE_IOERR_ACCESS(CApi.SQLITE_IOERR_ACCESS), + SQLITE_IOERR_CHECKRESERVEDLOCK(CApi.SQLITE_IOERR_CHECKRESERVEDLOCK), + SQLITE_IOERR_LOCK(CApi.SQLITE_IOERR_LOCK), + SQLITE_IOERR_CLOSE(CApi.SQLITE_IOERR_CLOSE), + SQLITE_IOERR_DIR_CLOSE(CApi.SQLITE_IOERR_DIR_CLOSE), + SQLITE_IOERR_SHMOPEN(CApi.SQLITE_IOERR_SHMOPEN), + SQLITE_IOERR_SHMSIZE(CApi.SQLITE_IOERR_SHMSIZE), + SQLITE_IOERR_SHMLOCK(CApi.SQLITE_IOERR_SHMLOCK), + SQLITE_IOERR_SHMMAP(CApi.SQLITE_IOERR_SHMMAP), + 
SQLITE_IOERR_SEEK(CApi.SQLITE_IOERR_SEEK), + SQLITE_IOERR_DELETE_NOENT(CApi.SQLITE_IOERR_DELETE_NOENT), + SQLITE_IOERR_MMAP(CApi.SQLITE_IOERR_MMAP), + SQLITE_IOERR_GETTEMPPATH(CApi.SQLITE_IOERR_GETTEMPPATH), + SQLITE_IOERR_CONVPATH(CApi.SQLITE_IOERR_CONVPATH), + SQLITE_IOERR_VNODE(CApi.SQLITE_IOERR_VNODE), + SQLITE_IOERR_AUTH(CApi.SQLITE_IOERR_AUTH), + SQLITE_IOERR_BEGIN_ATOMIC(CApi.SQLITE_IOERR_BEGIN_ATOMIC), + SQLITE_IOERR_COMMIT_ATOMIC(CApi.SQLITE_IOERR_COMMIT_ATOMIC), + SQLITE_IOERR_ROLLBACK_ATOMIC(CApi.SQLITE_IOERR_ROLLBACK_ATOMIC), + SQLITE_IOERR_DATA(CApi.SQLITE_IOERR_DATA), + SQLITE_IOERR_CORRUPTFS(CApi.SQLITE_IOERR_CORRUPTFS), + SQLITE_LOCKED_SHAREDCACHE(CApi.SQLITE_LOCKED_SHAREDCACHE), + SQLITE_LOCKED_VTAB(CApi.SQLITE_LOCKED_VTAB), + SQLITE_BUSY_RECOVERY(CApi.SQLITE_BUSY_RECOVERY), + SQLITE_BUSY_SNAPSHOT(CApi.SQLITE_BUSY_SNAPSHOT), + SQLITE_BUSY_TIMEOUT(CApi.SQLITE_BUSY_TIMEOUT), + SQLITE_CANTOPEN_NOTEMPDIR(CApi.SQLITE_CANTOPEN_NOTEMPDIR), + SQLITE_CANTOPEN_ISDIR(CApi.SQLITE_CANTOPEN_ISDIR), + SQLITE_CANTOPEN_FULLPATH(CApi.SQLITE_CANTOPEN_FULLPATH), + SQLITE_CANTOPEN_CONVPATH(CApi.SQLITE_CANTOPEN_CONVPATH), + SQLITE_CANTOPEN_SYMLINK(CApi.SQLITE_CANTOPEN_SYMLINK), + SQLITE_CORRUPT_VTAB(CApi.SQLITE_CORRUPT_VTAB), + SQLITE_CORRUPT_SEQUENCE(CApi.SQLITE_CORRUPT_SEQUENCE), + SQLITE_CORRUPT_INDEX(CApi.SQLITE_CORRUPT_INDEX), + SQLITE_READONLY_RECOVERY(CApi.SQLITE_READONLY_RECOVERY), + SQLITE_READONLY_CANTLOCK(CApi.SQLITE_READONLY_CANTLOCK), + SQLITE_READONLY_ROLLBACK(CApi.SQLITE_READONLY_ROLLBACK), + SQLITE_READONLY_DBMOVED(CApi.SQLITE_READONLY_DBMOVED), + SQLITE_READONLY_CANTINIT(CApi.SQLITE_READONLY_CANTINIT), + SQLITE_READONLY_DIRECTORY(CApi.SQLITE_READONLY_DIRECTORY), + SQLITE_ABORT_ROLLBACK(CApi.SQLITE_ABORT_ROLLBACK), + SQLITE_CONSTRAINT_CHECK(CApi.SQLITE_CONSTRAINT_CHECK), + SQLITE_CONSTRAINT_COMMITHOOK(CApi.SQLITE_CONSTRAINT_COMMITHOOK), + SQLITE_CONSTRAINT_FOREIGNKEY(CApi.SQLITE_CONSTRAINT_FOREIGNKEY), + SQLITE_CONSTRAINT_FUNCTION(CApi.SQLITE_CONSTRAINT_FUNCTION), + SQLITE_CONSTRAINT_NOTNULL(CApi.SQLITE_CONSTRAINT_NOTNULL), + SQLITE_CONSTRAINT_PRIMARYKEY(CApi.SQLITE_CONSTRAINT_PRIMARYKEY), + SQLITE_CONSTRAINT_TRIGGER(CApi.SQLITE_CONSTRAINT_TRIGGER), + SQLITE_CONSTRAINT_UNIQUE(CApi.SQLITE_CONSTRAINT_UNIQUE), + SQLITE_CONSTRAINT_VTAB(CApi.SQLITE_CONSTRAINT_VTAB), + SQLITE_CONSTRAINT_ROWID(CApi.SQLITE_CONSTRAINT_ROWID), + SQLITE_CONSTRAINT_PINNED(CApi.SQLITE_CONSTRAINT_PINNED), + SQLITE_CONSTRAINT_DATATYPE(CApi.SQLITE_CONSTRAINT_DATATYPE), + SQLITE_NOTICE_RECOVER_WAL(CApi.SQLITE_NOTICE_RECOVER_WAL), + SQLITE_NOTICE_RECOVER_ROLLBACK(CApi.SQLITE_NOTICE_RECOVER_ROLLBACK), + SQLITE_WARNING_AUTOINDEX(CApi.SQLITE_WARNING_AUTOINDEX), + SQLITE_AUTH_USER(CApi.SQLITE_AUTH_USER), + SQLITE_OK_LOAD_PERMANENTLY(CApi.SQLITE_OK_LOAD_PERMANENTLY); + + public final int value; + + ResultCode(int rc){ + value = rc; + ResultCodeMap.set(rc, this); + } + + /** + Returns the entry from this enum for the given result code, or + null if no match is found. + */ + public static ResultCode getEntryForInt(int rc){ + return ResultCodeMap.get(rc); + } + + /** + Internal level of indirection required because we cannot initialize + static enum members in an enum before the enum constructor is + invoked. 
+ */ + private static final class ResultCodeMap { + private static final java.util.Map i2e + = new java.util.HashMap<>(); + private static void set(int rc, ResultCode e){ i2e.put(rc, e); } + private static ResultCode get(int rc){ return i2e.get(rc); } + } + +} diff --git a/ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java b/ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java new file mode 100644 index 0000000000..cf9c4b6e7a --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/RollbackHookCallback.java @@ -0,0 +1,26 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_rollback_hook}. +*/ +public interface RollbackHookCallback extends CallbackProxy { + /** + Must function as documented for the C-level sqlite3_rollback_hook() + callback. If it throws, the exception is translated into + a db-level error. + */ + void call(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/SQLFunction.java b/ext/jni/src/org/sqlite/jni/capi/SQLFunction.java new file mode 100644 index 0000000000..7ad1381a7a --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/SQLFunction.java @@ -0,0 +1,36 @@ +/* +** 2023-07-22 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + SQLFunction is used in conjunction with the + sqlite3_create_function() JNI-bound API to give that native code + access to the callback functions needed in order to implement SQL + functions in Java. + +

    + + This class is not used by itself, but is a marker base class. The + three UDF types are modelled by the inner classes Scalar, + Aggregate, and Window. Most simply, clients may subclass + those, or create anonymous classes from them, to implement + UDFs. Clients are free to create their own classes for use with + UDFs, so long as they conform to the public interfaces defined by + those three classes. The JNI layer only actively relies on the + SQLFunction base class and the method names and signatures used by + the UDF callback interfaces. +*/ +public interface SQLFunction { + +} diff --git a/ext/jni/src/org/sqlite/jni/capi/SQLTester.java b/ext/jni/src/org/sqlite/jni/capi/SQLTester.java new file mode 100644 index 0000000000..bc2e75f8be --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/SQLTester.java @@ -0,0 +1,1449 @@ +/* +** 2023-08-08 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the main application entry pointer for the +** SQLTester framework. +*/ +package org.sqlite.jni.capi; +import java.util.ArrayList; +import java.util.Arrays; +import java.nio.charset.StandardCharsets; +import java.util.regex.*; +import static org.sqlite.jni.capi.CApi.*; + +/** + Modes for how to escape (or not) column values and names from + SQLTester.execSql() to the result buffer output. +*/ +enum ResultBufferMode { + //! Do not append to result buffer + NONE, + //! Append output escaped. + ESCAPED, + //! Append output as-is + ASIS +} + +/** + Modes to specify how to emit multi-row output from + SQLTester.execSql() to the result buffer. +*/ +enum ResultRowMode { + //! Keep all result rows on one line, space-separated. + ONELINE, + //! Add a newline between each result row. + NEWLINE +} + +/** + Base exception type for test-related failures. +*/ +class SQLTesterException extends RuntimeException { + private boolean bFatal = false; + + SQLTesterException(String msg){ + super(msg); + } + + protected SQLTesterException(String msg, boolean fatal){ + super(msg); + bFatal = fatal; + } + + /** + Indicates whether the framework should consider this exception + type as immediately fatal to the test run or not. + */ + final boolean isFatal(){ return bFatal; } +} + +class DbException extends SQLTesterException { + DbException(sqlite3 db, int rc, boolean closeDb){ + super("DB error #"+rc+": "+sqlite3_errmsg(db),true); + if( closeDb ) sqlite3_close_v2(db); + } + DbException(sqlite3 db, int rc){ + this(db, rc, false); + } +} + +/** + Generic test-failed exception. + */ +class TestScriptFailed extends SQLTesterException { + public TestScriptFailed(TestScript ts, String msg){ + super(ts.getOutputPrefix()+": "+msg, true); + } +} + +/** + Thrown when an unknown test command is encountered in a script. +*/ +class UnknownCommand extends SQLTesterException { + public UnknownCommand(TestScript ts, String cmd){ + super(ts.getOutputPrefix()+": unknown command: "+cmd, false); + } +} + +/** + Thrown when an "incompatible directive" is found in a script. This + can be the presence of a C-preprocessor construct, specific + metadata tags within a test script's header, or specific test + constructs which are incompatible with this particular + implementation. 
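+
+     Examples, per checkForDirective() in this file: lines beginning
+     with "#" (C-preprocessor input), MODULE_NAME and
+     MIXED_MODULE_NAME header tags, REQUIRED_PROPERTIES values which
+     this implementation does not handle, and a newline-pipe
+     combination in a script's content.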
+*/ +class IncompatibleDirective extends SQLTesterException { + public IncompatibleDirective(TestScript ts, String line){ + super(ts.getOutputPrefix()+": incompatible directive: "+line, false); + } +} + +/** + Console output utility class. +*/ +class Outer { + private int verbosity = 0; + + static void out(Object val){ + System.out.print(val); + } + + Outer out(Object... vals){ + for(Object v : vals) out(v); + return this; + } + + Outer outln(Object... vals){ + out(vals).out("\n"); + return this; + } + + Outer verbose(Object... vals){ + if(verbosity>0){ + out("VERBOSE",(verbosity>1 ? "+: " : ": ")).outln(vals); + } + return this; + } + + void setVerbosity(int level){ + verbosity = level; + } + + int getVerbosity(){ + return verbosity; + } + + public boolean isVerbose(){return verbosity > 0;} + +} + +/** +

    This class provides an application which aims to implement the + rudimentary SQL-driven test tool described in the accompanying + {@code test-script-interpreter.md}. + +

+   This class is an internal testing tool, not part of the public
+   interface. It is (A) kept in the same package as the library
+   because access permissions require it to be, and (B) included in
+   the generated docs because the JDK8 javadoc offers no way to
+   filter individual classes out of the doc-generation process (it
+   can only exclude whole packages, which (A) rules out).
+
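+     A typical invocation, illustrative only (see main() below for
+     the recognized flags; the sqlite3-jni native library must be
+     resolvable via the JVM's library path):
+
+       java org.sqlite.jni.capi.SQLTester --verbose test1.test test2.test
+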

    An instance of this application provides a core set of services + which TestScript instances use for processing testing logic. + TestScripts, in turn, delegate the concrete test work to Command + objects, which the TestScript parses on their behalf. +*/ +public class SQLTester { + //! List of input script files. + private final java.util.List listInFiles = new ArrayList<>(); + //! Console output utility. + private final Outer outer = new Outer(); + //! Test input buffer. + private final StringBuilder inputBuffer = new StringBuilder(); + //! Test result buffer. + private final StringBuilder resultBuffer = new StringBuilder(); + //! Buffer for REQUIRED_PROPERTIES pragmas. + private final StringBuilder dbInitSql = new StringBuilder(); + //! Output representation of SQL NULL. + private String nullView = "nil"; + //! Total tests run. + private int nTotalTest = 0; + //! Total test script files run. + private int nTestFile = 0; + //! Number of scripts which were aborted. + private int nAbortedScript = 0; + //! Incremented by test case handlers + private int nTest = 0; + //! True to enable column name output from execSql() + private boolean emitColNames; + //! True to keep going regardless of how a test fails. + private boolean keepGoing = false; + //! The list of available db handles. + private final sqlite3[] aDb = new sqlite3[7]; + //! Index into aDb of the current db. + private int iCurrentDb = 0; + //! Name of the default db, re-created for each script. + private final String initialDbName = "test.db"; + + + public SQLTester(){ + reset(); + } + + void setVerbosity(int level){ + this.outer.setVerbosity( level ); + } + int getVerbosity(){ + return this.outer.getVerbosity(); + } + boolean isVerbose(){ + return this.outer.isVerbose(); + } + + void outputColumnNames(boolean b){ emitColNames = b; } + + void verbose(Object... vals){ + outer.verbose(vals); + } + + void outln(Object... vals){ + outer.outln(vals); + } + + void out(Object... vals){ + outer.out(vals); + } + + //! Adds the given test script to the to-test list. + public void addTestScript(String filename){ + listInFiles.add(filename); + //verbose("Added file ",filename); + } + + private void setupInitialDb() throws DbException { + if( null==aDb[0] ){ + Util.unlink(initialDbName); + openDb(0, initialDbName, true); + }else{ + outln("WARNING: setupInitialDb() unexpectedly ", + "triggered while it is opened."); + } + } + + static final String[] startEmoji = { + "🚴", "🏄", "🏇", "🤸", "⛹", "🏊", "⛷", "🧗", "🏋" + }; + static final int nStartEmoji = startEmoji.length; + static int iStartEmoji = 0; + + private static String nextStartEmoji(){ + return startEmoji[iStartEmoji++ % nStartEmoji]; + } + + public void runTests() throws Exception { + final long tStart = System.currentTimeMillis(); + for(String f : listInFiles){ + reset(); + ++nTestFile; + final TestScript ts = new TestScript(f); + outln(nextStartEmoji(), " starting [",f,"]"); + boolean threw = false; + final long timeStart = System.currentTimeMillis(); + try{ + ts.run(this); + }catch(SQLTesterException e){ + threw = true; + outln("🔥EXCEPTION: ",e.getClass().getSimpleName(),": ",e.getMessage()); + ++nAbortedScript; + if( keepGoing ) outln("Continuing anyway because of the keep-going option."); + else if( e.isFatal() ) throw e; + }finally{ + final long timeEnd = System.currentTimeMillis(); + outln("🏁",(threw ? 
"❌" : "✅")," ",nTest," test(s) in ", + (timeEnd-timeStart),"ms."); + } + } + final long tEnd = System.currentTimeMillis(); + outln("Total run-time: ",(tEnd-tStart),"ms"); + Util.unlink(initialDbName); + } + + private StringBuilder clearBuffer(StringBuilder b){ + b.setLength(0); + return b; + } + + StringBuilder clearInputBuffer(){ + return clearBuffer(inputBuffer); + } + + StringBuilder clearResultBuffer(){ + return clearBuffer(resultBuffer); + } + + StringBuilder getInputBuffer(){ return inputBuffer; } + + void appendInput(String n, boolean addNL){ + inputBuffer.append(n); + if(addNL) inputBuffer.append('\n'); + } + + void appendResult(String n, boolean addNL){ + resultBuffer.append(n); + if(addNL) resultBuffer.append('\n'); + } + + void appendDbInitSql(String n) throws DbException { + dbInitSql.append(n).append('\n'); + if( null!=getCurrentDb() ){ + //outln("RUNNING DB INIT CODE: ",n); + execSql(null, true, ResultBufferMode.NONE, null, n); + } + } + String getDbInitSql(){ return dbInitSql.toString(); } + + String getInputText(){ return inputBuffer.toString(); } + + String getResultText(){ return resultBuffer.toString(); } + + private String takeBuffer(StringBuilder b){ + final String rc = b.toString(); + clearBuffer(b); + return rc; + } + + String takeInputBuffer(){ return takeBuffer(inputBuffer); } + + String takeResultBuffer(){ return takeBuffer(resultBuffer); } + + int getCurrentDbId(){ return iCurrentDb; } + + SQLTester affirmDbId(int n) throws IndexOutOfBoundsException { + if(n<0 || n>=aDb.length){ + throw new IndexOutOfBoundsException("illegal db number: "+n); + } + return this; + } + + sqlite3 setCurrentDb(int n){ + affirmDbId(n); + iCurrentDb = n; + return this.aDb[n]; + } + + sqlite3 getCurrentDb(){ return aDb[iCurrentDb]; } + + sqlite3 getDbById(int id){ + return affirmDbId(id).aDb[id]; + } + + void closeDb(int id) { + final sqlite3 db = affirmDbId(id).aDb[id]; + if( null != db ){ + sqlite3_close_v2(db); + aDb[id] = null; + } + } + + void closeDb() { closeDb(iCurrentDb); } + + void closeAllDbs(){ + for(int i = 0; i 0){ + //outln("RUNNING DB INIT CODE: ",dbInitSql.toString()); + rc = execSql(db, false, ResultBufferMode.NONE, + null, dbInitSql.toString()); + } + if( 0!=rc ){ + throw new DbException(db, rc, true); + } + return aDb[iCurrentDb] = db; + } + + sqlite3 openDb(int slot, String name, boolean createIfNeeded) throws DbException { + affirmDbId(slot); + iCurrentDb = slot; + return openDb(name, createIfNeeded); + } + + /** + Resets all tester context state except for that related to + tracking running totals. + */ + void reset(){ + clearInputBuffer(); + clearResultBuffer(); + clearBuffer(dbInitSql); + closeAllDbs(); + nTest = 0; + nullView = "nil"; + emitColNames = false; + iCurrentDb = 0; + //dbInitSql.append("SELECT 1;"); + } + + void setNullValue(String v){nullView = v;} + + /** + If true, encountering an unknown command in a script causes the + remainder of the script to be skipped, rather than aborting the + whole script run. + */ + boolean skipUnknownCommands(){ + // Currently hard-coded. Potentially a flag someday. + return true; + } + + void incrementTestCounter(){ ++nTest; ++nTotalTest; } + + //! "Special" characters - we have to escape output if it contains any. + static final Pattern patternSpecial = Pattern.compile( + "[\\x00-\\x20\\x22\\x5c\\x7b\\x7d]" + ); + //! Either of '{' or '}'. + static final Pattern patternSquiggly = Pattern.compile("[{}]"); + + /** + Returns v or some escaped form of v, as defined in the tester's + spec doc. 
+ */ + String escapeSqlValue(String v){ + if( "".equals(v) ) return "{}"; + Matcher m = patternSpecial.matcher(v); + if( !m.find() ){ + return v /* no escaping needed */; + } + m = patternSquiggly.matcher(v); + if( !m.find() ){ + return "{"+v+"}"; + } + final StringBuilder sb = new StringBuilder("\""); + final int n = v.length(); + for(int i = 0; i < n; ++i){ + final char ch = v.charAt(i); + switch(ch){ + case '\\': sb.append("\\\\"); break; + case '"': sb.append("\\\""); break; + default: + //verbose("CHAR ",(int)ch," ",ch," octal=",String.format("\\%03o", (int)ch)); + if( (int)ch < 32 ) sb.append(String.format("\\%03o", (int)ch)); + else sb.append(ch); + break; + } + } + sb.append("\""); + return sb.toString(); + } + + private void appendDbErr(sqlite3 db, StringBuilder sb, int rc){ + sb.append(org.sqlite.jni.capi.ResultCode.getEntryForInt(rc)).append(' '); + final String msg = escapeSqlValue(sqlite3_errmsg(db)); + if( '{' == msg.charAt(0) ){ + sb.append(msg); + }else{ + sb.append('{').append(msg).append('}'); + } + } + + /** + Runs SQL on behalf of test commands and outputs the results following + the very specific rules of the test framework. + + If db is null, getCurrentDb() is assumed. If throwOnError is true then + any db-side error will result in an exception, else they result in + the db's result code. + + appendMode specifies how/whether to append results to the result + buffer. rowMode specifies whether to output all results in a + single line or one line per row. If appendMode is + ResultBufferMode.NONE then rowMode is ignored and may be null. + */ + public int execSql(sqlite3 db, boolean throwOnError, + ResultBufferMode appendMode, ResultRowMode rowMode, + String sql) throws SQLTesterException { + if( null==db && null==aDb[0] ){ + // Delay opening of the initial db to enable tests to change its + // name and inject on-connect code via, e.g., the MEMDB + // directive. this setup as the potential to misinteract with + // auto-extension timing and must be done carefully. + setupInitialDb(); + } + final OutputPointer.Int32 oTail = new OutputPointer.Int32(); + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + final byte[] sqlUtf8 = sql.getBytes(StandardCharsets.UTF_8); + if( null==db ) db = getCurrentDb(); + int pos = 0, n = 1; + byte[] sqlChunk = sqlUtf8; + int rc = 0; + sqlite3_stmt stmt = null; + int spacing = 0 /* emit a space for --result if>0 */ ; + final StringBuilder sb = (ResultBufferMode.NONE==appendMode) + ? null : resultBuffer; + //outln("sqlChunk len= = ",sqlChunk.length); + try{ + while(pos < sqlChunk.length){ + if(pos > 0){ + sqlChunk = Arrays.copyOfRange(sqlChunk, pos, + sqlChunk.length); + } + if( 0==sqlChunk.length ) break; + rc = sqlite3_prepare_v2(db, sqlChunk, outStmt, oTail); + /*outln("PREPARE rc ",rc," oTail=",oTail.get(),": ", + new String(sqlChunk,StandardCharsets.UTF_8),"\n");*/ + if( 0!=rc ){ + if(throwOnError){ + throw new DbException(db, rc); + }else if( null!=sb ){ + appendDbErr(db, sb, rc); + } + break; + } + pos = oTail.value; + stmt = outStmt.take(); + if( null == stmt ){ + // empty statement was parsed. + continue; + } + if( null!=sb ){ + // Add the output to the result buffer... 
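+          // (Per the modes above: values are separated by single
+          // spaces; with emitColNames enabled each value is preceded
+          // by its column name; SQL NULLs are rendered using
+          // nullView; ResultRowMode.NEWLINE terminates each row with
+          // a newline, while ONELINE keeps all rows on one
+          // space-separated line.)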
+ final int nCol = sqlite3_column_count(stmt); + String colName = null, val = null; + while( SQLITE_ROW == (rc = sqlite3_step(stmt)) ){ + for(int i = 0; i < nCol; ++i){ + if( spacing++ > 0 ) sb.append(' '); + if( emitColNames ){ + colName = sqlite3_column_name(stmt, i); + switch(appendMode){ + case ASIS: + sb.append( colName ); + break; + case ESCAPED: + sb.append( escapeSqlValue(colName) ); + break; + default: + throw new SQLTesterException("Unhandled ResultBufferMode: "+appendMode); + } + sb.append(' '); + } + val = sqlite3_column_text16(stmt, i); + if( null==val ){ + sb.append( nullView ); + continue; + } + switch(appendMode){ + case ASIS: + sb.append( val ); + break; + case ESCAPED: + sb.append( escapeSqlValue(val) ); + break; + default: + throw new SQLTesterException("Unhandled ResultBufferMode: "+appendMode); + } + } + if( ResultRowMode.NEWLINE == rowMode ){ + spacing = 0; + sb.append('\n'); + } + } + }else{ + while( SQLITE_ROW == (rc = sqlite3_step(stmt)) ){} + } + sqlite3_finalize(stmt); + stmt = null; + if(SQLITE_ROW==rc || SQLITE_DONE==rc) rc = 0; + else if( rc!=0 ){ + if( null!=sb ){ + appendDbErr(db, sb, rc); + } + break; + } + } + }finally{ + sqlite3_reset(stmt + /* In order to trigger an exception in the + INSERT...RETURNING locking scenario: + https://sqlite.org/forum/forumpost/36f7a2e7494897df */); + sqlite3_finalize(stmt); + } + if( 0!=rc && throwOnError ){ + throw new DbException(db, rc); + } + return rc; + } + + public static void main(String[] argv) throws Exception{ + installCustomExtensions(); + boolean dumpInternals = false; + final SQLTester t = new SQLTester(); + for(String a : argv){ + if(a.startsWith("-")){ + final String flag = a.replaceFirst("-+",""); + if( flag.equals("verbose") ){ + // Use --verbose up to 3 times + t.setVerbosity(t.getVerbosity() + 1); + }else if( flag.equals("keep-going") ){ + t.keepGoing = true; + }else if( flag.equals("internals") ){ + dumpInternals = true; + }else{ + throw new IllegalArgumentException("Unhandled flag: "+flag); + } + continue; + } + t.addTestScript(a); + } + final AutoExtensionCallback ax = new AutoExtensionCallback() { + private final SQLTester tester = t; + @Override public int call(sqlite3 db){ + final String init = tester.getDbInitSql(); + if( !init.isEmpty() ){ + tester.execSql(db, true, ResultBufferMode.NONE, null, init); + } + return 0; + } + }; + sqlite3_auto_extension(ax); + try { + t.runTests(); + }finally{ + sqlite3_cancel_auto_extension(ax); + t.outln("Processed ",t.nTotalTest," test(s) in ",t.nTestFile," file(s)."); + if( t.nAbortedScript > 0 ){ + t.outln("Aborted ",t.nAbortedScript," script(s)."); + } + if( dumpInternals ){ + sqlite3_jni_internal_details(); + } + } + } + + /** + Internal impl of the public strglob() method. Neither argument + may be NULL and both _MUST_ be NUL-terminated. + */ + private static native int strglob(byte[] glob, byte[] txt); + + /** + Works essentially the same as sqlite3_strglob() except that the + glob character '#' matches a sequence of one or more digits. It + does not match when it appears at the start or middle of a series + of digits, e.g. "#23" or "1#3", but will match at the end, + e.g. "12#". + */ + static int strglob(String glob, String txt){ + return strglob( + (glob+"\0").getBytes(StandardCharsets.UTF_8), + (txt+"\0").getBytes(StandardCharsets.UTF_8) + ); + } + + /** + Sets up C-side components needed by the test framework. 
This must + not be called until main() is triggered so that it does not + interfere with library clients who don't use this class. + */ + static native void installCustomExtensions(); + static { + System.loadLibrary("sqlite3-jni") + /* Interestingly, when SQLTester is the main app, we have to + load that lib from here. The same load from CApi does + not happen early enough. Without this, + installCustomExtensions() is an unresolved symbol. */; + } + +} + +/** + General utilities for the SQLTester bits. +*/ +final class Util { + + //! Throws a new T, appending all msg args into a string for the message. + static void toss(Class errorType, Object... msg) throws Exception { + StringBuilder sb = new StringBuilder(); + for(Object s : msg) sb.append(s); + final java.lang.reflect.Constructor ctor = + errorType.getConstructor(String.class); + throw ctor.newInstance(sb.toString()); + } + + static void toss(Object... msg) throws Exception{ + toss(RuntimeException.class, msg); + } + + //! Tries to delete the given file, silently ignoring failure. + static void unlink(String filename){ + try{ + final java.io.File f = new java.io.File(filename); + f.delete(); + }catch(Exception e){ + /* ignore */ + } + } + + /** + Appends all entries in argv[1..end] into a space-separated + string, argv[0] is not included because it's expected to be a + command name. + */ + static String argvToString(String[] argv){ + StringBuilder sb = new StringBuilder(); + for(int i = 1; i < argv.length; ++i ){ + if( i>1 ) sb.append(" "); + sb.append( argv[i] ); + } + return sb.toString(); + } + +} + +/** + Base class for test script commands. It provides a set of utility + APIs for concrete command implementations. + + Each subclass must have a public no-arg ctor and must implement + the process() method which is abstract in this class. + + Commands are intended to be stateless, except perhaps for counters + and similar internals. Specifically, no state which changes the + behavior between any two invocations of process() should be + retained. +*/ +abstract class Command { + protected Command(){} + + /** + Must process one command-unit of work and either return + (on success) or throw (on error). + + The first two arguments specify the context of the test. The TestScript + provides the content of the test and the SQLTester providers the sandbox + in which that script is being evaluated. + + argv is a list with the command name followed by any arguments to + that command. The argcCheck() method from this class provides + very basic argc validation. + */ + public abstract void process( + SQLTester st, TestScript ts, String[] argv + ) throws Exception; + + /** + If argv.length-1 (-1 because the command's name is in argv[0]) does not + fall in the inclusive range (min,max) then this function throws. Use + a max value of -1 to mean unlimited. + */ + protected final void argcCheck(TestScript ts, String[] argv, int min, int max){ + int argc = argv.length-1; + if(argc=0 && argc>max)){ + if( min==max ){ + ts.toss(argv[0]," requires exactly ",min," argument(s)"); + }else if(max>0){ + ts.toss(argv[0]," requires ",min,"-",max," arguments."); + }else{ + ts.toss(argv[0]," requires at least ",min," arguments."); + } + } + } + + /** + Equivalent to argcCheck(argv,argc,argc). + */ + protected final void argcCheck(TestScript ts, String[] argv, int argc){ + argcCheck(ts, argv, argc, argc); + } +} + +//! 
--close command +class CloseDbCommand extends Command { + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,0,1); + int id; + if(argv.length>1){ + String arg = argv[1]; + if("all".equals(arg)){ + t.closeAllDbs(); + return; + } + else{ + id = Integer.parseInt(arg); + } + }else{ + id = t.getCurrentDbId(); + } + t.closeDb(id); + } +} + +//! --column-names command +class ColumnNamesCommand extends Command { + public void process( + SQLTester st, TestScript ts, String[] argv + ){ + argcCheck(ts,argv,1); + st.outputColumnNames( Integer.parseInt(argv[1])!=0 ); + } +} + +//! --db command +class DbCommand extends Command { + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,1); + t.setCurrentDb( Integer.parseInt(argv[1]) ); + } +} + +//! --glob command +class GlobCommand extends Command { + private boolean negate = false; + public GlobCommand(){} + protected GlobCommand(boolean negate){ this.negate = negate; } + + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,1,-1); + t.incrementTestCounter(); + final String sql = t.takeInputBuffer(); + int rc = t.execSql(null, true, ResultBufferMode.ESCAPED, + ResultRowMode.ONELINE, sql); + final String result = t.getResultText(); + final String sArgs = Util.argvToString(argv); + //t2.verbose2(argv[0]," rc = ",rc," result buffer:\n", result,"\nargs:\n",sArgs); + final String glob = Util.argvToString(argv); + rc = SQLTester.strglob(glob, result); + if( (negate && 0==rc) || (!negate && 0!=rc) ){ + ts.toss(argv[0], " mismatch: ", glob," vs input: ",result); + } + } +} + +//! --json command +class JsonCommand extends ResultCommand { + public JsonCommand(){ super(ResultBufferMode.ASIS); } +} + +//! --json-block command +class JsonBlockCommand extends TableResultCommand { + public JsonBlockCommand(){ super(true); } +} + +//! --new command +class NewDbCommand extends OpenDbCommand { + public NewDbCommand(){ super(true); } + public void process(SQLTester t, TestScript ts, String[] argv){ + if(argv.length>1){ + Util.unlink(argv[1]); + } + super.process(t, ts, argv); + } + +} + +//! Placeholder dummy/no-op/unimplemented commands +class NoopCommand extends Command { + private boolean verbose = false; + public NoopCommand(boolean verbose){ + this.verbose = verbose; + } + public NoopCommand(){} + public void process(SQLTester t, TestScript ts, String[] argv){ + if( this.verbose ){ + t.outln("Skipping unhandled command: "+argv[0]); + } + } +} + +//! --notglob command +class NotGlobCommand extends GlobCommand { + public NotGlobCommand(){ + super(true); + } +} + +//! --null command +class NullCommand extends Command { + public void process( + SQLTester st, TestScript ts, String[] argv + ){ + argcCheck(ts,argv,1); + st.setNullValue( argv[1] ); + } +} + +//! --open command +class OpenDbCommand extends Command { + private boolean createIfNeeded = false; + public OpenDbCommand(){} + protected OpenDbCommand(boolean c){createIfNeeded = c;} + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,1); + t.openDb(argv[1], createIfNeeded); + } +} + +//! --print command +class PrintCommand extends Command { + public void process( + SQLTester st, TestScript ts, String[] argv + ){ + st.out(ts.getOutputPrefix(),": "); + if( 1==argv.length ){ + st.out( st.getInputText() ); + }else{ + st.outln( Util.argvToString(argv) ); + } + } +} + +//! 
--result command +class ResultCommand extends Command { + private final ResultBufferMode bufferMode; + protected ResultCommand(ResultBufferMode bm){ bufferMode = bm; } + public ResultCommand(){ this(ResultBufferMode.ESCAPED); } + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,0,-1); + t.incrementTestCounter(); + final String sql = t.takeInputBuffer(); + //ts.verbose2(argv[0]," SQL =\n",sql); + int rc = t.execSql(null, false, bufferMode, ResultRowMode.ONELINE, sql); + final String result = t.getResultText().trim(); + final String sArgs = argv.length>1 ? Util.argvToString(argv) : ""; + if( !result.equals(sArgs) ){ + t.outln(argv[0]," FAILED comparison. Result buffer:\n", + result,"\nExpected result:\n",sArgs); + ts.toss(argv[0]+" comparison failed."); + } + } +} + +//! --run command +class RunCommand extends Command { + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,0,1); + final sqlite3 db = (1==argv.length) + ? t.getCurrentDb() : t.getDbById( Integer.parseInt(argv[1]) ); + final String sql = t.takeInputBuffer(); + final int rc = t.execSql(db, false, ResultBufferMode.NONE, + ResultRowMode.ONELINE, sql); + if( 0!=rc && t.isVerbose() ){ + String msg = sqlite3_errmsg(db); + ts.verbose1(argv[0]," non-fatal command error #",rc,": ", + msg,"\nfor SQL:\n",sql); + } + } +} + +//! --tableresult command +class TableResultCommand extends Command { + private final boolean jsonMode; + protected TableResultCommand(boolean jsonMode){ this.jsonMode = jsonMode; } + public TableResultCommand(){ this(false); } + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,0); + t.incrementTestCounter(); + String body = ts.fetchCommandBody(t); + if( null==body ) ts.toss("Missing ",argv[0]," body."); + body = body.trim(); + if( !body.endsWith("\n--end") ){ + ts.toss(argv[0], " must be terminated with --end."); + }else{ + body = body.substring(0, body.length()-6); + } + final String[] globs = body.split("\\s*\\n\\s*"); + if( globs.length < 1 ){ + ts.toss(argv[0], " requires 1 or more ", + (jsonMode ? "json snippets" : "globs"),"."); + } + final String sql = t.takeInputBuffer(); + t.execSql(null, true, + jsonMode ? ResultBufferMode.ASIS : ResultBufferMode.ESCAPED, + ResultRowMode.NEWLINE, sql); + final String rbuf = t.getResultText(); + final String[] res = rbuf.split("\n"); + if( res.length != globs.length ){ + ts.toss(argv[0], " failure: input has ", res.length, + " row(s) but expecting ",globs.length); + } + for(int i = 0; i < res.length; ++i){ + final String glob = globs[i].replaceAll("\\s+"," ").trim(); + //ts.verbose2(argv[0]," <<",glob,">> vs <<",res[i],">>"); + if( jsonMode ){ + if( !glob.equals(res[i]) ){ + ts.toss(argv[0], " json <<",glob, ">> does not match: <<", + res[i],">>"); + } + }else if( 0 != SQLTester.strglob(glob, res[i]) ){ + ts.toss(argv[0], " glob <<",glob,">> does not match: <<",res[i],">>"); + } + } + } +} + +//! --testcase command +class TestCaseCommand extends Command { + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,1); + ts.setTestCaseName(argv[1]); + t.clearResultBuffer(); + t.clearInputBuffer(); + } +} + +//! 
--verbosity command +class VerbosityCommand extends Command { + public void process(SQLTester t, TestScript ts, String[] argv){ + argcCheck(ts,argv,1); + ts.setVerbosity( Integer.parseInt(argv[1]) ); + } +} + +class CommandDispatcher { + + private static final java.util.Map commandMap = + new java.util.HashMap<>(); + + /** + Returns a (cached) instance mapped to name, or null if no match + is found. + */ + static Command getCommandByName(String name){ + Command rv = commandMap.get(name); + if( null!=rv ) return rv; + switch(name){ + case "close": rv = new CloseDbCommand(); break; + case "column-names": rv = new ColumnNamesCommand(); break; + case "db": rv = new DbCommand(); break; + case "glob": rv = new GlobCommand(); break; + case "json": rv = new JsonCommand(); break; + case "jsonglob": rv = new NoopCommand(true); break; + case "json-block": rv = new JsonBlockCommand(); break; + case "new": rv = new NewDbCommand(); break; + case "notglob": rv = new NotGlobCommand(); break; + case "null": rv = new NullCommand(); break; + case "oom": rv = new NoopCommand(); break; + case "open": rv = new OpenDbCommand(); break; + case "print": rv = new PrintCommand(); break; + case "result": rv = new ResultCommand(); break; + case "run": rv = new RunCommand(); break; + case "stmt-cache": rv = new NoopCommand(); break; + case "tableresult": rv = new TableResultCommand(); break; + case "testcase": rv = new TestCaseCommand(); break; + case "verbosity": rv = new VerbosityCommand(); break; + default: rv = null; break; + } + if( null!=rv ) commandMap.put(name, rv); + return rv; + } + + /** + Treats argv[0] as a command name, looks it up with + getCommandByName(), and calls process() on that instance, passing + it arguments given to this function. + */ + static void dispatch(SQLTester tester, TestScript ts, String[] argv) throws Exception{ + final Command cmd = getCommandByName(argv[0]); + if(null == cmd){ + throw new UnknownCommand(ts, argv[0]); + } + cmd.process(tester, ts, argv); + } +} + + +/** + This class represents a single test script. It handles (or + delegates) its the reading-in and parsing, but the details of + evaluation are delegated elsewhere. +*/ +class TestScript { + //! input file + private String filename = null; + //! Name pulled from the SCRIPT_MODULE_NAME directive of the file + private String moduleName = null; + //! Current test case name. + private String testCaseName = null; + //! Content buffer state. + private final Cursor cur = new Cursor(); + //! Utility for console output. + private final Outer outer = new Outer(); + + //! File content and parse state. + private static final class Cursor { + private final StringBuilder sb = new StringBuilder(); + byte[] src = null; + //! Current position in this.src. + int pos = 0; + //! Current line number. Starts at 0 for internal reasons and will + // line up with 1-based reality once parsing starts. + int lineNo = 0 /* yes, zero */; + //! Putback value for this.pos. + int putbackPos = 0; + //! Putback line number + int putbackLineNo = 0; + //! Peeked-to pos, used by peekLine() and consumePeeked(). + int peekedPos = 0; + //! Peeked-to line number. + int peekedLineNo = 0; + + //! Restore parsing state to the start of the stream. + void rewind(){ + sb.setLength(0); + pos = lineNo = putbackPos = putbackLineNo = peekedPos = peekedLineNo = 0 + /* kinda missing memset() about now. 
*/; + } + } + + private byte[] readFile(String filename) throws Exception { + return java.nio.file.Files.readAllBytes(java.nio.file.Paths.get(filename)); + } + + /** + Initializes the script with the content of the given file. + Throws if it cannot read the file. + */ + public TestScript(String filename) throws Exception{ + this.filename = filename; + setVerbosity(2); + cur.src = readFile(filename); + } + + public String getFilename(){ + return filename; + } + + public String getModuleName(){ + return moduleName; + } + + /** + Verbosity level 0 produces no debug/verbose output. Level 1 produces + some and level 2 produces more. + */ + public void setVerbosity(int level){ + outer.setVerbosity(level); + } + + public String getOutputPrefix(){ + String rc = "["+(moduleName==null ? "" : moduleName)+"]"; + if( null!=testCaseName ) rc += "["+testCaseName+"]"; + if( null!=filename ) rc += "["+filename+"]"; + return rc + " line "+ cur.lineNo; + } + + static final String[] verboseLabel = {"🔈",/*"🔉",*/"🔊","📢"}; + //! Output vals only if level<=current verbosity level. + private TestScript verboseN(int level, Object... vals){ + final int verbosity = outer.getVerbosity(); + if(verbosity>=level){ + outer.out( verboseLabel[level-1], getOutputPrefix(), " ",level,": " + ).outln(vals); + } + return this; + } + + TestScript verbose1(Object... vals){return verboseN(1,vals);} + TestScript verbose2(Object... vals){return verboseN(2,vals);} + TestScript verbose3(Object... vals){return verboseN(3,vals);} + + private void reset(){ + testCaseName = null; + cur.rewind(); + } + + void setTestCaseName(String n){ testCaseName = n; } + + /** + Returns the next line from the buffer, minus the trailing EOL. + + Returns null when all input is consumed. Throws if it reads + illegally-encoded input, e.g. (non-)characters in the range + 128-256. + */ + String getLine(){ + if( cur.pos==cur.src.length ){ + return null /* EOF */; + } + cur.putbackPos = cur.pos; + cur.putbackLineNo = cur.lineNo; + cur.sb.setLength(0); + final boolean skipLeadingWs = false; + byte b = 0, prevB = 0; + int i = cur.pos; + if(skipLeadingWs) { + /* Skip any leading spaces, including newlines. This will eliminate + blank lines. */ + for(; i < cur.src.length; ++i, prevB=b){ + b = cur.src[i]; + switch((int)b){ + case 32/*space*/: case 9/*tab*/: case 13/*CR*/: continue; + case 10/*NL*/: ++cur.lineNo; continue; + default: break; + } + break; + } + if( i==cur.src.length ){ + return null /* EOF */; + } + } + boolean doBreak = false; + final byte[] aChar = {0,0,0,0} /* multi-byte char buffer */; + int nChar = 0 /* number of bytes in the char */; + for(; i < cur.src.length && !doBreak; ++i){ + b = cur.src[i]; + switch( (int)b ){ + case 13/*CR*/: continue; + case 10/*NL*/: + ++cur.lineNo; + if(cur.sb.length()>0) doBreak = true; + // Else it's an empty string + break; + default: + /* Multi-byte chars need to be gathered up and appended at + one time. Appending individual bytes to the StringBuffer + appends their integer value. 
*/ + nChar = 1; + switch( b & 0xF0 ){ + case 0xC0: nChar = 2; break; + case 0xE0: nChar = 3; break; + case 0xF0: nChar = 4; break; + default: + if( b > 127 ) this.toss("Invalid character (#"+(int)b+")."); + break; + } + if( 1==nChar ){ + cur.sb.append((char)b); + }else{ + for(int x = 0; x < nChar; ++x) aChar[x] = cur.src[i+x]; + cur.sb.append(new String(Arrays.copyOf(aChar, nChar), + StandardCharsets.UTF_8)); + i += nChar-1; + } + break; + } + } + cur.pos = i; + final String rv = cur.sb.toString(); + if( i==cur.src.length && rv.isEmpty() ){ + return null /* EOF */; + } + return rv; + }/*getLine()*/ + + /** + Fetches the next line then resets the cursor to its pre-call + state. consumePeeked() can be used to consume this peeked line + without having to re-parse it. + */ + String peekLine(){ + final int oldPos = cur.pos; + final int oldPB = cur.putbackPos; + final int oldPBL = cur.putbackLineNo; + final int oldLine = cur.lineNo; + try{ return getLine(); } + finally{ + cur.peekedPos = cur.pos; + cur.peekedLineNo = cur.lineNo; + cur.pos = oldPos; + cur.lineNo = oldLine; + cur.putbackPos = oldPB; + cur.putbackLineNo = oldPBL; + } + } + + /** + Only valid after calling peekLine() and before calling getLine(). + This places the cursor to the position it would have been at had + the peekLine() had been fetched with getLine(). + */ + void consumePeeked(){ + cur.pos = cur.peekedPos; + cur.lineNo = cur.peekedLineNo; + } + + /** + Restores the cursor to the position it had before the previous + call to getLine(). + */ + void putbackLine(){ + cur.pos = cur.putbackPos; + cur.lineNo = cur.putbackLineNo; + } + + private boolean checkRequiredProperties(SQLTester t, String[] props) throws SQLTesterException{ + if( true ) return false; + int nOk = 0; + for(String rp : props){ + verbose1("REQUIRED_PROPERTIES: ",rp); + switch(rp){ + case "RECURSIVE_TRIGGERS": + t.appendDbInitSql("pragma recursive_triggers=on;"); + ++nOk; + break; + case "TEMPSTORE_FILE": + /* This _assumes_ that the lib is built with SQLITE_TEMP_STORE=1 or 2, + which we just happen to know is the case */ + t.appendDbInitSql("pragma temp_store=1;"); + ++nOk; + break; + case "TEMPSTORE_MEM": + /* This _assumes_ that the lib is built with SQLITE_TEMP_STORE=1 or 2, + which we just happen to know is the case */ + t.appendDbInitSql("pragma temp_store=0;"); + ++nOk; + break; + case "AUTOVACUUM": + t.appendDbInitSql("pragma auto_vacuum=full;"); + ++nOk; + case "INCRVACUUM": + t.appendDbInitSql("pragma auto_vacuum=incremental;"); + ++nOk; + default: + break; + } + } + return props.length == nOk; + } + + private static final Pattern patternRequiredProperties = + Pattern.compile(" REQUIRED_PROPERTIES:[ \\t]*(\\S.*)\\s*$"); + private static final Pattern patternScriptModuleName = + Pattern.compile(" SCRIPT_MODULE_NAME:[ \\t]*(\\S+)\\s*$"); + private static final Pattern patternMixedModuleName = + Pattern.compile(" ((MIXED_)?MODULE_NAME):[ \\t]*(\\S+)\\s*$"); + private static final Pattern patternCommand = + Pattern.compile("^--(([a-z-]+)( .*)?)$"); + + /** + Looks for "directives." If a compatible one is found, it is + processed and this function returns. If an incompatible one is found, + a description of it is returned and processing of the test must + end immediately. 
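+
+     (In this implementation an incompatibility is reported by
+     throwing an IncompatibleDirective rather than by returning a
+     description; compatible directives are processed and the method
+     returns normally.)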
+ */ + private void checkForDirective( + SQLTester tester, String line + ) throws IncompatibleDirective { + if(line.startsWith("#")){ + throw new IncompatibleDirective(this, "C-preprocessor input: "+line); + }else if(line.startsWith("---")){ + new IncompatibleDirective(this, "triple-dash: "+line); + } + Matcher m = patternScriptModuleName.matcher(line); + if( m.find() ){ + moduleName = m.group(1); + return; + } + m = patternRequiredProperties.matcher(line); + if( m.find() ){ + final String rp = m.group(1); + if( ! checkRequiredProperties( tester, rp.split("\\s+") ) ){ + throw new IncompatibleDirective(this, "REQUIRED_PROPERTIES: "+rp); + } + } + m = patternMixedModuleName.matcher(line); + if( m.find() ){ + throw new IncompatibleDirective(this, m.group(1)+": "+m.group(3)); + } + if( line.contains("\n|") ){ + throw new IncompatibleDirective(this, "newline-pipe combination."); + } + return; + } + + boolean isCommandLine(String line, boolean checkForImpl){ + final Matcher m = patternCommand.matcher(line); + boolean rc = m.find(); + if( rc && checkForImpl ){ + rc = null!=CommandDispatcher.getCommandByName(m.group(2)); + } + return rc; + } + + /** + If line looks like a command, returns an argv for that command + invocation, else returns null. + */ + String[] getCommandArgv(String line){ + final Matcher m = patternCommand.matcher(line); + return m.find() ? m.group(1).trim().split("\\s+") : null; + } + + /** + Fetches lines until the next recognized command. Throws if + checkForDirective() does. Returns null if there is no input or + it's only whitespace. The returned string retains all whitespace. + + Note that "subcommands", --command-like constructs in the body + which do not match a known command name are considered to be + content, not commands. + */ + String fetchCommandBody(SQLTester tester){ + final StringBuilder sb = new StringBuilder(); + String line; + while( (null != (line = peekLine())) ){ + checkForDirective(tester, line); + if( isCommandLine(line, true) ) break; + else { + sb.append(line).append("\n"); + consumePeeked(); + } + } + line = sb.toString(); + return line.trim().isEmpty() ? null : line; + } + + private void processCommand(SQLTester t, String[] argv) throws Exception{ + verbose1("running command: ",argv[0], " ", Util.argvToString(argv)); + if(outer.getVerbosity()>1){ + final String input = t.getInputText(); + if( !input.isEmpty() ) verbose3("Input buffer = ",input); + } + CommandDispatcher.dispatch(t, this, argv); + } + + void toss(Object... msg) throws TestScriptFailed { + StringBuilder sb = new StringBuilder(); + for(Object s : msg) sb.append(s); + throw new TestScriptFailed(this, sb.toString()); + } + + /** + Runs this test script in the context of the given tester object. + */ + public boolean run(SQLTester tester) throws Exception { + reset(); + setVerbosity(tester.getVerbosity()); + String line, directive; + String[] argv; + while( null != (line = getLine()) ){ + verbose3("input line: ",line); + checkForDirective(tester, line); + argv = getCommandArgv(line); + if( null!=argv ){ + processCommand(tester, argv); + continue; + } + tester.appendInput(line,true); + } + return true; + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ScalarFunction.java b/ext/jni/src/org/sqlite/jni/capi/ScalarFunction.java new file mode 100644 index 0000000000..95541bdcba --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ScalarFunction.java @@ -0,0 +1,33 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. 
In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + + +/** + A SQLFunction implementation for scalar functions. +*/ +public abstract class ScalarFunction implements SQLFunction { + /** + As for the xFunc() argument of the C API's + sqlite3_create_function(). If this function throws, it is + translated into an sqlite3_result_error(). + */ + public abstract void xFunc(sqlite3_context cx, sqlite3_value[] args); + + /** + Optionally override to be notified when the UDF is finalized by + SQLite. This default implementation does nothing. + */ + public void xDestroy() {} +} diff --git a/ext/jni/src/org/sqlite/jni/capi/TableColumnMetadata.java b/ext/jni/src/org/sqlite/jni/capi/TableColumnMetadata.java new file mode 100644 index 0000000000..54808cd1ca --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/TableColumnMetadata.java @@ -0,0 +1,35 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A wrapper object for use with sqlite3_table_column_metadata(). + They are populated only via that interface. +*/ +public final class TableColumnMetadata { + final OutputPointer.Bool pNotNull = new OutputPointer.Bool(); + final OutputPointer.Bool pPrimaryKey = new OutputPointer.Bool(); + final OutputPointer.Bool pAutoinc = new OutputPointer.Bool(); + final OutputPointer.String pzCollSeq = new OutputPointer.String(); + final OutputPointer.String pzDataType = new OutputPointer.String(); + + public TableColumnMetadata(){ + } + + public String getDataType(){ return pzDataType.value; } + public String getCollation(){ return pzCollSeq.value; } + public boolean isNotNull(){ return pNotNull.value; } + public boolean isPrimaryKey(){ return pPrimaryKey.value; } + public boolean isAutoincrement(){ return pAutoinc.value; } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/Tester1.java b/ext/jni/src/org/sqlite/jni/capi/Tester1.java new file mode 100644 index 0000000000..9d14c954b8 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/Tester1.java @@ -0,0 +1,2207 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains a set of tests for the sqlite3 JNI bindings. 
+*/ +package org.sqlite.jni.capi; +import static org.sqlite.jni.capi.CApi.*; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; + +/** + An annotation for Tester1 tests which we do not want to run in + reflection-driven test mode because either they are not suitable + for multi-threaded threaded mode or we have to control their execution + order. +*/ +@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD}) +@interface ManualTest{} +/** + Annotation for Tester1 tests which mark those which must be skipped + in multi-threaded mode. +*/ +@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD}) +@interface SingleThreadOnly{} + +/** + Annotation for Tester1 tests which must only be run if + sqlite3_jni_supports_nio() is true. +*/ +@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD}) +@interface RequiresJniNio{} + +public class Tester1 implements Runnable { + //! True when running in multi-threaded mode. + private static boolean mtMode = false; + //! True to sleep briefly between tests. + private static boolean takeNaps = false; + //! True to shuffle the order of the tests. + private static boolean shuffle = false; + //! True to dump the list of to-run tests to stdout. + private static int listRunTests = 0; + //! True to squelch all out() and outln() output. + private static boolean quietMode = false; + //! Total number of runTests() calls. + private static int nTestRuns = 0; + //! List of test*() methods to run. + private static List testMethods = null; + //! List of exceptions collected by run() + private static final List listErrors = new ArrayList<>(); + private static final class Metrics { + //! Number of times createNewDb() (or equivalent) is invoked. + volatile int dbOpen = 0; + } + + private final Integer tId; + + Tester1(Integer id){ + tId = id; + } + + static final Metrics metrics = new Metrics(); + + public static synchronized void outln(){ + if( !quietMode ){ + System.out.println(); + } + } + + public static synchronized void outPrefix(){ + if( !quietMode ){ + System.out.print(Thread.currentThread().getName()+": "); + } + } + + public static synchronized void outln(Object val){ + if( !quietMode ){ + outPrefix(); + System.out.println(val); + } + } + + public static synchronized void out(Object val){ + if( !quietMode ){ + System.out.print(val); + } + } + + @SuppressWarnings("unchecked") + public static synchronized void out(Object... vals){ + if( !quietMode ){ + outPrefix(); + for(Object v : vals) out(v); + } + } + + @SuppressWarnings("unchecked") + public static synchronized void outln(Object... vals){ + if( !quietMode ){ + out(vals); out("\n"); + } + } + + static volatile int affirmCount = 0; + public static synchronized int affirm(Boolean v, String comment){ + ++affirmCount; + if( false ) assert( v /* prefer assert over exception if it's enabled because + the JNI layer sometimes has to suppress exceptions, + so they might be squelched on their way back to the + top. 
*/); + if( !v ) throw new RuntimeException(comment); + return affirmCount; + } + + public static void affirm(Boolean v){ + affirm(v, "Affirmation failed."); + } + + @SingleThreadOnly /* because it's thread-agnostic */ + private void test1(){ + affirm(sqlite3_libversion_number() == SQLITE_VERSION_NUMBER); + } + + public static sqlite3 createNewDb(){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + int rc = sqlite3_open(":memory:", out); + ++metrics.dbOpen; + sqlite3 db = out.take(); + if( 0!=rc ){ + final String msg = + null==db ? sqlite3_errstr(rc) : sqlite3_errmsg(db); + sqlite3_close(db); + throw new RuntimeException("Opening db failed: "+msg); + } + affirm( null == out.get() ); + affirm( 0 != db.getNativePointer() ); + rc = sqlite3_busy_timeout(db, 2000); + affirm( 0 == rc ); + return db; + } + + public static void execSql(sqlite3 db, String[] sql){ + execSql(db, String.join("", sql)); + } + + public static int execSql(sqlite3 db, boolean throwOnError, String sql){ + OutputPointer.Int32 oTail = new OutputPointer.Int32(); + final byte[] sqlUtf8 = sql.getBytes(StandardCharsets.UTF_8); + int pos = 0, n = 1; + byte[] sqlChunk = sqlUtf8; + int rc = 0; + sqlite3_stmt stmt = null; + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + while(pos < sqlChunk.length){ + if(pos > 0){ + sqlChunk = Arrays.copyOfRange(sqlChunk, pos, + sqlChunk.length); + } + if( 0==sqlChunk.length ) break; + rc = sqlite3_prepare_v2(db, sqlChunk, outStmt, oTail); + if(throwOnError) affirm(0 == rc); + else if( 0!=rc ) break; + pos = oTail.value; + stmt = outStmt.take(); + if( null == stmt ){ + // empty statement was parsed. + continue; + } + affirm(0 != stmt.getNativePointer()); + while( SQLITE_ROW == (rc = sqlite3_step(stmt)) ){ + } + sqlite3_finalize(stmt); + affirm(0 == stmt.getNativePointer()); + if(0!=rc && SQLITE_ROW!=rc && SQLITE_DONE!=rc){ + break; + } + } + sqlite3_finalize(stmt); + if(SQLITE_ROW==rc || SQLITE_DONE==rc) rc = 0; + if( 0!=rc && throwOnError){ + throw new RuntimeException("db op failed with rc=" + +rc+": "+sqlite3_errmsg(db)); + } + return rc; + } + + public static void execSql(sqlite3 db, String sql){ + execSql(db, true, sql); + } + + public static sqlite3_stmt prepare(sqlite3 db, boolean throwOnError, String sql){ + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + int rc = sqlite3_prepare_v2(db, sql, outStmt); + if( throwOnError ){ + affirm( 0 == rc ); + } + final sqlite3_stmt rv = outStmt.take(); + affirm( null == outStmt.get() ); + if( throwOnError ){ + affirm( 0 != rv.getNativePointer() ); + } + return rv; + } + + public static sqlite3_stmt prepare(sqlite3 db, String sql){ + return prepare(db, true, sql); + } + + private void showCompileOption(){ + int i = 0; + String optName; + outln("compile options:"); + for( ; null != (optName = sqlite3_compileoption_get(i)); ++i){ + outln("\t"+optName+"\t (used="+ + sqlite3_compileoption_used(optName)+")"); + } + } + + private void testCompileOption(){ + int i = 0; + String optName; + for( ; null != (optName = sqlite3_compileoption_get(i)); ++i){ + } + affirm( i > 10 ); + affirm( null==sqlite3_compileoption_get(-1) ); + } + + private void testOpenDb1(){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + int rc = sqlite3_open(":memory:", out); + ++metrics.dbOpen; + sqlite3 db = out.get(); + affirm(0 == rc); + affirm(db.getNativePointer()!=0); + sqlite3_db_config(db, SQLITE_DBCONFIG_DEFENSIVE, 1, null) + /* This function has different mangled names in jdk8 vs jdk19, + 
and this call is here to ensure that the build fails + if it cannot find both names. */; + + affirm( 0==sqlite3_db_readonly(db,"main") ); + affirm( 0==sqlite3_db_readonly(db,null) ); + affirm( 0>sqlite3_db_readonly(db,"nope") ); + affirm( 0>sqlite3_db_readonly(null,null) ); + affirm( 0==sqlite3_last_insert_rowid(null) ); + + // These interrupt checks are only to make sure that the JNI binding + // has the proper exported symbol names. They don't actually test + // anything useful. + affirm( !sqlite3_is_interrupted(db) ); + sqlite3_interrupt(db); + affirm( sqlite3_is_interrupted(db) ); + sqlite3_close_v2(db); + affirm(0 == db.getNativePointer()); + } + + private void testOpenDb2(){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + int rc = sqlite3_open_v2(":memory:", out, + SQLITE_OPEN_READWRITE + | SQLITE_OPEN_CREATE, null); + ++metrics.dbOpen; + affirm(0 == rc); + sqlite3 db = out.get(); + affirm(0 != db.getNativePointer()); + sqlite3_close_v2(db); + affirm(0 == db.getNativePointer()); + } + + private void testPrepare123(){ + sqlite3 db = createNewDb(); + int rc; + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + rc = sqlite3_prepare(db, "CREATE TABLE t1(a);", outStmt); + affirm(0 == rc); + sqlite3_stmt stmt = outStmt.take(); + affirm(0 != stmt.getNativePointer()); + affirm( !sqlite3_stmt_readonly(stmt) ); + affirm( db == sqlite3_db_handle(stmt) ); + rc = sqlite3_step(stmt); + affirm(SQLITE_DONE == rc); + sqlite3_finalize(stmt); + affirm( null == sqlite3_db_handle(stmt) ); + affirm(0 == stmt.getNativePointer()); + + { /* Demonstrate how to use the "zTail" option of + sqlite3_prepare() family of functions. */ + OutputPointer.Int32 oTail = new OutputPointer.Int32(); + final byte[] sqlUtf8 = + "CREATE TABLE t2(a); INSERT INTO t2(a) VALUES(1),(2),(3)" + .getBytes(StandardCharsets.UTF_8); + int pos = 0, n = 1; + byte[] sqlChunk = sqlUtf8; + while(pos < sqlChunk.length){ + if(pos > 0){ + sqlChunk = Arrays.copyOfRange(sqlChunk, pos, sqlChunk.length); + } + //outln("SQL chunk #"+n+" length = "+sqlChunk.length+", pos = "+pos); + if( 0==sqlChunk.length ) break; + rc = sqlite3_prepare_v2(db, sqlChunk, outStmt, oTail); + affirm(0 == rc); + stmt = outStmt.get(); + pos = oTail.value; + /*outln("SQL tail pos = "+pos+". 
Chunk = "+ + (new String(Arrays.copyOfRange(sqlChunk,0,pos), + StandardCharsets.UTF_8)));*/ + switch(n){ + case 1: affirm(19 == pos); break; + case 2: affirm(36 == pos); break; + default: affirm( false /* can't happen */ ); + + } + ++n; + affirm(0 != stmt.getNativePointer()); + rc = sqlite3_step(stmt); + affirm(SQLITE_DONE == rc); + sqlite3_finalize(stmt); + affirm(0 == stmt.getNativePointer()); + } + } + + + rc = sqlite3_prepare_v3(db, "INSERT INTO t2(a) VALUES(1),(2),(3)", + 0, outStmt); + affirm(0 == rc); + stmt = outStmt.get(); + affirm(0 != stmt.getNativePointer()); + sqlite3_finalize(stmt); + affirm(0 == stmt.getNativePointer() ); + + affirm( 0==sqlite3_errcode(db) ); + stmt = sqlite3_prepare(db, "intentional error"); + affirm( null==stmt ); + affirm( 0!=sqlite3_errcode(db) ); + affirm( 0==sqlite3_errmsg(db).indexOf("near \"intentional\"") ); + sqlite3_finalize(stmt); + stmt = sqlite3_prepare(db, "/* empty input*/\n-- comments only"); + affirm( null==stmt ); + affirm( 0==sqlite3_errcode(db) ); + sqlite3_close_v2(db); + } + + private void testBindFetchInt(){ + sqlite3 db = createNewDb(); + execSql(db, "CREATE TABLE t(a)"); + + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(:a);"); + affirm(1 == sqlite3_bind_parameter_count(stmt)); + final int paramNdx = sqlite3_bind_parameter_index(stmt, ":a"); + affirm(1 == paramNdx); + affirm( ":a".equals(sqlite3_bind_parameter_name(stmt, paramNdx))); + int total1 = 0; + long rowid = -1; + int changes = sqlite3_changes(db); + int changesT = sqlite3_total_changes(db); + long changes64 = sqlite3_changes64(db); + long changesT64 = sqlite3_total_changes64(db); + int rc; + for(int i = 99; i < 102; ++i ){ + total1 += i; + rc = sqlite3_bind_int(stmt, paramNdx, i); + affirm(0 == rc); + rc = sqlite3_step(stmt); + sqlite3_reset(stmt); + affirm(SQLITE_DONE == rc); + long x = sqlite3_last_insert_rowid(db); + affirm(x > rowid); + rowid = x; + } + sqlite3_finalize(stmt); + affirm(300 == total1); + affirm(sqlite3_changes(db) > changes); + affirm(sqlite3_total_changes(db) > changesT); + affirm(sqlite3_changes64(db) > changes64); + affirm(sqlite3_total_changes64(db) > changesT64); + stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;"); + affirm( sqlite3_stmt_readonly(stmt) ); + affirm( !sqlite3_stmt_busy(stmt) ); + if( sqlite3_compileoption_used("ENABLE_COLUMN_METADATA") ){ + /* Unlike in native C code, JNI won't trigger an + UnsatisfiedLinkError until these are called (on Linux, at + least). */ + affirm("t".equals(sqlite3_column_table_name(stmt,0))); + affirm("main".equals(sqlite3_column_database_name(stmt,0))); + affirm("a".equals(sqlite3_column_origin_name(stmt,0))); + } + + int total2 = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + affirm( sqlite3_stmt_busy(stmt) ); + total2 += sqlite3_column_int(stmt, 0); + sqlite3_value sv = sqlite3_column_value(stmt, 0); + affirm( null != sv ); + affirm( 0 != sv.getNativePointer() ); + affirm( SQLITE_INTEGER == sqlite3_value_type(sv) ); + } + affirm( !sqlite3_stmt_busy(stmt) ); + sqlite3_finalize(stmt); + affirm(total1 == total2); + + // sqlite3_value_frombind() checks... 
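+    // (sqlite3_value_frombind() reports whether a value originated
+    // from a bound parameter: column 0 below is the literal 1, so it
+    // must report false, while column 1 is the bound "?" and must
+    // report true.)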
+ stmt = prepare(db, "SELECT 1, ?"); + sqlite3_bind_int(stmt, 1, 2); + rc = sqlite3_step(stmt); + affirm( SQLITE_ROW==rc ); + affirm( !sqlite3_value_frombind(sqlite3_column_value(stmt, 0)) ); + affirm( sqlite3_value_frombind(sqlite3_column_value(stmt, 1)) ); + sqlite3_finalize(stmt); + + sqlite3_close_v2(db); + affirm(0 == db.getNativePointer()); + } + + private void testBindFetchInt64(){ + try (sqlite3 db = createNewDb()){ + execSql(db, "CREATE TABLE t(a)"); + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);"); + long total1 = 0; + for(long i = 0xffffffff; i < 0xffffffff + 3; ++i ){ + total1 += i; + sqlite3_bind_int64(stmt, 1, i); + sqlite3_step(stmt); + sqlite3_reset(stmt); + } + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;"); + long total2 = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + total2 += sqlite3_column_int64(stmt, 0); + } + sqlite3_finalize(stmt); + affirm(total1 == total2); + //sqlite3_close_v2(db); + } + } + + private void testBindFetchDouble(){ + try (sqlite3 db = createNewDb()){ + execSql(db, "CREATE TABLE t(a)"); + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);"); + double total1 = 0; + for(double i = 1.5; i < 5.0; i = i + 1.0 ){ + total1 += i; + sqlite3_bind_double(stmt, 1, i); + sqlite3_step(stmt); + sqlite3_reset(stmt); + } + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;"); + double total2 = 0; + int counter = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + ++counter; + total2 += sqlite3_column_double(stmt, 0); + } + affirm(4 == counter); + sqlite3_finalize(stmt); + affirm(total2<=total1+0.01 && total2>=total1-0.01); + //sqlite3_close_v2(db); + } + } + + private void testBindFetchText(){ + sqlite3 db = createNewDb(); + execSql(db, "CREATE TABLE t(a)"); + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);"); + String[] list1 = { "hell🤩", "w😃rld", "!🤩" }; + int rc; + int n = 0; + for( String e : list1 ){ + rc = (0==n) + ? 
sqlite3_bind_text(stmt, 1, e) + : sqlite3_bind_text16(stmt, 1, e); + affirm(0 == rc); + rc = sqlite3_step(stmt); + affirm(SQLITE_DONE==rc); + sqlite3_reset(stmt); + } + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;"); + StringBuilder sbuf = new StringBuilder(); + n = 0; + final boolean tryNio = sqlite3_jni_supports_nio(); + while( SQLITE_ROW == sqlite3_step(stmt) ){ + final sqlite3_value sv = sqlite3_value_dup(sqlite3_column_value(stmt,0)); + final String txt = sqlite3_column_text16(stmt, 0); + sbuf.append( txt ); + affirm( txt.equals(new String( + sqlite3_column_text(stmt, 0), + StandardCharsets.UTF_8 + )) ); + affirm( txt.length() < sqlite3_value_bytes(sv) ); + affirm( txt.equals(new String( + sqlite3_value_text(sv), + StandardCharsets.UTF_8)) ); + affirm( txt.length() == sqlite3_value_bytes16(sv)/2 ); + affirm( txt.equals(sqlite3_value_text16(sv)) ); + if( tryNio ){ + java.nio.ByteBuffer bu = sqlite3_value_nio_buffer(sv); + byte ba[] = sqlite3_value_blob(sv); + affirm( ba.length == bu.capacity() ); + int i = 0; + for( byte b : ba ){ + affirm( b == bu.get(i++) ); + } + } + sqlite3_value_free(sv); + ++n; + } + sqlite3_finalize(stmt); + affirm(3 == n); + affirm("w😃rldhell🤩!🤩".contentEquals(sbuf)); + + try( sqlite3_stmt stmt2 = prepare(db, "SELECT ?, ?") ){ + rc = sqlite3_bind_text(stmt2, 1, ""); + affirm( 0==rc ); + rc = sqlite3_bind_text(stmt2, 2, (String)null); + affirm( 0==rc ); + rc = sqlite3_step(stmt2); + affirm( SQLITE_ROW==rc ); + byte[] colBa = sqlite3_column_text(stmt2, 0); + affirm( 0==colBa.length ); + colBa = sqlite3_column_text(stmt2, 1); + affirm( null==colBa ); + //sqlite3_finalize(stmt); + } + + if(true){ + sqlite3_close_v2(db); + }else{ + // Let the Object.finalize() override deal with it. + } + } + + private void testBindFetchBlob(){ + sqlite3 db = createNewDb(); + execSql(db, "CREATE TABLE t(a)"); + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);"); + byte[] list1 = { 0x32, 0x33, 0x34 }; + int rc = sqlite3_bind_blob(stmt, 1, list1); + affirm( 0==rc ); + rc = sqlite3_step(stmt); + affirm(SQLITE_DONE == rc); + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t ORDER BY a DESC;"); + int n = 0; + int total = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + byte[] blob = sqlite3_column_blob(stmt, 0); + affirm(3 == blob.length); + int i = 0; + for(byte b : blob){ + affirm(b == list1[i++]); + total += b; + } + ++n; + } + sqlite3_finalize(stmt); + affirm(1 == n); + affirm(total == 0x32 + 0x33 + 0x34); + sqlite3_close_v2(db); + } + + @RequiresJniNio + private void testBindByteBuffer(){ + /* TODO: these tests need to be much more extensive to check the + begin/end range handling. */ + + java.nio.ByteBuffer zeroCheck = + java.nio.ByteBuffer.allocateDirect(0); + affirm( null != zeroCheck ); + zeroCheck = null; + sqlite3 db = createNewDb(); + execSql(db, "CREATE TABLE t(a)"); + + final java.nio.ByteBuffer buf = java.nio.ByteBuffer.allocateDirect(10); + buf.put((byte)0x31)/*note that we'll skip this one*/ + .put((byte)0x32) + .put((byte)0x33) + .put((byte)0x34) + .put((byte)0x35)/*we'll skip this one too*/; + + final int expectTotal = buf.get(1) + buf.get(2) + buf.get(3); + sqlite3_stmt stmt = prepare(db, "INSERT INTO t(a) VALUES(?);"); + affirm( SQLITE_ERROR == sqlite3_bind_blob(stmt, 1, buf, -1, 0), + "Buffer offset may not be negative." 
); + affirm( 0 == sqlite3_bind_blob(stmt, 1, buf, 1, 3) ); + affirm( SQLITE_DONE == sqlite3_step(stmt) ); + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t;"); + int total = 0; + affirm( SQLITE_ROW == sqlite3_step(stmt) ); + byte blob[] = sqlite3_column_blob(stmt, 0); + java.nio.ByteBuffer nioBlob = + sqlite3_column_nio_buffer(stmt, 0); + affirm(3 == blob.length); + affirm(blob.length == nioBlob.capacity()); + affirm(blob.length == nioBlob.limit()); + int i = 0; + for(byte b : blob){ + affirm( i<=3 ); + affirm(b == buf.get(1 + i)); + affirm(b == nioBlob.get(i)); + ++i; + total += b; + } + affirm( SQLITE_DONE == sqlite3_step(stmt) ); + sqlite3_finalize(stmt); + affirm(total == expectTotal); + + SQLFunction func = + new ScalarFunction(){ + public void xFunc(sqlite3_context cx, sqlite3_value[] args){ + sqlite3_result_blob(cx, buf, 1, 3); + } + }; + + affirm( 0 == sqlite3_create_function(db, "myfunc", -1, SQLITE_UTF8, func) ); + stmt = prepare(db, "SELECT myfunc()"); + affirm( SQLITE_ROW == sqlite3_step(stmt) ); + blob = sqlite3_column_blob(stmt, 0); + affirm(3 == blob.length); + i = 0; + total = 0; + for(byte b : blob){ + affirm( i<=3 ); + affirm(b == buf.get(1 + i++)); + total += b; + } + affirm( SQLITE_DONE == sqlite3_step(stmt) ); + sqlite3_finalize(stmt); + affirm(total == expectTotal); + + sqlite3_close_v2(db); + } + + private void testSql(){ + sqlite3 db = createNewDb(); + sqlite3_stmt stmt = prepare(db, "SELECT 1"); + affirm( "SELECT 1".equals(sqlite3_sql(stmt)) ); + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT ?"); + sqlite3_bind_text(stmt, 1, "hell😃"); + final String expect = "SELECT 'hell😃'"; + affirm( expect.equals(sqlite3_expanded_sql(stmt)) ); + String n = sqlite3_normalized_sql(stmt); + affirm( null==n || "SELECT?;".equals(n) ); + sqlite3_finalize(stmt); + sqlite3_close(db); + } + + private void testCollation(){ + final sqlite3 db = createNewDb(); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + final ValueHolder xDestroyCalled = new ValueHolder<>(0); + final CollationCallback myCollation = new CollationCallback() { + private final String myState = + "this is local state. There is much like it, but this is mine."; + @Override + // Reverse-sorts its inputs... + public int call(byte[] lhs, byte[] rhs){ + int len = lhs.length > rhs.length ? rhs.length : lhs.length; + int c = 0, i = 0; + for(i = 0; i < len; ++i){ + c = lhs[i] - rhs[i]; + if(0 != c) break; + } + if(0==c){ + if(i < lhs.length) c = 1; + else if(i < rhs.length) c = -1; + } + return -c; + } + @Override + public void xDestroy() { + // Just demonstrates that xDestroy is called. 
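+ // In this test it fires when the db handle is closed, as the + // xDestroyCalled affirms at the end of testCollation() verify.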
+ ++xDestroyCalled.value; + } + }; + final CollationNeededCallback collLoader = new CollationNeededCallback(){ + @Override + public void call(sqlite3 dbArg, int eTextRep, String collationName){ + affirm(dbArg == db/* as opposed to a temporary object*/); + sqlite3_create_collation(dbArg, "reversi", eTextRep, myCollation); + } + }; + int rc = sqlite3_collation_needed(db, collLoader); + affirm( 0 == rc ); + rc = sqlite3_collation_needed(db, collLoader); + affirm( 0 == rc /* Installing the same object again is a no-op */); + sqlite3_stmt stmt = prepare(db, "SELECT a FROM t ORDER BY a COLLATE reversi"); + int counter = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + final String val = sqlite3_column_text16(stmt, 0); + ++counter; + //outln("REVERSI'd row#"+counter+": "+val); + switch(counter){ + case 1: affirm("c".equals(val)); break; + case 2: affirm("b".equals(val)); break; + case 3: affirm("a".equals(val)); break; + } + } + affirm(3 == counter); + sqlite3_finalize(stmt); + stmt = prepare(db, "SELECT a FROM t ORDER BY a"); + counter = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + final String val = sqlite3_column_text16(stmt, 0); + ++counter; + //outln("Non-REVERSI'd row#"+counter+": "+val); + switch(counter){ + case 3: affirm("c".equals(val)); break; + case 2: affirm("b".equals(val)); break; + case 1: affirm("a".equals(val)); break; + } + } + affirm(3 == counter); + sqlite3_finalize(stmt); + affirm( 0 == xDestroyCalled.value ); + rc = sqlite3_collation_needed(db, null); + affirm( 0 == rc ); + sqlite3_close_v2(db); + affirm( 0 == db.getNativePointer() ); + affirm( 1 == xDestroyCalled.value ); + } + + @SingleThreadOnly /* because it's thread-agnostic */ + private void testToUtf8(){ + /** + https://docs.oracle.com/javase/8/docs/api/java/nio/charset/Charset.html + + Let's ensure that we can convert to standard UTF-8 in Java code + (noting that the JNI native API has no way to do this). + */ + final byte[] ba = "a \0 b".getBytes(StandardCharsets.UTF_8); + affirm( 5 == ba.length /* as opposed to 6 in modified utf-8 */); + } + + private void testStatus(){ + final OutputPointer.Int64 cur64 = new OutputPointer.Int64(); + final OutputPointer.Int64 high64 = new OutputPointer.Int64(); + final OutputPointer.Int32 cur32 = new OutputPointer.Int32(); + final OutputPointer.Int32 high32 = new OutputPointer.Int32(); + final sqlite3 db = createNewDb(); + execSql(db, "create table t(a); insert into t values(1),(2),(3)"); + + int rc = sqlite3_status(SQLITE_STATUS_MEMORY_USED, cur32, high32, false); + affirm( 0 == rc ); + affirm( cur32.value > 0 ); + affirm( high32.value >= cur32.value ); + + rc = sqlite3_status64(SQLITE_STATUS_MEMORY_USED, cur64, high64, false); + affirm( 0 == rc ); + affirm( cur64.value > 0 ); + affirm( high64.value >= cur64.value ); + + cur32.value = 0; + high32.value = 1; + rc = sqlite3_db_status(db, SQLITE_DBSTATUS_SCHEMA_USED, cur32, high32, false); + affirm( 0 == rc ); + affirm( cur32.value > 0 ); + affirm( high32.value == 0 /* always 0 for SCHEMA_USED */ ); + + sqlite3_close_v2(db); + } + + private void testUdf1(){ + final sqlite3 db = createNewDb(); + // These ValueHolders are just to confirm that the func did what we want... 
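+ // (Anonymous classes can only capture (effectively) final outer + // references, so mutable state shared with them is wrapped in final + // ValueHolder objects.)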
+ final ValueHolder xDestroyCalled = new ValueHolder<>(false); + final ValueHolder xFuncAccum = new ValueHolder<>(0); + final ValueHolder neverEverDoThisInClientCode = new ValueHolder<>(null); + final ValueHolder neverEverDoThisInClientCode2 = new ValueHolder<>(null); + + // Create an SQLFunction instance using one of its 3 subclasses: + // Scalar, Aggregate, or Window: + SQLFunction func = + // Each of the 3 subclasses requires a different set of + // functions, all of which must be implemented. Anonymous + // classes are a convenient way to implement these. + new ScalarFunction(){ + public void xFunc(sqlite3_context cx, sqlite3_value[] args){ + affirm(db == sqlite3_context_db_handle(cx)); + if( null==neverEverDoThisInClientCode.value ){ + /* !!!NEVER!!! hold a reference to an sqlite3_value or + sqlite3_context object like this in client code! They + are ONLY legal for the duration of their single + call. We do it here ONLY to test that the defenses + against clients doing this are working. */ + neverEverDoThisInClientCode2.value = cx; + neverEverDoThisInClientCode.value = args; + } + int result = 0; + for( sqlite3_value v : args ) result += sqlite3_value_int(v); + xFuncAccum.value += result;// just for post-run testing + sqlite3_result_int(cx, result); + } + /* OPTIONALLY override xDestroy... */ + public void xDestroy(){ + xDestroyCalled.value = true; + } + }; + + // Register and use the function... + int rc = sqlite3_create_function(db, "myfunc", -1, SQLITE_UTF8, func); + affirm(0 == rc); + affirm(0 == xFuncAccum.value); + final sqlite3_stmt stmt = prepare(db, "SELECT myfunc(1,2,3)"); + int n = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + affirm( 6 == sqlite3_column_int(stmt, 0) ); + ++n; + } + sqlite3_finalize(stmt); + affirm(1 == n); + affirm(6 == xFuncAccum.value); + affirm( !xDestroyCalled.value ); + affirm( null!=neverEverDoThisInClientCode.value ); + affirm( null!=neverEverDoThisInClientCode2.value ); + affirm( 0 xFuncAccum = new ValueHolder<>(0); + + SQLFunction funcAgg = new AggregateFunction(){ + @Override public void xStep(sqlite3_context cx, sqlite3_value[] args){ + /** Throwing from here should emit loud noise on stdout or stderr + but the exception is suppressed because we have no way to inform + sqlite about it from these callbacks. */ + //throw new RuntimeException("Throwing from an xStep"); + } + @Override public void xFinal(sqlite3_context cx){ + throw new RuntimeException("Throwing from an xFinal"); + } + }; + int rc = sqlite3_create_function(db, "myagg", 1, SQLITE_UTF8, funcAgg); + affirm(0 == rc); + affirm(0 == xFuncAccum.value); + sqlite3_stmt stmt = prepare(db, "SELECT myagg(1)"); + rc = sqlite3_step(stmt); + sqlite3_finalize(stmt); + affirm( 0 != rc ); + affirm( sqlite3_errmsg(db).indexOf("an xFinal") > 0 ); + + SQLFunction funcSc = new ScalarFunction(){ + @Override public void xFunc(sqlite3_context cx, sqlite3_value[] args){ + throw new RuntimeException("Throwing from an xFunc"); + } + }; + rc = sqlite3_create_function(db, "mysca", 0, SQLITE_UTF8, funcSc); + affirm(0 == rc); + affirm(0 == xFuncAccum.value); + stmt = prepare(db, "SELECT mysca()"); + rc = sqlite3_step(stmt); + sqlite3_finalize(stmt); + affirm( 0 != rc ); + affirm( sqlite3_errmsg(db).indexOf("an xFunc") > 0 ); + rc = sqlite3_create_function(db, "mysca", 1, -1, funcSc); + affirm( SQLITE_FORMAT==rc, "invalid encoding value." 
); + sqlite3_close_v2(db); + } + + @SingleThreadOnly + private void testUdfJavaObject(){ + affirm( !mtMode ); + final sqlite3 db = createNewDb(); + final ValueHolder testResult = new ValueHolder<>(db); + final ValueHolder boundObj = new ValueHolder<>(42); + final SQLFunction func = new ScalarFunction(){ + public void xFunc(sqlite3_context cx, sqlite3_value args[]){ + sqlite3_result_java_object(cx, testResult.value); + affirm( sqlite3_value_java_object(args[0]) == boundObj ); + } + }; + int rc = sqlite3_create_function(db, "myfunc", -1, SQLITE_UTF8, func); + affirm(0 == rc); + sqlite3_stmt stmt = prepare(db, "select myfunc(?)"); + affirm( 0 != stmt.getNativePointer() ); + affirm( testResult.value == db ); + rc = sqlite3_bind_java_object(stmt, 1, boundObj); + affirm( 0==rc ); + int n = 0; + if( SQLITE_ROW == sqlite3_step(stmt) ){ + affirm( testResult.value == sqlite3_column_java_object(stmt, 0) ); + affirm( testResult.value == sqlite3_column_java_object(stmt, 0, sqlite3.class) ); + affirm( null == sqlite3_column_java_object(stmt, 0, sqlite3_stmt.class) ); + affirm( null == sqlite3_column_java_object(stmt,1) ); + final sqlite3_value v = sqlite3_column_value(stmt, 0); + affirm( testResult.value == sqlite3_value_java_object(v) ); + affirm( testResult.value == sqlite3_value_java_object(v, sqlite3.class) ); + affirm( testResult.value == + sqlite3_value_java_object(v, testResult.value.getClass()) ); + affirm( testResult.value == sqlite3_value_java_object(v, Object.class) ); + affirm( null == sqlite3_value_java_object(v, String.class) ); + ++n; + } + sqlite3_finalize(stmt); + affirm( 1 == n ); + affirm( 0==sqlite3_db_release_memory(db) ); + sqlite3_close_v2(db); + } + + private void testUdfAggregate(){ + final sqlite3 db = createNewDb(); + final ValueHolder xFinalNull = + // To confirm that xFinal() is called with no aggregate state + // when the corresponding result set is empty. + new ValueHolder<>(false); + final ValueHolder neverEverDoThisInClientCode = new ValueHolder<>(null); + final ValueHolder neverEverDoThisInClientCode2 = new ValueHolder<>(null); + SQLFunction func = new AggregateFunction(){ + @Override + public void xStep(sqlite3_context cx, sqlite3_value[] args){ + if( null==neverEverDoThisInClientCode.value ){ + /* !!!NEVER!!! hold a reference to an sqlite3_value or + sqlite3_context object like this in client code! They + are ONLY legal for the duration of their single + call. We do it here ONLY to test that the defenses + against clients doing this are working. 
*/ + neverEverDoThisInClientCode.value = args; + } + final ValueHolder agg = this.getAggregateState(cx, 0); + agg.value += sqlite3_value_int(args[0]); + affirm( agg == this.getAggregateState(cx, 0) ); + } + @Override + public void xFinal(sqlite3_context cx){ + if( null==neverEverDoThisInClientCode2.value ){ + neverEverDoThisInClientCode2.value = cx; + } + final Integer v = this.takeAggregateState(cx); + if(null == v){ + xFinalNull.value = true; + sqlite3_result_null(cx); + }else{ + sqlite3_result_int(cx, v); + } + } + }; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES(1),(2),(3)"); + int rc = sqlite3_create_function(db, "myfunc", 1, SQLITE_UTF8, func); + affirm(0 == rc); + sqlite3_stmt stmt = prepare(db, "select myfunc(a), myfunc(a+10) from t"); + affirm( 0==sqlite3_stmt_status(stmt, SQLITE_STMTSTATUS_RUN, false) ); + int n = 0; + if( SQLITE_ROW == sqlite3_step(stmt) ){ + int v = sqlite3_column_int(stmt, 0); + affirm( 6 == v ); + int v2 = sqlite3_column_int(stmt, 1); + affirm( 30+v == v2 ); + ++n; + } + affirm( 1==n ); + affirm(!xFinalNull.value); + affirm( null!=neverEverDoThisInClientCode.value ); + affirm( null!=neverEverDoThisInClientCode2.value ); + affirm( 0(){ + + private void xStepInverse(sqlite3_context cx, int v){ + this.getAggregateState(cx,0).value += v; + } + @Override public void xStep(sqlite3_context cx, sqlite3_value[] args){ + this.xStepInverse(cx, sqlite3_value_int(args[0])); + } + @Override public void xInverse(sqlite3_context cx, sqlite3_value[] args){ + this.xStepInverse(cx, -sqlite3_value_int(args[0])); + } + + private void xFinalValue(sqlite3_context cx, Integer v){ + if(null == v) sqlite3_result_null(cx); + else sqlite3_result_int(cx, v); + } + @Override public void xFinal(sqlite3_context cx){ + xFinalValue(cx, this.takeAggregateState(cx)); + } + @Override public void xValue(sqlite3_context cx){ + xFinalValue(cx, this.getAggregateState(cx,null).value); + } + }; + int rc = sqlite3_create_function(db, "winsumint", 1, SQLITE_UTF8, func); + affirm( 0 == rc ); + execSql(db, new String[] { + "CREATE TEMP TABLE twin(x, y); INSERT INTO twin VALUES", + "('a', 4),('b', 5),('c', 3),('d', 8),('e', 1)" + }); + final sqlite3_stmt stmt = prepare(db, + "SELECT x, winsumint(y) OVER ("+ + "ORDER BY x ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING"+ + ") AS sum_y "+ + "FROM twin ORDER BY x;"); + int n = 0; + while( SQLITE_ROW == sqlite3_step(stmt) ){ + final String s = sqlite3_column_text16(stmt, 0); + final int i = sqlite3_column_int(stmt, 1); + switch(++n){ + case 1: affirm( "a".equals(s) && 9==i ); break; + case 2: affirm( "b".equals(s) && 12==i ); break; + case 3: affirm( "c".equals(s) && 16==i ); break; + case 4: affirm( "d".equals(s) && 12==i ); break; + case 5: affirm( "e".equals(s) && 9==i ); break; + default: affirm( false /* cannot happen */ ); + } + } + sqlite3_finalize(stmt); + affirm( 5 == n ); + sqlite3_close_v2(db); + } + + private void listBoundMethods(){ + if(false){ + final java.lang.reflect.Field[] declaredFields = + CApi.class.getDeclaredFields(); + outln("Bound constants:\n"); + for(java.lang.reflect.Field field : declaredFields) { + if(java.lang.reflect.Modifier.isStatic(field.getModifiers())) { + outln("\t",field.getName()); + } + } + } + final java.lang.reflect.Method[] declaredMethods = + CApi.class.getDeclaredMethods(); + final java.util.List funcList = new java.util.ArrayList<>(); + for(java.lang.reflect.Method m : declaredMethods){ + if((m.getModifiers() & java.lang.reflect.Modifier.STATIC) != 0){ + final String name = m.getName(); + 
if(name.startsWith("sqlite3_")){ + funcList.add(name); + } + } + } + int count = 0; + java.util.Collections.sort(funcList); + for(String n : funcList){ + ++count; + outln("\t",n,"()"); + } + outln(count," functions named sqlite3_*."); + } + + private void testTrace(){ + final sqlite3 db = createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + /* Ensure that characters outside of the UTF BMP survive the trip + from Java to sqlite3 and back to Java. (At no small efficiency + penalty.) */ + final String nonBmpChar = "😃"; + int rc = sqlite3_trace_v2( + db, SQLITE_TRACE_STMT | SQLITE_TRACE_PROFILE + | SQLITE_TRACE_ROW | SQLITE_TRACE_CLOSE, + new TraceV2Callback(){ + @Override public int call(int traceFlag, Object pNative, Object x){ + ++counter.value; + //outln("TRACE "+traceFlag+" pNative = "+pNative.getClass().getName()); + switch(traceFlag){ + case SQLITE_TRACE_STMT: + affirm(pNative instanceof sqlite3_stmt); + //outln("TRACE_STMT sql = "+x); + affirm(x instanceof String); + affirm( ((String)x).indexOf(nonBmpChar) > 0 ); + break; + case SQLITE_TRACE_PROFILE: + affirm(pNative instanceof sqlite3_stmt); + affirm(x instanceof Long); + //outln("TRACE_PROFILE time = "+x); + break; + case SQLITE_TRACE_ROW: + affirm(pNative instanceof sqlite3_stmt); + affirm(null == x); + //outln("TRACE_ROW = "+sqlite3_column_text16((sqlite3_stmt)pNative, 0)); + break; + case SQLITE_TRACE_CLOSE: + affirm(pNative instanceof sqlite3); + affirm(null == x); + break; + default: + affirm(false /*cannot happen*/); + break; + } + return 0; + } + }); + affirm( 0==rc ); + execSql(db, "SELECT coalesce(null,null,'"+nonBmpChar+"'); "+ + "SELECT 'w"+nonBmpChar+"orld'"); + affirm( 6 == counter.value ); + sqlite3_close_v2(db); + affirm( 7 == counter.value ); + } + + @SingleThreadOnly /* because threads inherently break this test */ + private static void testBusy(){ + final String dbName = "_busy-handler.db"; + try{ + final OutputPointer.sqlite3 outDb = new OutputPointer.sqlite3(); + final OutputPointer.sqlite3_stmt outStmt = new OutputPointer.sqlite3_stmt(); + + int rc = sqlite3_open(dbName, outDb); + ++metrics.dbOpen; + affirm( 0 == rc ); + final sqlite3 db1 = outDb.get(); + execSql(db1, "CREATE TABLE IF NOT EXISTS t(a)"); + rc = sqlite3_open(dbName, outDb); + ++metrics.dbOpen; + affirm( 0 == rc ); + affirm( outDb.get() != db1 ); + final sqlite3 db2 = outDb.get(); + + affirm( "main".equals( sqlite3_db_name(db1, 0) ) ); + rc = sqlite3_db_config(db1, SQLITE_DBCONFIG_MAINDBNAME, "foo"); + affirm( sqlite3_db_filename(db1, "foo").endsWith(dbName) ); + affirm( "foo".equals( sqlite3_db_name(db1, 0) ) ); + affirm( SQLITE_MISUSE == sqlite3_db_config(db1, 0, 0, null) ); + + final ValueHolder xBusyCalled = new ValueHolder<>(0); + BusyHandlerCallback handler = new BusyHandlerCallback(){ + @Override public int call(int n){ + //outln("busy handler #"+n); + return n > 2 ? 0 : ++xBusyCalled.value; + } + }; + rc = sqlite3_busy_handler(db2, handler); + affirm(0 == rc); + + // Force a locked condition... 
+ execSql(db1, "BEGIN EXCLUSIVE"); + rc = sqlite3_prepare_v2(db2, "SELECT * from t", outStmt); + affirm( SQLITE_BUSY == rc); + affirm( null == outStmt.get() ); + affirm( 3 == xBusyCalled.value ); + sqlite3_close_v2(db1); + sqlite3_close_v2(db2); + }finally{ + try{(new java.io.File(dbName)).delete();} + catch(Exception e){/* ignore */} + } + } + + private void testProgress(){ + final sqlite3 db = createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + sqlite3_progress_handler(db, 1, new ProgressHandlerCallback(){ + @Override public int call(){ + ++counter.value; + return 0; + } + }); + execSql(db, "SELECT 1; SELECT 2;"); + affirm( counter.value > 0 ); + int nOld = counter.value; + sqlite3_progress_handler(db, 0, null); + execSql(db, "SELECT 1; SELECT 2;"); + affirm( nOld == counter.value ); + sqlite3_close_v2(db); + } + + private void testCommitHook(){ + final sqlite3 db = createNewDb(); + sqlite3_extended_result_codes(db, true); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder hookResult = new ValueHolder<>(0); + final CommitHookCallback theHook = new CommitHookCallback(){ + @Override public int call(){ + ++counter.value; + return hookResult.value; + } + }; + CommitHookCallback oldHook = sqlite3_commit_hook(db, theHook); + affirm( null == oldHook ); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 2 == counter.value ); + execSql(db, "BEGIN; SELECT 1; SELECT 2; COMMIT;"); + affirm( 2 == counter.value /* NOT invoked if no changes are made */ ); + execSql(db, "BEGIN; update t set a='d' where a='c'; COMMIT;"); + affirm( 3 == counter.value ); + oldHook = sqlite3_commit_hook(db, theHook); + affirm( theHook == oldHook ); + execSql(db, "BEGIN; update t set a='e' where a='d'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = sqlite3_commit_hook(db, null); + affirm( theHook == oldHook ); + execSql(db, "BEGIN; update t set a='f' where a='e'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = sqlite3_commit_hook(db, null); + affirm( null == oldHook ); + execSql(db, "BEGIN; update t set a='g' where a='f'; COMMIT;"); + affirm( 4 == counter.value ); + + final CommitHookCallback newHook = new CommitHookCallback(){ + @Override public int call(){return 0;} + }; + oldHook = sqlite3_commit_hook(db, newHook); + affirm( null == oldHook ); + execSql(db, "BEGIN; update t set a='h' where a='g'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = sqlite3_commit_hook(db, theHook); + affirm( newHook == oldHook ); + execSql(db, "BEGIN; update t set a='i' where a='h'; COMMIT;"); + affirm( 5 == counter.value ); + hookResult.value = SQLITE_ERROR; + int rc = execSql(db, false, "BEGIN; update t set a='j' where a='i'; COMMIT;"); + affirm( SQLITE_CONSTRAINT_COMMITHOOK == rc ); + affirm( 6 == counter.value ); + sqlite3_close_v2(db); + } + + private void testUpdateHook(){ + final sqlite3 db = createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder expectedOp = new ValueHolder<>(0); + final UpdateHookCallback theHook = new UpdateHookCallback(){ + @Override + public void call(int opId, String dbName, String tableName, long rowId){ + ++counter.value; + if( 0!=expectedOp.value ){ + affirm( expectedOp.value == opId ); + } + } + }; + UpdateHookCallback oldHook = sqlite3_update_hook(db, theHook); + affirm( null == oldHook ); + expectedOp.value = SQLITE_INSERT; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 3 == counter.value ); + expectedOp.value = SQLITE_UPDATE; + execSql(db, 
"update t set a='d' where a='c';"); + affirm( 4 == counter.value ); + oldHook = sqlite3_update_hook(db, theHook); + affirm( theHook == oldHook ); + expectedOp.value = SQLITE_DELETE; + execSql(db, "DELETE FROM t where a='d'"); + affirm( 5 == counter.value ); + oldHook = sqlite3_update_hook(db, null); + affirm( theHook == oldHook ); + execSql(db, "update t set a='e' where a='b';"); + affirm( 5 == counter.value ); + oldHook = sqlite3_update_hook(db, null); + affirm( null == oldHook ); + + final UpdateHookCallback newHook = new UpdateHookCallback(){ + @Override public void call(int opId, String dbName, String tableName, long rowId){ + } + }; + oldHook = sqlite3_update_hook(db, newHook); + affirm( null == oldHook ); + execSql(db, "update t set a='h' where a='a'"); + affirm( 5 == counter.value ); + oldHook = sqlite3_update_hook(db, theHook); + affirm( newHook == oldHook ); + expectedOp.value = SQLITE_UPDATE; + execSql(db, "update t set a='i' where a='h'"); + affirm( 6 == counter.value ); + sqlite3_close_v2(db); + } + + /** + This test is functionally identical to testUpdateHook(), only with a + different callback type. + */ + private void testPreUpdateHook(){ + if( !sqlite3_compileoption_used("ENABLE_PREUPDATE_HOOK") ){ + //outln("Skipping testPreUpdateHook(): no pre-update hook support."); + return; + } + final sqlite3 db = createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder expectedOp = new ValueHolder<>(0); + final PreupdateHookCallback theHook = new PreupdateHookCallback(){ + @Override + public void call(sqlite3 db, int opId, String dbName, String dbTable, + long iKey1, long iKey2 ){ + ++counter.value; + switch( opId ){ + case SQLITE_UPDATE: + affirm( 0 < sqlite3_preupdate_count(db) ); + affirm( null != sqlite3_preupdate_new(db, 0) ); + affirm( null != sqlite3_preupdate_old(db, 0) ); + break; + case SQLITE_INSERT: + affirm( null != sqlite3_preupdate_new(db, 0) ); + break; + case SQLITE_DELETE: + affirm( null != sqlite3_preupdate_old(db, 0) ); + break; + default: + break; + } + if( 0!=expectedOp.value ){ + affirm( expectedOp.value == opId ); + } + } + }; + PreupdateHookCallback oldHook = sqlite3_preupdate_hook(db, theHook); + affirm( null == oldHook ); + expectedOp.value = SQLITE_INSERT; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 3 == counter.value ); + expectedOp.value = SQLITE_UPDATE; + execSql(db, "update t set a='d' where a='c';"); + affirm( 4 == counter.value ); + oldHook = sqlite3_preupdate_hook(db, theHook); + affirm( theHook == oldHook ); + expectedOp.value = SQLITE_DELETE; + execSql(db, "DELETE FROM t where a='d'"); + affirm( 5 == counter.value ); + oldHook = sqlite3_preupdate_hook(db, null); + affirm( theHook == oldHook ); + execSql(db, "update t set a='e' where a='b';"); + affirm( 5 == counter.value ); + oldHook = sqlite3_preupdate_hook(db, null); + affirm( null == oldHook ); + + final PreupdateHookCallback newHook = new PreupdateHookCallback(){ + @Override + public void call(sqlite3 db, int opId, String dbName, + String tableName, long iKey1, long iKey2){ + } + }; + oldHook = sqlite3_preupdate_hook(db, newHook); + affirm( null == oldHook ); + execSql(db, "update t set a='h' where a='a'"); + affirm( 5 == counter.value ); + oldHook = sqlite3_preupdate_hook(db, theHook); + affirm( newHook == oldHook ); + expectedOp.value = SQLITE_UPDATE; + execSql(db, "update t set a='i' where a='h'"); + affirm( 6 == counter.value ); + + sqlite3_close_v2(db); + } + + private void testRollbackHook(){ + final sqlite3 db = 
createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + final RollbackHookCallback theHook = new RollbackHookCallback(){ + @Override public void call(){ + ++counter.value; + } + }; + RollbackHookCallback oldHook = sqlite3_rollback_hook(db, theHook); + affirm( null == oldHook ); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 0 == counter.value ); + execSql(db, false, "BEGIN; SELECT 1; SELECT 2; ROLLBACK;"); + affirm( 1 == counter.value /* contra to commit hook, is invoked if no changes are made */ ); + + final RollbackHookCallback newHook = new RollbackHookCallback(){ + @Override public void call(){return;} + }; + oldHook = sqlite3_rollback_hook(db, newHook); + affirm( theHook == oldHook ); + execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 1 == counter.value ); + oldHook = sqlite3_rollback_hook(db, theHook); + affirm( newHook == oldHook ); + execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 2 == counter.value ); + int rc = execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 0 == rc ); + affirm( 3 == counter.value ); + sqlite3_close_v2(db); + } + + /** + If FTS5 is available, runs FTS5 tests, else returns with no side + effects. If it is available but loading of the FTS5 bits fails, + it throws. + */ + @SuppressWarnings("unchecked") + @SingleThreadOnly /* because the Fts5 parts are not yet known to be + thread-safe */ + private void testFts5() throws Exception { + if( !sqlite3_compileoption_used("ENABLE_FTS5") ){ + //outln("SQLITE_ENABLE_FTS5 is not set. Skipping FTS5 tests."); + return; + } + Exception err = null; + try { + Class t = Class.forName("org.sqlite.jni.fts5.TesterFts5"); + java.lang.reflect.Constructor ctor = t.getConstructor(); + ctor.setAccessible(true); + final long timeStart = System.currentTimeMillis(); + ctor.newInstance() /* will run all tests */; + final long timeEnd = System.currentTimeMillis(); + outln("FTS5 Tests done in ",(timeEnd - timeStart),"ms"); + }catch(ClassNotFoundException e){ + outln("FTS5 classes not loaded."); + err = e; + }catch(NoSuchMethodException e){ + outln("FTS5 tester ctor not found."); + err = e; + }catch(Exception e){ + outln("Instantiation of FTS5 tester threw."); + err = e; + } + if( null != err ){ + outln("Exception: "+err); + err.printStackTrace(); + throw err; + } + } + + private void testAuthorizer(){ + final sqlite3 db = createNewDb(); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder authRc = new ValueHolder<>(0); + final AuthorizerCallback auth = new AuthorizerCallback(){ + public int call(int op, String s0, String s1, String s2, String s3){ + ++counter.value; + //outln("xAuth(): "+s0+" "+s1+" "+s2+" "+s3); + return authRc.value; + } + }; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + sqlite3_set_authorizer(db, auth); + execSql(db, "UPDATE t SET a=1"); + affirm( 1 == counter.value ); + authRc.value = SQLITE_DENY; + int rc = execSql(db, false, "UPDATE t SET a=2"); + affirm( SQLITE_AUTH==rc ); + sqlite3_set_authorizer(db, null); + rc = execSql(db, false, "UPDATE t SET a=2"); + affirm( 0==rc ); + // TODO: expand these tests considerably + sqlite3_close(db); + } + + @SingleThreadOnly /* because multiple threads legitimately make these + results unpredictable */ + private synchronized void testAutoExtension(){ + final ValueHolder val = new ValueHolder<>(0); + final ValueHolder toss = new ValueHolder<>(null); + final AutoExtensionCallback ax = new AutoExtensionCallback(){ + @Override public int call(sqlite3 
db){ + ++val.value; + if( null!=toss.value ){ + throw new RuntimeException(toss.value); + } + return 0; + } + }; + int rc = sqlite3_auto_extension( ax ); + affirm( 0==rc ); + sqlite3_close(createNewDb()); + affirm( 1==val.value ); + sqlite3_close(createNewDb()); + affirm( 2==val.value ); + sqlite3_reset_auto_extension(); + sqlite3_close(createNewDb()); + affirm( 2==val.value ); + rc = sqlite3_auto_extension( ax ); + affirm( 0==rc ); + // Must not add a new entry + rc = sqlite3_auto_extension( ax ); + affirm( 0==rc ); + sqlite3_close( createNewDb() ); + affirm( 3==val.value ); + + sqlite3 db = createNewDb(); + affirm( 4==val.value ); + execSql(db, "ATTACH ':memory:' as foo"); + affirm( 4==val.value, "ATTACH uses the same connection, not sub-connections." ); + sqlite3_close(db); + db = null; + + affirm( sqlite3_cancel_auto_extension(ax) ); + affirm( !sqlite3_cancel_auto_extension(ax) ); + sqlite3_close(createNewDb()); + affirm( 4==val.value ); + rc = sqlite3_auto_extension( ax ); + affirm( 0==rc ); + Exception err = null; + toss.value = "Throwing from auto_extension."; + try{ + sqlite3_close(createNewDb()); + }catch(Exception e){ + err = e; + } + affirm( err!=null ); + affirm( err.getMessage().indexOf(toss.value)>0 ); + toss.value = null; + + val.value = 0; + final AutoExtensionCallback ax2 = new AutoExtensionCallback(){ + @Override public int call(sqlite3 db){ + ++val.value; + return 0; + } + }; + rc = sqlite3_auto_extension( ax2 ); + affirm( 0 == rc ); + sqlite3_close(createNewDb()); + affirm( 2 == val.value ); + affirm( sqlite3_cancel_auto_extension(ax) ); + affirm( !sqlite3_cancel_auto_extension(ax) ); + sqlite3_close(createNewDb()); + affirm( 3 == val.value ); + rc = sqlite3_auto_extension( ax ); + affirm( 0 == rc ); + sqlite3_close(createNewDb()); + affirm( 5 == val.value ); + affirm( sqlite3_cancel_auto_extension(ax2) ); + affirm( !sqlite3_cancel_auto_extension(ax2) ); + sqlite3_close(createNewDb()); + affirm( 6 == val.value ); + rc = sqlite3_auto_extension( ax2 ); + affirm( 0 == rc ); + sqlite3_close(createNewDb()); + affirm( 8 == val.value ); + + sqlite3_reset_auto_extension(); + sqlite3_close(createNewDb()); + affirm( 8 == val.value ); + affirm( !sqlite3_cancel_auto_extension(ax) ); + affirm( !sqlite3_cancel_auto_extension(ax2) ); + sqlite3_close(createNewDb()); + affirm( 8 == val.value ); + } + + + private void testColumnMetadata(){ + final sqlite3 db = createNewDb(); + execSql(db, new String[] { + "CREATE TABLE t(a duck primary key not null collate noCase); ", + "INSERT INTO t(a) VALUES(1),(2),(3);" + }); + OutputPointer.Bool bNotNull = new OutputPointer.Bool(); + OutputPointer.Bool bPrimaryKey = new OutputPointer.Bool(); + OutputPointer.Bool bAutoinc = new OutputPointer.Bool(); + OutputPointer.String zCollSeq = new OutputPointer.String(); + OutputPointer.String zDataType = new OutputPointer.String(); + int rc = sqlite3_table_column_metadata( + db, "main", "t", "a", zDataType, zCollSeq, + bNotNull, bPrimaryKey, bAutoinc); + affirm( 0==rc ); + affirm( bPrimaryKey.value ); + affirm( !bAutoinc.value ); + affirm( bNotNull.value ); + affirm( "noCase".equals(zCollSeq.value) ); + affirm( "duck".equals(zDataType.value) ); + + TableColumnMetadata m = + sqlite3_table_column_metadata(db, "main", "t", "a"); + affirm( null != m ); + affirm( bPrimaryKey.value == m.isPrimaryKey() ); + affirm( bAutoinc.value == m.isAutoincrement() ); + affirm( bNotNull.value == m.isNotNull() ); + affirm( zCollSeq.value.equals(m.getCollation()) ); + affirm( zDataType.value.equals(m.getDataType()) ); + + affirm( 
null == sqlite3_table_column_metadata(db, "nope", "t", "a") ); + affirm( null == sqlite3_table_column_metadata(db, "main", "nope", "a") ); + + m = sqlite3_table_column_metadata(db, "main", "t", null) + /* Check only for existence of table */; + affirm( null != m ); + affirm( m.isPrimaryKey() ); + affirm( !m.isAutoincrement() ); + affirm( !m.isNotNull() ); + affirm( "BINARY".equalsIgnoreCase(m.getCollation()) ); + affirm( "INTEGER".equalsIgnoreCase(m.getDataType()) ); + + sqlite3_close_v2(db); + } + + private void testTxnState(){ + final sqlite3 db = createNewDb(); + affirm( SQLITE_TXN_NONE == sqlite3_txn_state(db, null) ); + affirm( sqlite3_get_autocommit(db) ); + execSql(db, "BEGIN;"); + affirm( !sqlite3_get_autocommit(db) ); + affirm( SQLITE_TXN_NONE == sqlite3_txn_state(db, null) ); + execSql(db, "SELECT * FROM sqlite_schema;"); + affirm( SQLITE_TXN_READ == sqlite3_txn_state(db, "main") ); + execSql(db, "CREATE TABLE t(a);"); + affirm( SQLITE_TXN_WRITE == sqlite3_txn_state(db, null) ); + execSql(db, "ROLLBACK;"); + affirm( SQLITE_TXN_NONE == sqlite3_txn_state(db, null) ); + sqlite3_close_v2(db); + } + + + private void testExplain(){ + final sqlite3 db = createNewDb(); + sqlite3_stmt stmt = prepare(db,"SELECT 1"); + + affirm( 0 == sqlite3_stmt_isexplain(stmt) ); + int rc = sqlite3_stmt_explain(stmt, 1); + affirm( 1 == sqlite3_stmt_isexplain(stmt) ); + rc = sqlite3_stmt_explain(stmt, 2); + affirm( 2 == sqlite3_stmt_isexplain(stmt) ); + sqlite3_finalize(stmt); + sqlite3_close_v2(db); + } + + private void testLimit(){ + final sqlite3 db = createNewDb(); + int v; + + v = sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1); + affirm( v > 0 ); + affirm( v == sqlite3_limit(db, SQLITE_LIMIT_LENGTH, v-1) ); + affirm( v-1 == sqlite3_limit(db, SQLITE_LIMIT_LENGTH, -1) ); + sqlite3_close_v2(db); + } + + private void testComplete(){ + affirm( 0==sqlite3_complete("select 1") ); + affirm( 0!=sqlite3_complete("select 1;") ); + affirm( 0!=sqlite3_complete("nope 'nope' 'nope' 1;"), "Yup" ); + } + + private void testKeyword(){ + final int n = sqlite3_keyword_count(); + affirm( n>0 ); + affirm( !sqlite3_keyword_check("_nope_") ); + affirm( sqlite3_keyword_check("seLect") ); + affirm( null!=sqlite3_keyword_name(0) ); + affirm( null!=sqlite3_keyword_name(n-1) ); + affirm( null==sqlite3_keyword_name(n) ); + } + + private void testBackup(){ + final sqlite3 dbDest = createNewDb(); + + try (sqlite3 dbSrc = createNewDb()) { + execSql(dbSrc, new String[]{ + "pragma page_size=512; VACUUM;", + "create table t(a);", + "insert into t(a) values(1),(2),(3);" + }); + affirm( null==sqlite3_backup_init(dbSrc,"main",dbSrc,"main") ); + try (sqlite3_backup b = sqlite3_backup_init(dbDest,"main",dbSrc,"main")) { + affirm( null!=b ); + affirm( b.getNativePointer()!=0 ); + int rc; + while( SQLITE_DONE!=(rc = sqlite3_backup_step(b, 1)) ){ + affirm( 0==rc ); + } + affirm( sqlite3_backup_pagecount(b) > 0 ); + rc = sqlite3_backup_finish(b); + affirm( 0==rc ); + affirm( b.getNativePointer()==0 ); + } + } + + try (sqlite3_stmt stmt = prepare(dbDest,"SELECT sum(a) from t")) { + sqlite3_step(stmt); + affirm( sqlite3_column_int(stmt,0) == 6 ); + } + sqlite3_close_v2(dbDest); + } + + private void testRandomness(){ + byte[] foo = new byte[20]; + int i = 0; + for( byte b : foo ){ + i += b; + } + affirm( i==0 ); + sqlite3_randomness(foo); + for( byte b : foo ){ + if(b!=0) ++i; + } + affirm( i!=0, "There's a very slight chance that 0 is actually correct." 
); + } + + private void testBlobOpen(){ + final sqlite3 db = createNewDb(); + + execSql(db, "CREATE TABLE T(a BLOB);" + +"INSERT INTO t(rowid,a) VALUES(1, 'def'),(2, 'XYZ');" + ); + final OutputPointer.sqlite3_blob pOut = new OutputPointer.sqlite3_blob(); + int rc = sqlite3_blob_open(db, "main", "t", "a", + sqlite3_last_insert_rowid(db), 1, pOut); + affirm( 0==rc ); + sqlite3_blob b = pOut.take(); + affirm( null!=b ); + affirm( 0!=b.getNativePointer() ); + affirm( 3==sqlite3_blob_bytes(b) ); + rc = sqlite3_blob_write( b, new byte[] {100, 101, 102 /*"DEF"*/}, 0); + affirm( 0==rc ); + rc = sqlite3_blob_close(b); + affirm( 0==rc ); + rc = sqlite3_blob_close(b); + affirm( 0!=rc ); + affirm( 0==b.getNativePointer() ); + sqlite3_stmt stmt = prepare(db,"SELECT length(a), a FROM t ORDER BY a"); + affirm( SQLITE_ROW == sqlite3_step(stmt) ); + affirm( 3 == sqlite3_column_int(stmt,0) ); + affirm( "def".equals(sqlite3_column_text16(stmt,1)) ); + sqlite3_finalize(stmt); + + b = sqlite3_blob_open(db, "main", "t", "a", + sqlite3_last_insert_rowid(db), 0); + affirm( null!=b ); + rc = sqlite3_blob_reopen(b, 2); + affirm( 0==rc ); + final byte[] tgt = new byte[3]; + rc = sqlite3_blob_read(b, tgt, 0); + affirm( 0==rc ); + affirm( 100==tgt[0] && 101==tgt[1] && 102==tgt[2], "DEF" ); + rc = sqlite3_blob_close(b); + affirm( 0==rc ); + + if( !sqlite3_jni_supports_nio() ){ + outln("WARNING: skipping tests for ByteBuffer-using sqlite3_blob APIs ", + "because this platform lacks that support."); + sqlite3_close_v2(db); + return; + } + /* Sanity checks for the java.nio.ByteBuffer-taking overloads of + sqlite3_blob_read/write(). */ + execSql(db, "UPDATE t SET a=zeroblob(10)"); + b = sqlite3_blob_open(db, "main", "t", "a", 1, 1); + affirm( null!=b ); + java.nio.ByteBuffer bb = java.nio.ByteBuffer.allocateDirect(10); + for( byte i = 0; i < 10; ++i ){ + bb.put((int)i, (byte)(48+i & 0xff)); + } + rc = sqlite3_blob_write(b, 1, bb, 1, 10); + affirm( rc==SQLITE_ERROR, "b length < (srcOffset + bb length)" ); + rc = sqlite3_blob_write(b, -1, bb); + affirm( rc==SQLITE_ERROR, "Target offset may not be negative" ); + rc = sqlite3_blob_write(b, 0, bb, -1, -1); + affirm( rc==SQLITE_ERROR, "Source offset may not be negative" ); + rc = sqlite3_blob_write(b, 1, bb, 1, 8); + affirm( rc==0 ); + // b's contents: 0 49 50 51 52 53 54 55 56 0 + // ascii: 0 '1' '2' '3' '4' '5' '6' '7' '8' 0 + byte br[] = new byte[10]; + java.nio.ByteBuffer bbr = + java.nio.ByteBuffer.allocateDirect(bb.limit()); + rc = sqlite3_blob_read( b, br, 0 ); + affirm( rc==0 ); + rc = sqlite3_blob_read( b, bbr ); + affirm( rc==0 ); + java.nio.ByteBuffer bbr2 = sqlite3_blob_read_nio_buffer(b, 0, 12); + affirm( null==bbr2, "Read size is too big"); + bbr2 = sqlite3_blob_read_nio_buffer(b, -1, 3); + affirm( null==bbr2, "Source offset is negative"); + bbr2 = sqlite3_blob_read_nio_buffer(b, 5, 6); + affirm( null==bbr2, "Read pos+size is too big"); + bbr2 = sqlite3_blob_read_nio_buffer(b, 4, 7); + affirm( null==bbr2, "Read pos+size is too big"); + bbr2 = sqlite3_blob_read_nio_buffer(b, 4, 6); + affirm( null!=bbr2 ); + java.nio.ByteBuffer bbr3 = + java.nio.ByteBuffer.allocateDirect(2 * bb.limit()); + java.nio.ByteBuffer bbr4 = + java.nio.ByteBuffer.allocateDirect(5); + rc = sqlite3_blob_read( b, bbr3 ); + affirm( rc==0 ); + rc = sqlite3_blob_read( b, bbr4 ); + affirm( rc==0 ); + affirm( sqlite3_blob_bytes(b)==bbr3.limit() ); + affirm( 5==bbr4.limit() ); + sqlite3_blob_close(b); + affirm( 0==br[0] ); + affirm( 0==br[9] ); + affirm( 0==bbr.get(0) ); + affirm( 0==bbr.get(9) ); + 
affirm( bbr2.limit() == 6 ); + affirm( 0==bbr3.get(0) ); + { + Exception ex = null; + try{ bbr3.get(11); } + catch(Exception e){ex = e;} + affirm( ex instanceof IndexOutOfBoundsException, + "bbr3.limit() was reset by read()" ); + ex = null; + } + affirm( 0==bbr4.get(0) ); + for( int i = 1; i < 9; ++i ){ + affirm( br[i] == 48 + i ); + affirm( br[i] == bbr.get(i) ); + affirm( br[i] == bbr3.get(i) ); + if( i>3 ){ + affirm( br[i] == bbr2.get(i-4) ); + } + if( i < bbr4.limit() ){ + affirm( br[i] == bbr4.get(i) ); + } + } + sqlite3_close_v2(db); + } + + private void testPrepareMulti(){ + final sqlite3 db = createNewDb(); + final String[] sql = { + "create table t(","a)", + "; insert into t(a) values(1),(2),(3);", + "select a from t;" + }; + final List liStmt = new ArrayList<>(); + final PrepareMultiCallback proxy = new PrepareMultiCallback.StepAll(); + final ValueHolder toss = new ValueHolder<>(null); + PrepareMultiCallback m = new PrepareMultiCallback() { + @Override public int call(sqlite3_stmt st){ + liStmt.add(st); + if( null!=toss.value ){ + throw new RuntimeException(toss.value); + } + return proxy.call(st); + } + }; + int rc = sqlite3_prepare_multi(db, sql, m); + affirm( 0==rc ); + affirm( liStmt.size() == 3 ); + for( sqlite3_stmt st : liStmt ){ + sqlite3_finalize(st); + } + toss.value = "This is an exception."; + rc = sqlite3_prepare_multi(db, "SELECT 1", m); + affirm( SQLITE_ERROR==rc ); + affirm( sqlite3_errmsg(db).indexOf(toss.value)>0 ); + sqlite3_close_v2(db); + } + + private void testSetErrmsg(){ + final sqlite3 db = createNewDb(); + + int rc = sqlite3_set_errmsg(db, SQLITE_RANGE, "nope"); + affirm( 0==rc ); + affirm( SQLITE_MISUSE == sqlite3_set_errmsg(null, 0, null) ); + affirm( "nope".equals(sqlite3_errmsg(db)) ); + affirm( SQLITE_RANGE == sqlite3_errcode(db) ); + rc = sqlite3_set_errmsg(db, 0, null); + affirm( "not an error".equals(sqlite3_errmsg(db)) ); + affirm( 0 == sqlite3_errcode(db) ); + sqlite3_close_v2(db); + } + + /* Copy/paste/rename this to add new tests. */ + private void _testTemplate(){ + final sqlite3 db = createNewDb(); + sqlite3_stmt stmt = prepare(db,"SELECT 1"); + sqlite3_finalize(stmt); + sqlite3_close_v2(db); + } + + + @ManualTest /* we really only want to run this test manually */ + private void testSleep(){ + out("Sleeping briefly... "); + sqlite3_sleep(600); + outln("Woke up."); + } + + private void nap() throws InterruptedException { + if( takeNaps ){ + Thread.sleep(java.util.concurrent.ThreadLocalRandom.current().nextInt(3, 17), 0); + } + } + + @ManualTest /* because we only want to run this test on demand */ + private void testFail(){ + affirm( false, "Intentional failure." 
); + } + + private void runTests(boolean fromThread) throws Exception { + if(false) showCompileOption(); + List mlist = testMethods; + affirm( null!=mlist ); + if( shuffle ){ + mlist = new ArrayList<>( testMethods.subList(0, testMethods.size()) ); + java.util.Collections.shuffle(mlist); + } + if( (!fromThread && listRunTests>0) || listRunTests>1 ){ + synchronized(this.getClass()){ + if( !fromThread ){ + out("Initial test"," list: "); + for(java.lang.reflect.Method m : testMethods){ + out(m.getName()+" "); + } + outln(); + outln("(That list excludes some which are hard-coded to run.)"); + } + out("Running"," tests: "); + for(java.lang.reflect.Method m : mlist){ + out(m.getName()+" "); + } + outln(); + } + } + for(java.lang.reflect.Method m : mlist){ + nap(); + try{ + m.invoke(this); + }catch(java.lang.reflect.InvocationTargetException e){ + outln("FAILURE: ",m.getName(),"(): ", e.getCause()); + throw e; + } + } + synchronized( this.getClass() ){ + ++nTestRuns; + } + } + + public void run() { + try { + runTests(0!=this.tId); + }catch(Exception e){ + synchronized( listErrors ){ + listErrors.add(e); + } + }finally{ + affirm( sqlite3_java_uncache_thread() ); + affirm( !sqlite3_java_uncache_thread() ); + } + } + + /** + Runs the basic sqlite3 JNI binding sanity-check suite. + + CLI flags: + + -q|-quiet: disables most test output. + + -t|-thread N: runs the tests in N threads + concurrently. Default=1. + + -r|-repeat N: repeats the tests in a loop N times, each one + consisting of the -thread value's threads. + + -shuffle: randomizes the order of most of the test functions. + + -naps: sleep small random intervals between tests in order to add + some chaos for cross-thread contention. + + + -list-tests: outputs the list of tests being run, minus some + which are hard-coded. In multi-threaded mode, use this twice to + to emit the list run by each thread (which may differ from the initial + list, in particular if -shuffle is used). + + -fail: forces an exception to be thrown during the test run. Use + with -shuffle to make its appearance unpredictable. + + -v: emit some developer-mode info at the end. 
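+ + Example (hypothetical invocation; the class path, package name, and + JNI library location depend on the local build): + + java Tester1 -t 4 -r 2 -shuffle -naps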
+ */ + public static void main(String[] args) throws Exception { + int nThread = 1; + boolean doSomethingForDev = false; + int nRepeat = 1; + boolean forceFail = false; + boolean sqlLog = false; + boolean configLog = false; + boolean squelchTestOutput = false; + for( int i = 0; i < args.length; ){ + String arg = args[i++]; + if(arg.startsWith("-")){ + arg = arg.replaceFirst("-+",""); + if(arg.equals("v")){ + doSomethingForDev = true; + //listBoundMethods(); + }else if(arg.equals("t") || arg.equals("thread")){ + nThread = Integer.parseInt(args[i++]); + }else if(arg.equals("r") || arg.equals("repeat")){ + nRepeat = Integer.parseInt(args[i++]); + }else if(arg.equals("shuffle")){ + shuffle = true; + }else if(arg.equals("list-tests")){ + ++listRunTests; + }else if(arg.equals("fail")){ + forceFail = true; + }else if(arg.equals("sqllog")){ + sqlLog = true; + }else if(arg.equals("configlog")){ + configLog = true; + }else if(arg.equals("naps")){ + takeNaps = true; + }else if(arg.equals("q") || arg.equals("quiet")){ + squelchTestOutput = true; + }else{ + throw new IllegalArgumentException("Unhandled flag:"+arg); + } + } + } + + if( sqlLog ){ + if( sqlite3_compileoption_used("ENABLE_SQLLOG") ){ + final ConfigSqlLogCallback log = new ConfigSqlLogCallback() { + @Override public void call(sqlite3 db, String msg, int op){ + switch(op){ + case 0: outln("Opening db: ",db); break; + case 1: outln("SQL ",db,": ",msg); break; + case 2: outln("Closing db: ",db); break; + } + } + }; + int rc = sqlite3_config( log ); + affirm( 0==rc ); + rc = sqlite3_config( (ConfigSqlLogCallback)null ); + affirm( 0==rc ); + rc = sqlite3_config( log ); + affirm( 0==rc ); + }else{ + outln("WARNING: -sqllog is not active because library was built ", + "without SQLITE_ENABLE_SQLLOG."); + } + } + if( configLog ){ + final ConfigLogCallback log = new ConfigLogCallback() { + @Override public void call(int code, String msg){ + outln("ConfigLogCallback: ",ResultCode.getEntryForInt(code),": ", msg); + } + }; + int rc = sqlite3_config( log ); + affirm( 0==rc ); + rc = sqlite3_config( (ConfigLogCallback)null ); + affirm( 0==rc ); + rc = sqlite3_config( log ); + affirm( 0==rc ); + } + + quietMode = squelchTestOutput; + outln("If you just saw warning messages regarding CallStaticObjectMethod, ", + "you are very likely seeing the side effects of a known openjdk8 ", + "bug. It is unsightly but does not affect the library."); + + { + // Build list of tests to run from the methods named test*(). + testMethods = new ArrayList<>(); + int nSkipped = 0; + for(final java.lang.reflect.Method m : Tester1.class.getDeclaredMethods()){ + final String name = m.getName(); + if( name.equals("testFail") ){ + if( forceFail ){ + testMethods.add(m); + } + }else if( m.isAnnotationPresent( RequiresJniNio.class ) + && !sqlite3_jni_supports_nio() ){ + outln("Skipping test for lack of JNI java.nio.ByteBuffer support: ", + name,"()\n"); + ++nSkipped; + }else if( !m.isAnnotationPresent( ManualTest.class ) ){ + if( nThread>1 && m.isAnnotationPresent( SingleThreadOnly.class ) ){ + out("Skipping test in multi-thread mode: ",name,"()\n"); + ++nSkipped; + }else if( name.startsWith("test") ){ + testMethods.add(m); + } + } + } + } + + final long timeStart = System.currentTimeMillis(); + int nLoop = 0; + switch( sqlite3_threadsafe() ){ /* Sanity checking */ + case 0: + affirm( SQLITE_ERROR==sqlite3_config( SQLITE_CONFIG_SINGLETHREAD ), + "Could not switch to single-thread mode." 
); + affirm( SQLITE_ERROR==sqlite3_config( SQLITE_CONFIG_MULTITHREAD ), + "Could switch to multithread mode." ); + affirm( SQLITE_ERROR==sqlite3_config( SQLITE_CONFIG_SERIALIZED ), + "Could not switch to serialized threading mode." ); + outln("This is a single-threaded build. Not using threads."); + nThread = 1; + break; + case 1: + case 2: + affirm( 0==sqlite3_config( SQLITE_CONFIG_SINGLETHREAD ), + "Could not switch to single-thread mode." ); + affirm( 0==sqlite3_config( SQLITE_CONFIG_MULTITHREAD ), + "Could not switch to multithread mode." ); + affirm( 0==sqlite3_config( SQLITE_CONFIG_SERIALIZED ), + "Could not switch to serialized threading mode." ); + break; + default: + affirm( false, "Unhandled SQLITE_THREADSAFE value." ); + } + outln("libversion_number: ", + sqlite3_libversion_number(),"\n", + sqlite3_libversion(),"\n",SQLITE_SOURCE_ID,"\n", + "SQLITE_THREADSAFE=",sqlite3_threadsafe()); + outln("JVM NIO support? ",sqlite3_jni_supports_nio() ? "YES" : "NO"); + final boolean showLoopCount = (nRepeat>1 && nThread>1); + if( showLoopCount ){ + outln("Running ",nRepeat," loop(s) with ",nThread," thread(s) each."); + } + if( takeNaps ) outln("Napping between tests is enabled."); + for( int n = 0; n < nRepeat; ++n ){ + ++nLoop; + if( showLoopCount ) out((1==nLoop ? "" : " ")+nLoop); + if( nThread<=1 ){ + new Tester1(0).runTests(false); + continue; + } + Tester1.mtMode = true; + final ExecutorService ex = Executors.newFixedThreadPool( nThread ); + for( int i = 0; i < nThread; ++i ){ + ex.submit( new Tester1(i), i ); + } + ex.shutdown(); + try{ + ex.awaitTermination(nThread*200, java.util.concurrent.TimeUnit.MILLISECONDS); + ex.shutdownNow(); + }catch (InterruptedException ie){ + ex.shutdownNow(); + Thread.currentThread().interrupt(); + } + if( !listErrors.isEmpty() ){ + quietMode = false; + outln("TEST ERRORS:"); + Exception err = null; + for( Exception e : listErrors ){ + e.printStackTrace(); + if( null==err ) err = e; + } + if( null!=err ) throw err; + } + } + if( showLoopCount ) outln(); + quietMode = false; + + final long timeEnd = System.currentTimeMillis(); + outln("Tests done. Metrics across ",nTestRuns," total iteration(s):"); + outln("\tAssertions checked: ",affirmCount); + outln("\tDatabases opened: ",metrics.dbOpen); + if( doSomethingForDev ){ + sqlite3_jni_internal_details(); + } + affirm( 0==sqlite3_release_memory(1) ); + sqlite3_shutdown(); + int nMethods = 0; + int nNatives = 0; + final java.lang.reflect.Method[] declaredMethods = + CApi.class.getDeclaredMethods(); + for(java.lang.reflect.Method m : declaredMethods){ + final int mod = m.getModifiers(); + if( 0!=(mod & java.lang.reflect.Modifier.STATIC) ){ + final String name = m.getName(); + if(name.startsWith("sqlite3_")){ + ++nMethods; + if( 0!=(mod & java.lang.reflect.Modifier.NATIVE) ){ + ++nNatives; + } + } + } + } + outln("\tCApi.sqlite3_*() methods: "+ + nMethods+" total, with "+ + nNatives+" native, "+ + (nMethods - nNatives)+" Java" + ); + outln("\tTotal test time = " + +(timeEnd - timeStart)+"ms"); + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/TraceV2Callback.java b/ext/jni/src/org/sqlite/jni/capi/TraceV2Callback.java new file mode 100644 index 0000000000..56465a2c0a --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/TraceV2Callback.java @@ -0,0 +1,50 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. 
+** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; +import org.sqlite.jni.annotation.Nullable; + +/** + Callback for use with {@link CApi#sqlite3_trace_v2}. +*/ +public interface TraceV2Callback extends CallbackProxy { + /** + Called by sqlite3 for various tracing operations, as per + sqlite3_trace_v2(). Note that this interface elides the 2nd + argument to the native trace callback, as that role is better + filled by instance-local state. + +

    These callbacks may throw, in which case their exceptions are + converted to C-level error information. + +

    The 2nd argument to this function, if non-null, will be an + sqlite3 or sqlite3_stmt object, depending on the first argument + (see below). + +

    The final argument to this function is the "X" argument + documented for sqlite3_trace() and sqlite3_trace_v2(). Its type + depends on the value of the first argument: + +

    - SQLITE_TRACE_STMT: pNative is a sqlite3_stmt. pX is a String + containing the prepared SQL. + +

    - SQLITE_TRACE_PROFILE: pNative is a sqlite3_stmt. pX is a Long + holding an approximate number of nanoseconds the statement took + to run. + +

    - SQLITE_TRACE_ROW: pNative is a sqlite3_stmt. pX is null. + +

    - SQLITE_TRACE_CLOSE: pNative is a sqlite3. pX is null. + */ + int call(int traceFlag, Object pNative, @Nullable Object pX); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java b/ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java new file mode 100644 index 0000000000..e3d491f67e --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/UpdateHookCallback.java @@ -0,0 +1,26 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for use with {@link CApi#sqlite3_update_hook}. +*/ +public interface UpdateHookCallback extends CallbackProxy { + /** + Must function as described for the C-level sqlite3_update_hook() + callback. If it throws, the exception is translated into + a db-level error. + */ + void call(int opId, String dbName, String tableName, long rowId); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/ValueHolder.java b/ext/jni/src/org/sqlite/jni/capi/ValueHolder.java new file mode 100644 index 0000000000..0a469fea9a --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/ValueHolder.java @@ -0,0 +1,27 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the ValueHolder utility class for the sqlite3 +** JNI bindings. +*/ +package org.sqlite.jni.capi; + +/** + A helper class which simply holds a single value. Its primary use + is for communicating values out of anonymous classes, as doing so + requires a "final" reference, as well as communicating aggregate + SQL function state across calls to such functions. +*/ +public class ValueHolder { + public T value; + public ValueHolder(){} + public ValueHolder(T v){value = v;} +} diff --git a/ext/jni/src/org/sqlite/jni/capi/WindowFunction.java b/ext/jni/src/org/sqlite/jni/capi/WindowFunction.java new file mode 100644 index 0000000000..eaf1bb9a35 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/WindowFunction.java @@ -0,0 +1,39 @@ +/* +** 2023-08-25 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + + +/** + A SQLFunction implementation for window functions. Note that + WindowFunction inherits from {@link AggregateFunction} and each + instance is required to implement the inherited abstract methods + from that class. See {@link AggregateFunction} for information on + managing the UDF's invocation-specific state. +*/ +public abstract class WindowFunction extends AggregateFunction { + + /** + As for the xInverse() argument of the C API's + sqlite3_create_window_function(). 
If this function throws, the + exception is not propagated and a warning might be emitted + to a debugging channel. + */ + public abstract void xInverse(sqlite3_context cx, sqlite3_value[] args); + + /** + As for the xValue() argument of the C API's sqlite3_create_window_function(). + See xInverse() for the fate of any exceptions this throws. + */ + public abstract void xValue(sqlite3_context cx); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/XDestroyCallback.java b/ext/jni/src/org/sqlite/jni/capi/XDestroyCallback.java new file mode 100644 index 0000000000..ce6c6a6abf --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/XDestroyCallback.java @@ -0,0 +1,37 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file declares JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + Callback for a hook called by SQLite when certain client-provided + state are destroyed. It gets its name from the pervasive use of + the symbol name xDestroy() for this purpose in the C API + documentation. +*/ +public interface XDestroyCallback { + /** + Must perform any cleanup required by this object. Must not + throw. Must not call back into the sqlite3 API, else it might + invoke a deadlock. + + WARNING: as a rule, it is never safe to register individual + instances with this interface multiple times in the + library. e.g., do not register the same CollationCallback with + multiple arities or names using sqlite3_create_collation(). If + this rule is violated, the library will eventually try to free + each individual reference, leading to memory corruption or a + crash via duplicate free(). + */ + void xDestroy(); +} diff --git a/ext/jni/src/org/sqlite/jni/capi/package-info.java b/ext/jni/src/org/sqlite/jni/capi/package-info.java new file mode 100644 index 0000000000..127f380675 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/package-info.java @@ -0,0 +1,89 @@ +/** + This package houses a JNI binding to the SQLite3 C API. + +

    The primary interfaces are in {@link + org.sqlite.jni.capi.CApi}.
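    As a hedged illustration of the C-style flavor of this binding (the demo class is hypothetical, and `sqlite3_open()`/`OutputPointer.sqlite3` are assumed to mirror their C counterparts, as the rest of the API does):

```java
import org.sqlite.jni.capi.*;
import static org.sqlite.jni.capi.CApi.*;

public class HelloCApi {                     // hypothetical demo, not part of the bindings
  public static void main(String[] args){
    final OutputPointer.sqlite3 pDb = new OutputPointer.sqlite3();
    int rc = sqlite3_open(":memory:", pDb);  // assumed to mirror the C API
    if( 0 != rc ) throw new RuntimeException("open failed, rc="+rc);
    final sqlite3 db = pDb.take();
    try{
      // Prepare and step each statement in the string, C-API style.
      sqlite3_prepare_multi(db, "SELECT 1; SELECT 2;", new PrepareMultiCallback(){
        @Override public int call(sqlite3_stmt stmt){
          while( SQLITE_ROW == sqlite3_step(stmt) ){
            System.out.println(sqlite3_column_int(stmt, 0));
          }
          return 0;
        }
      });
    }finally{
      sqlite3_close_v2(db);                  // wrappers do not own the pointer
    }
  }
}
```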

    + +

    API Goals and Requirements

    + +
      + +
    • A 1-to-1(-ish) mapping of the C API to Java via JNI, insofar + as cross-language semantics allow. A closely related goal is + that the C + documentation should be usable as-is, insofar as possible, + for most of the JNI binding. As a rule, undocumented symbols in + the Java interface behave as documented for their C API + counterpart. Only semantic differences and Java-specific features + are documented here.
    • + +
    • Support Java as far back as version 8 (2014).
    • + +
    • Environment-independent. Should work everywhere both Java and + SQLite3 do.
    • + +
    • No 3rd-party dependencies beyond the JDK. That includes no + build-level dependencies for specific IDEs and toolchains. We + welcome the addition of build files for arbitrary environments + insofar as they neither interfere with each other nor become a + maintenance burden for the sqlite developers.
    • + +
    + +

    Non-Goals

    + +
      + +
    • Creation of high-level OO wrapper APIs. Clients are free to + create them on top of the C-style API.
    • + +
    • Support for mixed-mode operation, where client code accesses + SQLite both via the Java-side API and the C API via their own + native code. In such cases, proxy functionalities (primarily + callback handler wrappers of all sorts) may fail because the + C-side use of the SQLite APIs will bypass those proxies.
    • + +
    + +

    State of this API

    + +

    As of version 3.43, this software is in "tech preview" form. We + tentatively plan to stamp it as stable with the 3.44 release.

    + +

    Threading Considerations

    + +

    This API is, if built with SQLITE_THREADSAFE set to 1 or 2, + thread-safe, insofar as the C API guarantees, with some addenda:

    + +
      + +
    • It is not legal to use Java-facing SQLite3 resource handles + (sqlite3, sqlite3_stmt, etc.) from multiple threads concurrently, + nor to use any database-specific resources concurrently in a + thread other than the one in which the database is currently in + use. That is, do not use a sqlite3_stmt in thread #2 while thread #1 is + using the database which prepared that handle. + +
      Violating this will eventually corrupt the JNI-level bindings + between Java's and C's view of the database. This is a limitation + of the JNI bindings, not the lower-level library. +
    • + +
    • It is legal to use a given handle, and database-specific + resources, across threads, so long as no two threads pass + resources owned by the same database into the library + concurrently. +
    • + +
    + +

    Any number of threads may, of course, create and use any number + of database handles they wish. Care only needs to be taken when + those handles or their associated resources cross threads, or...

    + +

    When built with SQLITE_THREADSAFE=0, no threading guarantees + are provided and multi-threaded use of the library will provoke + undefined behavior.
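    A compact sketch of the rule above, under the same assumption about `sqlite3_open()` as earlier: each thread opens, uses, and closes its own connection, and no handle or statement ever crosses threads.

```java
import org.sqlite.jni.capi.*;
import static org.sqlite.jni.capi.CApi.*;

public class PerThreadDb {                             // hypothetical demo class
  public static void main(String[] args) throws InterruptedException {
    final Runnable work = () -> {
      final OutputPointer.sqlite3 out = new OutputPointer.sqlite3();
      if( 0 != sqlite3_open(":memory:", out) ) return; // assumed to mirror the C API
      final sqlite3 db = out.take();
      try{
        // ... prepare/step/finalize statements owned by this thread's db only ...
      }finally{
        sqlite3_close_v2(db);                          // close in the thread that opened it
      }
    };
    final Thread t1 = new Thread(work), t2 = new Thread(work);
    t1.start(); t2.start();
    t1.join();  t2.join();
  }
}
```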

    + +*/ +package org.sqlite.jni.capi; diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3.java new file mode 100644 index 0000000000..cc6f2e6e8d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3.java @@ -0,0 +1,43 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A wrapper for communicating C-level (sqlite3*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java + and C via JNI. +*/ +public final class sqlite3 extends NativePointerHolder + implements AutoCloseable { + + // Only invoked from JNI + private sqlite3(){} + + public String toString(){ + final long ptr = getNativePointer(); + if( 0==ptr ){ + return sqlite3.class.getSimpleName()+"@null"; + } + final String fn = CApi.sqlite3_db_filename(this, "main"); + return sqlite3.class.getSimpleName() + +"@"+String.format("0x%08x",ptr) + +"["+((null == fn) ? "" : fn)+"]" + ; + } + + @Override public void close(){ + CApi.sqlite3_close_v2(this); + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3_backup.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3_backup.java new file mode 100644 index 0000000000..0ef75c17eb --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3_backup.java @@ -0,0 +1,31 @@ +/* +** 2023-09-03 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A wrapper for passing C-level (sqlite3_backup*) instances around in + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class sqlite3_backup extends NativePointerHolder + implements AutoCloseable { + // Only invoked from JNI. + private sqlite3_backup(){} + + @Override public void close(){ + CApi.sqlite3_backup_finish(this); + } + +} diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java new file mode 100644 index 0000000000..bdc0200af4 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3_blob.java @@ -0,0 +1,30 @@ +/* +** 2023-09-03 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A wrapper for passing C-level (sqlite3_blob*) instances around in + Java. 
These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class sqlite3_blob extends NativePointerHolder + implements AutoCloseable { + // Only invoked from JNI. + private sqlite3_blob(){} + + @Override public void close(){ + CApi.sqlite3_blob_close(this); + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3_context.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3_context.java new file mode 100644 index 0000000000..82ec49af16 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3_context.java @@ -0,0 +1,79 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + sqlite3_context instances are used in conjunction with user-defined + SQL functions (a.k.a. UDFs). +*/ +public final class sqlite3_context extends NativePointerHolder { + private Long aggregateContext = null; + + /** + getAggregateContext() corresponds to C's + sqlite3_aggregate_context(), with a slightly different interface + to account for cross-language differences. It serves the same + purposes in a slightly different way: it provides a key which is + stable across invocations of a UDF's callbacks, such that all + calls into those callbacks can determine which "set" of those + calls they belong to. + +

    Note that use of this method is not a requirement for proper use + of this class. sqlite3_aggregate_context() can also be used. + +

    If the argument is true and the aggregate context has not yet + been set up, it will be initialized and fetched on demand, else it + won't. The intent is that xStep(), xValue(), and xInverse() + methods pass true and xFinal() methods pass false. + +

    This function treats numeric 0 as null, always returning null instead + of 0. + +

    If this object is being used in the context of an aggregate or + window UDF, this function returns a non-0 value which is distinct + for each set of UDF callbacks from a single invocation of the + UDF, otherwise it returns null. The returned value is only + valid within the context of execution of a single SQL statement, + and must not be re-used by future invocations of the UDF in + different SQL statements. + +

    Consider this SQL, where MYFUNC is a user-defined aggregate function: + +

    {@code
    +     SELECT MYFUNC(A), MYFUNC(B) FROM T;
    +     }
    + +

    The xStep() and xFinal() methods of the callback need to be able + to differentiate between those two invocations in order to + perform their work properly. The value returned by + getAggregateContext() will be distinct for each of those + invocations of MYFUNC() and is intended to be used as a lookup + key for mapping callback invocations to whatever client-defined + state is needed by the UDF. + +
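    A hedged sketch of that lookup-key pattern: per-invocation sums keyed by `getAggregateContext()`, roughly as an aggregate UDF's `xStep()`/`xFinal()` callbacks might do. The helper class is hypothetical; only `getAggregateContext()`, `sqlite3_value_int64()`, and `sqlite3_result_int64()` are taken from the API.

```java
import java.util.HashMap;
import java.util.Map;
import org.sqlite.jni.capi.*;

// Hypothetical per-invocation state for a SUM-like aggregate UDF.
class MyFuncState {
  private final Map<Long, long[]> sums = new HashMap<>();

  // Called from the UDF's xStep(): accumulate one row for this invocation.
  void step(sqlite3_context cx, sqlite3_value v){
    final Long key = cx.getAggregateContext(true);       // create the key on demand
    sums.computeIfAbsent(key, k -> new long[1])[0] += CApi.sqlite3_value_int64(v);
  }

  // Called from the UDF's xFinal(): emit and discard this invocation's state.
  void fin(sqlite3_context cx){
    final Long key = cx.getAggregateContext(false);      // do not create in xFinal()
    final long[] sum = (null == key) ? null : sums.remove(key);
    CApi.sqlite3_result_int64(cx, (null == sum) ? 0 : sum[0]);
  }
}
```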

    There is one case where this will return null in the context + of an aggregate or window function: if the result set has no + rows, the UDF's xFinal() will be called without any other x...() + members having been called. In that one case, no aggregate + context key will have been generated. xFinal() implementations + need to be prepared to accept that condition as legal. + */ + public synchronized Long getAggregateContext(boolean initIfNeeded){ + if( aggregateContext==null ){ + aggregateContext = CApi.sqlite3_aggregate_context(this, initIfNeeded); + if( !initIfNeeded && null==aggregateContext ) aggregateContext = 0L; + } + return (null==aggregateContext || 0!=aggregateContext) ? aggregateContext : null; + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java new file mode 100644 index 0000000000..564891c727 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3_stmt.java @@ -0,0 +1,30 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +/** + A wrapper for communicating C-level (sqlite3_stmt*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class sqlite3_stmt extends NativePointerHolder + implements AutoCloseable { + // Only invoked from JNI. + private sqlite3_stmt(){} + + @Override public void close(){ + CApi.sqlite3_finalize(this); + } +} diff --git a/ext/jni/src/org/sqlite/jni/capi/sqlite3_value.java b/ext/jni/src/org/sqlite/jni/capi/sqlite3_value.java new file mode 100644 index 0000000000..a4772f0f63 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/capi/sqlite3_value.java @@ -0,0 +1,19 @@ +/* +** 2023-07-21 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.capi; + +public final class sqlite3_value extends NativePointerHolder { + //! Invoked only from JNI. + private sqlite3_value(){} +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/Fts5.java b/ext/jni/src/org/sqlite/jni/fts5/Fts5.java new file mode 100644 index 0000000000..0dceeafd2e --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/Fts5.java @@ -0,0 +1,32 @@ +/* +** 2023-08-05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; + +/** + INCOMPLETE AND COMPLETELY UNTESTED. 
+ + A utility object for holding FTS5-specific types and constants + which are used by multiple FTS5 classes. +*/ +public final class Fts5 { + /* Not used */ + private Fts5(){} + + + public static final int FTS5_TOKENIZE_QUERY = 0x0001; + public static final int FTS5_TOKENIZE_PREFIX = 0x0002; + public static final int FTS5_TOKENIZE_DOCUMENT = 0x0004; + public static final int FTS5_TOKENIZE_AUX = 0x0008; + public static final int FTS5_TOKEN_COLOCATED = 0x0001; +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/Fts5Context.java b/ext/jni/src/org/sqlite/jni/fts5/Fts5Context.java new file mode 100644 index 0000000000..439b477910 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/Fts5Context.java @@ -0,0 +1,24 @@ +/* +** 2023-08-04 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.*; + +/** + A wrapper for communicating C-level (Fts5Context*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class Fts5Context extends NativePointerHolder { +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/Fts5ExtensionApi.java b/ext/jni/src/org/sqlite/jni/fts5/Fts5ExtensionApi.java new file mode 100644 index 0000000000..f409f4961d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/Fts5ExtensionApi.java @@ -0,0 +1,96 @@ +/* +** 2023-08-04 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.*; +import org.sqlite.jni.annotation.*; + +/** +*/ +public final class Fts5ExtensionApi extends NativePointerHolder { + //! Only called from JNI + private Fts5ExtensionApi(){} + private final int iVersion = 2; + + /* Callback type for used by xQueryPhrase(). */ + public interface XQueryPhraseCallback { + int call(Fts5ExtensionApi fapi, Fts5Context cx); + } + + /** + Returns the singleton instance of this class. 
+ */ + public static native Fts5ExtensionApi getInstance(); + + public native int xColumnCount(@NotNull Fts5Context fcx); + + public native int xColumnSize(@NotNull Fts5Context cx, int iCol, + @NotNull OutputPointer.Int32 pnToken); + + public native int xColumnText(@NotNull Fts5Context cx, int iCol, + @NotNull OutputPointer.String txt); + + public native int xColumnTotalSize(@NotNull Fts5Context fcx, int iCol, + @NotNull OutputPointer.Int64 pnToken); + + public native Object xGetAuxdata(@NotNull Fts5Context cx, boolean clearIt); + + public native int xInst(@NotNull Fts5Context cx, int iIdx, + @NotNull OutputPointer.Int32 piPhrase, + @NotNull OutputPointer.Int32 piCol, + @NotNull OutputPointer.Int32 piOff); + + public native int xInstCount(@NotNull Fts5Context fcx, + @NotNull OutputPointer.Int32 pnInst); + + public native int xPhraseCount(@NotNull Fts5Context fcx); + + public native int xPhraseFirst(@NotNull Fts5Context cx, int iPhrase, + @NotNull Fts5PhraseIter iter, + @NotNull OutputPointer.Int32 iCol, + @NotNull OutputPointer.Int32 iOff); + + public native int xPhraseFirstColumn(@NotNull Fts5Context cx, int iPhrase, + @NotNull Fts5PhraseIter iter, + @NotNull OutputPointer.Int32 iCol); + public native void xPhraseNext(@NotNull Fts5Context cx, + @NotNull Fts5PhraseIter iter, + @NotNull OutputPointer.Int32 iCol, + @NotNull OutputPointer.Int32 iOff); + public native void xPhraseNextColumn(@NotNull Fts5Context cx, + @NotNull Fts5PhraseIter iter, + @NotNull OutputPointer.Int32 iCol); + public native int xPhraseSize(@NotNull Fts5Context fcx, int iPhrase); + + public native int xQueryPhrase(@NotNull Fts5Context cx, int iPhrase, + @NotNull XQueryPhraseCallback callback); + public native int xRowCount(@NotNull Fts5Context fcx, + @NotNull OutputPointer.Int64 nRow); + + public native long xRowid(@NotNull Fts5Context cx); + /* Note that the JNI binding lacks the C version's xDelete() + callback argument. Instead, if pAux has an xDestroy() method, it + is called if the FTS5 API finalizes the aux state (including if + allocation of storage for the auxdata fails). Any reference to + pAux held by the JNI layer will be relinquished regardless of + whether pAux has an xDestroy() method. */ + + public native int xSetAuxdata(@NotNull Fts5Context cx, @Nullable Object pAux); + + public native int xTokenize(@NotNull Fts5Context cx, @NotNull byte[] pText, + @NotNull XTokenizeCallback callback); + + public native Object xUserData(Fts5Context cx); + //^^^ returns the pointer passed as the 3rd arg to the C-level + // fts5_api::xCreateFunction(). +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/Fts5PhraseIter.java b/ext/jni/src/org/sqlite/jni/fts5/Fts5PhraseIter.java new file mode 100644 index 0000000000..5774eb5936 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/Fts5PhraseIter.java @@ -0,0 +1,25 @@ +/* +** 2023-08-04 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.NativePointerHolder; + +/** + A wrapper for C-level Fts5PhraseIter. They are only modified and + inspected by native-level code. +*/ +public final class Fts5PhraseIter extends NativePointerHolder { + //! 
Updated and used only by native code. + private long a; + private long b; +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/Fts5Tokenizer.java b/ext/jni/src/org/sqlite/jni/fts5/Fts5Tokenizer.java new file mode 100644 index 0000000000..b72e5d0fc0 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/Fts5Tokenizer.java @@ -0,0 +1,31 @@ +/* +** 2023-08-05x +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.NativePointerHolder; + +/** + INCOMPLETE AND COMPLETELY UNTESTED. + + A wrapper for communicating C-level (Fts5Tokenizer*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. + + At the C level, the Fts5Tokenizer type is essentially a void + pointer used specifically for tokenizers. +*/ +public final class Fts5Tokenizer extends NativePointerHolder { + //! Only called from JNI. + private Fts5Tokenizer(){} +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java b/ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java new file mode 100644 index 0000000000..4d97ced47d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/TesterFts5.java @@ -0,0 +1,841 @@ +/* +** 2023-08-04 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains a set of tests for the sqlite3 JNI bindings. 
+*/ +package org.sqlite.jni.fts5; +import java.util.*; +import static org.sqlite.jni.capi.CApi.*; +import static org.sqlite.jni.capi.Tester1.*; +import org.sqlite.jni.capi.*; +import java.nio.charset.StandardCharsets; + +public class TesterFts5 { + + private static void test1(){ + final Fts5ExtensionApi fea = Fts5ExtensionApi.getInstance(); + affirm( null != fea ); + affirm( fea.getNativePointer() != 0 ); + affirm( fea == Fts5ExtensionApi.getInstance() )/*singleton*/; + + sqlite3 db = createNewDb(); + fts5_api fApi = fts5_api.getInstanceForDb(db); + affirm( fApi != null ); + affirm( fApi == fts5_api.getInstanceForDb(db) /* singleton per db */ ); + + execSql(db, new String[] { + "CREATE VIRTUAL TABLE ft USING fts5(a, b);", + "INSERT INTO ft(rowid, a, b) VALUES(1, 'X Y', 'Y Z');", + "INSERT INTO ft(rowid, a, b) VALUES(2, 'A Z', 'Y Y');" + }); + + final String pUserData = "This is pUserData"; + final int outputs[] = {0, 0}; + final fts5_extension_function func = new fts5_extension_function(){ + @Override public void call(Fts5ExtensionApi ext, Fts5Context fCx, + sqlite3_context pCx, sqlite3_value argv[]){ + final int nCols = ext.xColumnCount(fCx); + affirm( 2 == nCols ); + affirm( nCols == argv.length ); + affirm( ext.xUserData(fCx) == pUserData ); + final OutputPointer.String op = new OutputPointer.String(); + final OutputPointer.Int32 colsz = new OutputPointer.Int32(); + final OutputPointer.Int64 colTotalSz = new OutputPointer.Int64(); + for(int i = 0; i < nCols; ++i ){ + int rc = ext.xColumnText(fCx, i, op); + affirm( 0 == rc ); + final String val = op.value; + affirm( val.equals(sqlite3_value_text16(argv[i])) ); + rc = ext.xColumnSize(fCx, i, colsz); + affirm( 0==rc ); + affirm( 3==sqlite3_value_bytes(argv[i]) ); + rc = ext.xColumnTotalSize(fCx, i, colTotalSz); + affirm( 0==rc ); + } + ++outputs[0]; + } + public void xDestroy(){ + outputs[1] = 1; + } + }; + + int rc = fApi.xCreateFunction("myaux", pUserData, func); + affirm( 0==rc ); + + affirm( 0==outputs[0] ); + execSql(db, "select myaux(ft,a,b) from ft;"); + affirm( 2==outputs[0] ); + affirm( 0==outputs[1] ); + sqlite3_close_v2(db); + affirm( 1==outputs[1] ); + } + + /* + ** Argument sql is a string containing one or more SQL statements + ** separated by ";" characters. This function executes each of these + ** statements against the database passed as the first argument. If + ** no error occurs, the results of the SQL script are returned as + ** an array of strings. If an error does occur, a RuntimeException is + ** thrown. + */ + private static String[] sqlite3_exec(sqlite3 db, String sql) { + List aOut = new ArrayList<>(); + + /* Iterate through the list of SQL statements. For each, step through + ** it and add any results to the aOut[] array. 
*/ + int rc = sqlite3_prepare_multi(db, sql, new PrepareMultiCallback() { + @Override public int call(sqlite3_stmt pStmt){ + while( SQLITE_ROW==sqlite3_step(pStmt) ){ + int ii; + for(ii=0; ii, ); + */ + class fts5_aux implements fts5_extension_function { + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length>1 ){ + throw new RuntimeException("fts5_aux: wrong number of args"); + } + + boolean bClear = (argv.length==1); + Object obj = ext.xGetAuxdata(fCx, bClear); + if( obj instanceof String ){ + sqlite3_result_text16(pCx, (String)obj); + } + + if( argv.length==1 ){ + String val = sqlite3_value_text16(argv[0]); + if( !val.isEmpty() ){ + ext.xSetAuxdata(fCx, val); + } + } + } + public void xDestroy(){ } + } + + /* + ** fts5_inst(); + ** + ** This is used to test the xInstCount() and xInst() APIs. It returns a + ** text value containing a Tcl list with xInstCount() elements. Each + ** element is itself a list of 3 integers - the phrase number, column + ** number and token offset returned by each call to xInst(). + */ + fts5_extension_function fts5_inst = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=0 ){ + throw new RuntimeException("fts5_inst: wrong number of args"); + } + + OutputPointer.Int32 pnInst = new OutputPointer.Int32(); + OutputPointer.Int32 piPhrase = new OutputPointer.Int32(); + OutputPointer.Int32 piCol = new OutputPointer.Int32(); + OutputPointer.Int32 piOff = new OutputPointer.Int32(); + String ret = ""; + + int rc = ext.xInstCount(fCx, pnInst); + int nInst = pnInst.get(); + int ii; + + for(ii=0; rc==SQLITE_OK && ii0 ) ret += " "; + ret += "{"+piPhrase.get()+" "+piCol.get()+" "+piOff.get()+"}"; + } + + sqlite3_result_text(pCx, ret); + } + public void xDestroy(){ } + }; + + /* + ** fts5_pinst(); + ** + ** Like SQL function fts5_inst(), except using the following + ** + ** xPhraseCount + ** xPhraseFirst + ** xPhraseNext + */ + fts5_extension_function fts5_pinst = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=0 ){ + throw new RuntimeException("fts5_pinst: wrong number of args"); + } + + OutputPointer.Int32 piCol = new OutputPointer.Int32(); + OutputPointer.Int32 piOff = new OutputPointer.Int32(); + String ret = ""; + int rc = SQLITE_OK; + + int nPhrase = ext.xPhraseCount(fCx); + int ii; + + for(ii=0; rc==SQLITE_OK && ii=0; + ext.xPhraseNext(fCx, pIter, piCol, piOff) + ){ + if( !ret.isEmpty() ) ret += " "; + ret += "{"+ii+" "+piCol.get()+" "+piOff.get()+"}"; + } + } + + if( rc!=SQLITE_OK ){ + throw new RuntimeException("fts5_pinst: rc=" + rc); + }else{ + sqlite3_result_text(pCx, ret); + } + } + public void xDestroy(){ } + }; + + /* + ** fts5_pcolinst(); + ** + ** Like SQL function fts5_pinst(), except using the following + ** + ** xPhraseFirstColumn + ** xPhraseNextColumn + */ + fts5_extension_function fts5_pcolinst = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=0 ){ + throw new RuntimeException("fts5_pcolinst: wrong number of args"); + } + + OutputPointer.Int32 piCol = new OutputPointer.Int32(); + String ret = ""; + int rc = SQLITE_OK; + + int nPhrase = ext.xPhraseCount(fCx); + int ii; + + for(ii=0; rc==SQLITE_OK && 
ii=0; + ext.xPhraseNextColumn(fCx, pIter, piCol) + ){ + if( !ret.isEmpty() ) ret += " "; + ret += "{"+ii+" "+piCol.get()+"}"; + } + } + + if( rc!=SQLITE_OK ){ + throw new RuntimeException("fts5_pcolinst: rc=" + rc); + }else{ + sqlite3_result_text(pCx, ret); + } + } + public void xDestroy(){ } + }; + + /* + ** fts5_rowcount(); + */ + fts5_extension_function fts5_rowcount = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=0 ){ + throw new RuntimeException("fts5_rowcount: wrong number of args"); + } + OutputPointer.Int64 pnRow = new OutputPointer.Int64(); + + int rc = ext.xRowCount(fCx, pnRow); + if( rc==SQLITE_OK ){ + sqlite3_result_int64(pCx, pnRow.get()); + }else{ + throw new RuntimeException("fts5_rowcount: rc=" + rc); + } + } + public void xDestroy(){ } + }; + + /* + ** fts5_phrasesize(); + */ + fts5_extension_function fts5_phrasesize = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=1 ){ + throw new RuntimeException("fts5_phrasesize: wrong number of args"); + } + int iPhrase = sqlite3_value_int(argv[0]); + + int sz = ext.xPhraseSize(fCx, iPhrase); + sqlite3_result_int(pCx, sz); + } + public void xDestroy(){ } + }; + + /* + ** fts5_phrasehits(, ); + ** + ** Use the xQueryPhrase() API to determine how many hits, in total, + ** there are for phrase in the database. + */ + fts5_extension_function fts5_phrasehits = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=1 ){ + throw new RuntimeException("fts5_phrasesize: wrong number of args"); + } + int iPhrase = sqlite3_value_int(argv[0]); + int rc = SQLITE_OK; + + class MyCallback implements Fts5ExtensionApi.XQueryPhraseCallback { + public int nRet = 0; + public int getRet() { return nRet; } + + @Override + public int call(Fts5ExtensionApi fapi, Fts5Context cx){ + OutputPointer.Int32 pnInst = new OutputPointer.Int32(); + int rc = fapi.xInstCount(cx, pnInst); + nRet += pnInst.get(); + return rc; + } + }; + + MyCallback xCall = new MyCallback(); + rc = ext.xQueryPhrase(fCx, iPhrase, xCall); + if( rc!=SQLITE_OK ){ + throw new RuntimeException("fts5_phrasehits: rc=" + rc); + } + sqlite3_result_int(pCx, xCall.getRet()); + } + public void xDestroy(){ } + }; + + /* + ** fts5_tokenize(, ) + */ + fts5_extension_function fts5_tokenize = new fts5_extension_function(){ + @Override public void call( + Fts5ExtensionApi ext, + Fts5Context fCx, + sqlite3_context pCx, + sqlite3_value argv[] + ){ + if( argv.length!=1 ){ + throw new RuntimeException("fts5_tokenize: wrong number of args"); + } + byte[] utf8 = sqlite3_value_text(argv[0]); + int rc = SQLITE_OK; + + class MyCallback implements XTokenizeCallback { + private List myList = new ArrayList<>(); + + public String getval() { + return String.join("+", myList); + } + + @Override + public int call(int tFlags, byte[] txt, int iStart, int iEnd){ + try { + String str = new String(txt, StandardCharsets.UTF_8); + myList.add(str); + } catch (Exception e) { + } + return SQLITE_OK; + } + }; + + MyCallback xCall = new MyCallback(); + ext.xTokenize(fCx, utf8, xCall); + sqlite3_result_text16(pCx, xCall.getval()); + + if( rc!=SQLITE_OK ){ + throw new RuntimeException("fts5_tokenize: rc=" + rc); + } + } + public void xDestroy(){ } + }; + + fts5_api api = 
fts5_api.getInstanceForDb(db); + api.xCreateFunction("fts5_rowid", fts5_rowid); + api.xCreateFunction("fts5_columncount", fts5_columncount); + api.xCreateFunction("fts5_columnsize", fts5_columnsize); + api.xCreateFunction("fts5_columntext", fts5_columntext); + api.xCreateFunction("fts5_columntotalsize", fts5_columntsize); + + api.xCreateFunction("fts5_aux1", new fts5_aux()); + api.xCreateFunction("fts5_aux2", new fts5_aux()); + + api.xCreateFunction("fts5_inst", fts5_inst); + api.xCreateFunction("fts5_pinst", fts5_pinst); + api.xCreateFunction("fts5_pcolinst", fts5_pcolinst); + api.xCreateFunction("fts5_rowcount", fts5_rowcount); + api.xCreateFunction("fts5_phrasesize", fts5_phrasesize); + api.xCreateFunction("fts5_phrasehits", fts5_phrasehits); + api.xCreateFunction("fts5_tokenize", fts5_tokenize); + } + /* + ** Test of various Fts5ExtensionApi methods + */ + private static void test2(){ + + /* Open db and populate an fts5 table */ + sqlite3 db = createNewDb(); + do_execsql_test(db, + "CREATE VIRTUAL TABLE ft USING fts5(a, b);" + + "INSERT INTO ft(rowid, a, b) VALUES(-9223372036854775808, 'x', 'x');" + + "INSERT INTO ft(rowid, a, b) VALUES(0, 'x', 'x');" + + "INSERT INTO ft(rowid, a, b) VALUES(1, 'x y z', 'x y z');" + + "INSERT INTO ft(rowid, a, b) VALUES(2, 'x y z', 'x z');" + + "INSERT INTO ft(rowid, a, b) VALUES(3, 'x y z', 'x y z');" + + "INSERT INTO ft(rowid, a, b) VALUES(9223372036854775807, 'x', 'x');" + ); + + create_test_functions(db); + + /* Test that fts5_rowid() seems to work */ + do_execsql_test(db, + "SELECT rowid==fts5_rowid(ft) FROM ft('x')", + "[1, 1, 1, 1, 1, 1]" + ); + + /* Test fts5_columncount() */ + do_execsql_test(db, + "SELECT fts5_columncount(ft) FROM ft('x')", + "[2, 2, 2, 2, 2, 2]" + ); + + /* Test fts5_columnsize() */ + do_execsql_test(db, + "SELECT fts5_columnsize(ft, 0) FROM ft('x') ORDER BY rowid", + "[1, 1, 3, 3, 3, 1]" + ); + do_execsql_test(db, + "SELECT fts5_columnsize(ft, 1) FROM ft('x') ORDER BY rowid", + "[1, 1, 3, 2, 3, 1]" + ); + do_execsql_test(db, + "SELECT fts5_columnsize(ft, -1) FROM ft('x') ORDER BY rowid", + "[2, 2, 6, 5, 6, 2]" + ); + + /* Test that xColumnSize() returns SQLITE_RANGE if the column number + ** is out-of range */ + try { + do_execsql_test(db, + "SELECT fts5_columnsize(ft, 2) FROM ft('x') ORDER BY rowid" + ); + } catch( RuntimeException e ){ + affirm( e.getMessage().matches(".*column index out of range") ); + } + + /* Test fts5_columntext() */ + do_execsql_test(db, + "SELECT fts5_columntext(ft, 0) FROM ft('x') ORDER BY rowid", + "[x, x, x y z, x y z, x y z, x]" + ); + do_execsql_test(db, + "SELECT fts5_columntext(ft, 1) FROM ft('x') ORDER BY rowid", + "[x, x, x y z, x z, x y z, x]" + ); + boolean threw = false; + try{ + /* columntext() used to return NULLs when given an out-of bounds column + but now results in a range error. 
*/ + do_execsql_test(db, + "SELECT fts5_columntext(ft, 2) FROM ft('x') ORDER BY rowid", + "[null, null, null, null, null, null]" + ); + }catch(Exception e){ + threw = true; + affirm( e.getMessage().matches(".*column index out of range") ); + } + affirm( threw ); + threw = false; + + /* Test fts5_columntotalsize() */ + do_execsql_test(db, + "SELECT fts5_columntotalsize(ft, 0) FROM ft('x') ORDER BY rowid", + "[12, 12, 12, 12, 12, 12]" + ); + do_execsql_test(db, + "SELECT fts5_columntotalsize(ft, 1) FROM ft('x') ORDER BY rowid", + "[11, 11, 11, 11, 11, 11]" + ); + do_execsql_test(db, + "SELECT fts5_columntotalsize(ft, -1) FROM ft('x') ORDER BY rowid", + "[23, 23, 23, 23, 23, 23]" + ); + + /* Test that xColumnTotalSize() returns SQLITE_RANGE if the column + ** number is out-of range */ + try { + do_execsql_test(db, + "SELECT fts5_columntotalsize(ft, 2) FROM ft('x') ORDER BY rowid" + ); + } catch( RuntimeException e ){ + affirm( e.getMessage().matches(".*column index out of range") ); + } + + do_execsql_test(db, + "SELECT rowid, fts5_rowcount(ft) FROM ft('z')", + "[1, 6, 2, 6, 3, 6]" + ); + + sqlite3_close_v2(db); + } + + /* + ** Test of various Fts5ExtensionApi methods + */ + private static void test3(){ + + /* Open db and populate an fts5 table */ + sqlite3 db = createNewDb(); + do_execsql_test(db, + "CREATE VIRTUAL TABLE ft USING fts5(a, b);" + + "INSERT INTO ft(a, b) VALUES('the one', 1);" + + "INSERT INTO ft(a, b) VALUES('the two', 2);" + + "INSERT INTO ft(a, b) VALUES('the three', 3);" + + "INSERT INTO ft(a, b) VALUES('the four', '');" + ); + create_test_functions(db); + + /* Test fts5_aux1() + fts5_aux2() - users of xGetAuxdata and xSetAuxdata */ + do_execsql_test(db, + "SELECT fts5_aux1(ft, a) FROM ft('the')", + "[null, the one, the two, the three]" + ); + do_execsql_test(db, + "SELECT fts5_aux2(ft, b) FROM ft('the')", + "[null, 1, 2, 3]" + ); + do_execsql_test(db, + "SELECT fts5_aux1(ft, a), fts5_aux2(ft, b) FROM ft('the')", + "[null, null, the one, 1, the two, 2, the three, 3]" + ); + do_execsql_test(db, + "SELECT fts5_aux1(ft, b), fts5_aux1(ft) FROM ft('the')", + "[null, 1, 1, 2, 2, 3, 3, null]" + ); + } + + /* + ** Test of various Fts5ExtensionApi methods + */ + private static void test4(){ + + /* Open db and populate an fts5 table */ + sqlite3 db = createNewDb(); + create_test_functions(db); + do_execsql_test(db, + "CREATE VIRTUAL TABLE ft USING fts5(a, b);" + + "INSERT INTO ft(a, b) VALUES('one two three', 'two three four');" + + "INSERT INTO ft(a, b) VALUES('two three four', 'three four five');" + + "INSERT INTO ft(a, b) VALUES('three four five', 'four five six');" + ); + + + do_execsql_test(db, + "SELECT fts5_inst(ft) FROM ft('two')", + "[{0 0 1} {0 1 0}, {0 0 0}]" + ); + do_execsql_test(db, + "SELECT fts5_inst(ft) FROM ft('four')", + "[{0 1 2}, {0 0 2} {0 1 1}, {0 0 1} {0 1 0}]" + ); + + do_execsql_test(db, + "SELECT fts5_inst(ft) FROM ft('a OR b OR four')", + "[{2 1 2}, {2 0 2} {2 1 1}, {2 0 1} {2 1 0}]" + ); + do_execsql_test(db, + "SELECT fts5_inst(ft) FROM ft('two four')", + "[{0 0 1} {0 1 0} {1 1 2}, {0 0 0} {1 0 2} {1 1 1}]" + ); + + do_execsql_test(db, + "SELECT fts5_pinst(ft) FROM ft('two')", + "[{0 0 1} {0 1 0}, {0 0 0}]" + ); + do_execsql_test(db, + "SELECT fts5_pinst(ft) FROM ft('four')", + "[{0 1 2}, {0 0 2} {0 1 1}, {0 0 1} {0 1 0}]" + ); + do_execsql_test(db, + "SELECT fts5_pinst(ft) FROM ft('a OR b OR four')", + "[{2 1 2}, {2 0 2} {2 1 1}, {2 0 1} {2 1 0}]" + ); + do_execsql_test(db, + "SELECT fts5_pinst(ft) FROM ft('two four')", + "[{0 0 1} {0 1 0} {1 1 2}, {0 0 
0} {1 0 2} {1 1 1}]" + ); + + do_execsql_test(db, + "SELECT fts5_pcolinst(ft) FROM ft('two')", + "[{0 0} {0 1}, {0 0}]" + ); + do_execsql_test(db, + "SELECT fts5_pcolinst(ft) FROM ft('four')", + "[{0 1}, {0 0} {0 1}, {0 0} {0 1}]" + ); + do_execsql_test(db, + "SELECT fts5_pcolinst(ft) FROM ft('a OR b OR four')", + "[{2 1}, {2 0} {2 1}, {2 0} {2 1}]" + ); + do_execsql_test(db, + "SELECT fts5_pcolinst(ft) FROM ft('two four')", + "[{0 0} {0 1} {1 1}, {0 0} {1 0} {1 1}]" + ); + + do_execsql_test(db, + "SELECT fts5_phrasesize(ft, 0) FROM ft('four five six') LIMIT 1;", + "[1]" + ); + do_execsql_test(db, + "SELECT fts5_phrasesize(ft, 0) FROM ft('four + five + six') LIMIT 1;", + "[3]" + ); + + + sqlite3_close_v2(db); + } + + private static void test5(){ + /* Open db and populate an fts5 table */ + sqlite3 db = createNewDb(); + create_test_functions(db); + do_execsql_test(db, + "CREATE VIRTUAL TABLE ft USING fts5(x, b);" + + "INSERT INTO ft(x) VALUES('one two three four five six seven eight');" + + "INSERT INTO ft(x) VALUES('one two one four one six one eight');" + + "INSERT INTO ft(x) VALUES('one two three four five six seven eight');" + ); + + do_execsql_test(db, + "SELECT fts5_phrasehits(ft, 0) FROM ft('one') LIMIT 1", + "[6]" + ); + + sqlite3_close_v2(db); + } + + private static void test6(){ + sqlite3 db = createNewDb(); + create_test_functions(db); + do_execsql_test(db, + "CREATE VIRTUAL TABLE ft USING fts5(x, b);" + + "INSERT INTO ft(x) VALUES('one two three four five six seven eight');" + ); + + do_execsql_test(db, + "SELECT fts5_tokenize(ft, 'abc def ghi') FROM ft('one')", + "[abc+def+ghi]" + ); + do_execsql_test(db, + "SELECT fts5_tokenize(ft, 'it''s BEEN a...') FROM ft('one')", + "[it+s+been+a]" + ); + + sqlite3_close_v2(db); + } + + private static synchronized void runTests(){ + test1(); + test2(); + test3(); + test4(); + test5(); + test6(); + } + + public TesterFts5(){ + runTests(); + } +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/XTokenizeCallback.java b/ext/jni/src/org/sqlite/jni/fts5/XTokenizeCallback.java new file mode 100644 index 0000000000..3aa514f314 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/XTokenizeCallback.java @@ -0,0 +1,22 @@ +/* +** 2023-08-04 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; + + +/** + Callback type for use with xTokenize() variants. +*/ +public interface XTokenizeCallback { + int call(int tFlags, byte[] txt, int iStart, int iEnd); +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/fts5_api.java b/ext/jni/src/org/sqlite/jni/fts5/fts5_api.java new file mode 100644 index 0000000000..d7d2da430d --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/fts5_api.java @@ -0,0 +1,76 @@ +/* +** 2023-08-05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. 
+*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.annotation.*; +import org.sqlite.jni.capi.*; + +/** + A wrapper for communicating C-level (fts5_api*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class fts5_api extends NativePointerHolder { + /* Only invoked from JNI */ + private fts5_api(){} + + public static final int iVersion = 2; + + /** + Returns the fts5_api instance associated with the given db, or + null if something goes horribly wrong. + */ + public static synchronized native fts5_api getInstanceForDb(@NotNull sqlite3 db); + + public synchronized native int xCreateFunction(@NotNull String name, + @Nullable Object userData, + @NotNull fts5_extension_function xFunction); + + /** + Convenience overload which passes null as the 2nd argument to the + 3-parameter form. + */ + public int xCreateFunction(@NotNull String name, + @NotNull fts5_extension_function xFunction){ + return xCreateFunction(name, null, xFunction); + } + + // /* Create a new auxiliary function */ + // int (*xCreateFunction)( + // fts5_api *pApi, + // const char *zName, + // void *pContext, + // fts5_extension_function xFunction, + // void (*xDestroy)(void*) + // ); + + // Still potentially todo: + + // int (*xCreateTokenizer)( + // fts5_api *pApi, + // const char *zName, + // void *pContext, + // fts5_tokenizer *pTokenizer, + // void (*xDestroy)(void*) + // ); + + // /* Find an existing tokenizer */ + // int (*xFindTokenizer)( + // fts5_api *pApi, + // const char *zName, + // void **ppContext, + // fts5_tokenizer *pTokenizer + // ); + +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/fts5_extension_function.java b/ext/jni/src/org/sqlite/jni/fts5/fts5_extension_function.java new file mode 100644 index 0000000000..6e98f64ff3 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/fts5_extension_function.java @@ -0,0 +1,51 @@ +/* +** 2023-08-05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.sqlite3_context; +import org.sqlite.jni.capi.sqlite3_value; + +/** + JNI-level wrapper for C's fts5_extension_function type. +*/ +public interface fts5_extension_function { + // typedef void (*fts5_extension_function)( + // const Fts5ExtensionApi *pApi, /* API offered by current FTS version */ + // Fts5Context *pFts, /* First arg to pass to pApi functions */ + // sqlite3_context *pCtx, /* Context for returning result/error */ + // int nVal, /* Number of values in apVal[] array */ + // sqlite3_value **apVal /* Array of trailing arguments */ + // ); + + /** + The callback implementation, corresponding to the xFunction + argument of C's fts5_api::xCreateFunction(). + */ + void call(Fts5ExtensionApi ext, Fts5Context fCx, + sqlite3_context pCx, sqlite3_value argv[]); + /** + Is called when this function is destroyed by sqlite3. Typically + this function will be empty. + */ + void xDestroy(); + + /** + A base implementation of fts5_extension_function() which has a + no-op xDestroy() method. 
+ */ + abstract class Abstract implements fts5_extension_function { + @Override public abstract void call(Fts5ExtensionApi ext, Fts5Context fCx, + sqlite3_context pCx, sqlite3_value argv[]); + @Override public void xDestroy(){} + } +} diff --git a/ext/jni/src/org/sqlite/jni/fts5/fts5_tokenizer.java b/ext/jni/src/org/sqlite/jni/fts5/fts5_tokenizer.java new file mode 100644 index 0000000000..f4ada4dc30 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/fts5/fts5_tokenizer.java @@ -0,0 +1,49 @@ +/* +** 2023-08-05 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the JNI bindings for the sqlite3 C API. +*/ +package org.sqlite.jni.fts5; +import org.sqlite.jni.capi.NativePointerHolder; +import org.sqlite.jni.annotation.NotNull; + +/** + A wrapper for communicating C-level (fts5_tokenizer*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java and C + via JNI. +*/ +public final class fts5_tokenizer extends NativePointerHolder { + /* Only invoked by JNI */ + private fts5_tokenizer(){} + + // int (*xCreate)(void*, const char **azArg, int nArg, Fts5Tokenizer **ppOut); + // void (*xDelete)(Fts5Tokenizer*); + + public native int xTokenize(@NotNull Fts5Tokenizer t, int tokFlags, + @NotNull byte pText[], + @NotNull XTokenizeCallback callback); + + + // int (*xTokenize)(Fts5Tokenizer*, + // void *pCtx, + // int flags, /* Mask of FTS5_TOKENIZE_* flags */ + // const char *pText, int nText, + // int (*xToken)( + // void *pCtx, /* Copy of 2nd argument to xTokenize() */ + // int tflags, /* Mask of FTS5_TOKEN_* flags */ + // const char *pToken, /* Pointer to buffer containing token */ + // int nToken, /* Size of token in bytes */ + // int iStart, /* Byte offset of token within input text */ + // int iEnd /* Byte offset of end of token within input text */ + // ) + // ); +} diff --git a/ext/jni/src/org/sqlite/jni/test-script-interpreter.md b/ext/jni/src/org/sqlite/jni/test-script-interpreter.md new file mode 100644 index 0000000000..cc7b7e7f9a --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/test-script-interpreter.md @@ -0,0 +1,270 @@ +# Specifications For A Rudimentary SQLite Test Script Interpreter + +## Overview + +The purpose of the Test Script Interpreter is to read and interpret +script files that contain SQL commands and desired results. The +interpreter will check results and report any discrepancies found. + +The test script files are ASCII text files. The filename always ends with +".test". Each script is evaluated independently; context does not carry +forward from one script to the next. So, for example, the --null command +run in one test script does not cause any changes in the behavior of +subsequent test scripts. All open database connections are closed +at the end of each test script. All database files created by a test +script are deleted when the script finishes. + +## Parsing Rules: + + 1. The test script is read line by line, where a line is a sequence of + characters that runs up to the next '\\n' (0x0a) character or until + the end of the file. There is never a need to read ahead past the + end of the current line. + + 2. 
If any line contains the string " MODULE_NAME:" (with a space before + the initial "M") or "MIXED_MODULE_NAME:" then that test script is + incompatible with this spec. Processing of the test script should + end immediately. There is no need to read any more of the file. + In verbose mode, the interpreter might choose to emit an informational + messages saying that the test script was abandoned due to an + incompatible module type. + + 3. If any line contains the string "SCRIPT_MODULE_NAME:" then the input + script is known to be of the correct type for this specification and + processing may continue. The "MODULE_NAME" checking in steps 2 and 3 + may optionally be discontinued after sighting a "SCRIPT_MODULE_NAME". + + 4. If any line contains "REQUIRED_PROPERTIES:" and that substring is followed + by any non-whitespace text, then the script is not compatible with this + spec. Processing should stop immediately. In verbose mode, the + interpreter might choose to emit an information message saying that the + test script was abandoned due to unsupported requirement properties. + + 5. If any line begins with the "\|" (0x7c) character, that indicates that + the input script is not compatible with this specification. Processing + of the script should stop immediately. In verbose mode, the interpreter + might choose to emit an informational message indicating that the + test script was abandoned because it contained "a dbtotxt format database + specification". + + 6. Any line that begins with "#" is a C-preprocessor line. The interpreter + described by this spec does not know how to deal with C-preprocessor lines. + Hence, processing should be abandoned. In verbose mode, the interpreter + might emit an informational message similar to + "script NAME abandoned due to C-preprocessor line: ..." + + 7. If a line begins with exactly two minus signs followed by a + lowercase letter, that is a command. Process commands as described + below. + + 8. All other lines should be accumulated into the "input buffer". + The various commands will have access to this input buffer. + Some commands will reset the buffer. + +## Initialization + +The initial state of the interpreter at the start of processing each script +is as if the following command sequence had been run: + +> ~~~ +--close all +--db 0 +--new test.db +--null nil +~~~ + +In words, all database connections are closed except for connection 0 (the +default) which is open on an empty database named "test.db". The string +"nil" is displayed for NULL column values. + +The only context carried forward after the evaluation of one test script +into the evaluation of the next test script is the count of the number of +tests run and the number of failures seen. + +## Commands: + +Each command looks like an SQL comment. The command begins at the left +margin (no leading space) and starts with exactly 2 minus signs ("-"). +The command name consists of lowercase letters and maybe a "-" or two. +Some commands have arguments. + +The arguments are separated from the command name by one or more spaces. + +Commands have access to the input buffer and might reset the input buffer. +The command can also optionally read (and consume) additional text from +script that comes after the command. + +Unknown or unrecognized commands indicate that the script contains features +that are not (yet) supported by this specification. Processing of the +script should terminate immediately. 
When this happens and when the
+interpreter is in a "verbose" mode, the interpreter might choose to emit
+an informational message along the lines of "test script NAME abandoned
+due to unsupported command: --whatever".
+
+The initial implementation will only recognize a few commands. Other
+commands may be added later. The following is the initial set of
+commands:
+
+### The --testcase command
+
+Every test case starts with a --testcase command. The --testcase
+command resets both the "input buffer" and the "result buffer". The
+argument to the --testcase command is the name of the test case. That
+test case name is used for logging and debugging and when printing
+errors. The input buffer is set to the body of the test case.
+
+### The --result command
+
+The --result command tries to execute the text in the input buffer as SQL.
+For each row of result coming out of this SQL, the text of that result is
+appended to the "result buffer". If a result row contains multiple columns,
+the columns are processed from left to right. For each column, text is
+appended to the result buffer according to the following rules:
+
+ * If the result buffer already contains some text, append a space.
+ (In this way, all column values and all row values are separated from
+ each other by a single space.)
+
+ * If sqlite3_column_text() returns NULL, then append "nil" - or
+ some other text that is specified by the --null command - and skip
+ all subsequent rules.
+
+ * If sqlite3_column_text() is an empty string, append `{}` to the
+ result buffer and skip all subsequent rules.
+
+ * If sqlite3_column_text() does not contain any special
+ characters, append it to the result buffer without any
+ formatting and skip all subsequent rules. Special characters are:
+ 0x00 to 0x20 (inclusive), double-quote (0x22), backslash (0x5c),
+ curly braces (0x7b and 0x7d).
+
+ * If sqlite3_column_text() does not contain curly braces, then put
+ the text inside of `{...}` and append it and skip all subsequent rules.
+
+ * Append the text within double-quotes (`"..."`) and within the text
+ escape '"' and '\\' by prepending a single '\\' and escape any
+ control characters (characters less than 0x20) using octal notation:
+ '\\NNN'.
+
+If an error is encountered while running the SQL, then append the
+symbolic C-preprocessor name for the error
+code (ex: "SQLITE_CONSTRAINT") as if it were a column value. Then append
+the error message text as if it were a column value. Then stop processing.
+
+After the SQL text has been run, compare the content of the result buffer
+against the argument to the --result command and report a testing error if
+there are any differences.
+
+The --result command resets the input buffer, but it does not reset
+the result buffer. This distinction does not matter for the --result
+command itself, but it is important for related commands like --glob
+and --notglob. Sometimes test cases will contain a bunch of SQL
+followed by multiple --glob and/or --notglob statements. All of the
+globs should be evaluated against the result buffer, but the SQL should
+only be run once. This is accomplished by resetting the input buffer
+but not the result buffer.
+
+### The --glob command
+
+The --glob command works just like --result except that the argument to
+--glob is interpreted as a TEST-GLOB pattern and the results are compared
+using that glob pattern rather than using strcmp(). Other than that,
+the two operate the same.
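As a rough illustration (an editorial sketch, not part of this commit), a Java interpreter might implement the two comparison modes along the following lines by translating the TEST-GLOB syntax described next into a regular expression; the class and method names here are hypothetical:

~~~
import java.util.regex.Pattern;

final class TestGlob {
  // Hypothetical sketch: '*' -> ".*", '?' -> ".", '#' -> one or more
  // digits, '[...]' passes through as a character class, and everything
  // else is matched literally.
  static Pattern toRegex(String glob){
    final StringBuilder re = new StringBuilder();
    boolean inBracket = false;
    for( int i = 0; i < glob.length(); ++i ){
      final char c = glob.charAt(i);
      if( inBracket ){
        re.append(c);
        if( ']' == c ) inBracket = false;
      }else switch( c ){
        case '*': re.append(".*"); break;
        case '?': re.append('.'); break;
        case '#': re.append("\\d+"); break;
        case '[': re.append('['); inBracket = true; break;
        default:  re.append(Pattern.quote(String.valueOf(c))); break;
      }
    }
    return Pattern.compile(re.toString(), Pattern.DOTALL);
  }

  // --result would compare with equals(); --glob would use the pattern.
  static boolean check(String resultBuffer, String expected, boolean asGlob){
    return asGlob
      ? toRegex(expected).matcher(resultBuffer).matches()
      : resultBuffer.equals(expected);
  }
}
~~~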
+
+The TEST-GLOB pattern is slightly different from a standard GLOB:
+
+ * The '*' character matches zero or more characters.
+
+ * The '?' character matches any single character.
+
+ * The '[...]' character sequence matches a single character
+ in between the brackets.
+
+ * The '#' character matches one or more digits (This is the main
+ difference between standard unix-glob and TEST-GLOB. unix-glob
+ does not have this feature. It was added because it comes
+ up a lot during SQLite testing.)
+
+### The --notglob command
+
+The --notglob command works just like --glob except that it reports an
+error if the GLOB does match, rather than if the GLOB does not match.
+
+### The --oom command
+
+This command is to be used for out-of-memory testing. It means that
+OOM errors should be simulated to ensure that SQLite is able to deal with
+them. This command can be silently ignored for now. We might add support
+for this later.
+
+### The --tableresult command
+
+The --tableresult command works like --glob except that the GLOB pattern
+to be matched is taken from subsequent lines of the input script up to
+the next --end. Every span of one or more whitespace characters in this
+pattern text is collapsed into a single space (0x20).
+Leading and trailing whitespace are removed from the pattern.
+The --end that ends the GLOB pattern is not part of the GLOB pattern, but
+the --end is consumed from the script input.
+
+### The --new and --open commands
+
+The --new and --open commands cause a database file to be opened.
+The name of the file is the argument to the command. The --new command
+opens an initially empty database (it deletes the file before opening it)
+whereas the --open command opens an existing database if it already
+exists.
+
+### The --db command
+
+The script interpreter can have up to 7 different SQLite database
+connections open at a time. The --db command is used to switch between
+them. The argument to --db is an integer between 0 and 6 that selects
+which database connection to use moving forward.
+
+### The --close command
+
+The --close command causes an existing database connection to close.
+This command is a no-op if the database connection is not currently
+open. There can be up to 7 different database connections, numbered 0
+through 6. The number of the database connection to close is an
+argument to the --close command, which will fail if an out-of-range
+value is provided. Or if the argument to --close is "all" then all
+open database connections are closed. If passed no argument, the
+currently-active database is assumed.
+
+### The --null command
+
+The --null command changes the text that is used to represent SQL NULL
+values in the result buffer.
+
+### The --run command
+
+The --run command executes text in the input buffer as if it were SQL.
+However, nothing is added to the result buffer. Any output from the SQL
+is silently ignored. Errors in the SQL are silently ignored.
+
+The --run command normally executes the SQL in the current database
+connection. However, if --run has an argument that is an integer between
+0 and 6 then the SQL is run in the alternative database connection specified
+by that argument.
+
+### The --json and --json-block commands
+
+The --json and --json-block commands work like --result and --tableresult,
+respectively.
The difference is that column values are appended to the +result buffer literally, without ever enclosing the values in `{...}` or +`"..."` and without escaping any characters in the column value and comparison +is always an exact strcmp() not a GLOB. + +### The --print command + +The --print command emits both its arguments and its body (if any) to +stdout, indenting each line of output. + +### The --column-names command + +The --column-names command requires 0 or 1 as an argument, to disable +resp. enable it, and modifies SQL execution to include column names +in output. When this option is on, each column value emitted gets +prefixed by its column name, with a single space between them. diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java b/ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java new file mode 100644 index 0000000000..fc63b53542 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/AggregateFunction.java @@ -0,0 +1,144 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. +*/ +package org.sqlite.jni.wrapper1; + +/** + EXPERIMENTAL/INCOMPLETE/UNTESTED + + A SqlFunction implementation for aggregate functions. The T type + represents the type of data accumulated by this aggregate while it + works. e.g. a SUM()-like UDF might use Integer or Long and a + CONCAT()-like UDF might use a StringBuilder or a List. +*/ +public abstract class AggregateFunction implements SqlFunction { + + /** + As for the xStep() argument of the C API's + sqlite3_create_function(). If this function throws, the + exception is reported via sqlite3_result_error(). + */ + public abstract void xStep(SqlFunction.Arguments args); + + /** + As for the xFinal() argument of the C API's + sqlite3_create_function(). If this function throws, it is + translated into sqlite3_result_error(). + + Note that the passed-in object will not actually contain any + arguments for xFinal() but will contain the context object needed + for setting the call's result or error state. + */ + public abstract void xFinal(SqlFunction.Arguments args); + + /** + Optionally override to be notified when the UDF is finalized by + SQLite. + */ + public void xDestroy() {} + + /** + PerContextState assists aggregate and window functions in + managing their accumulator state across calls to the UDF's + callbacks. + +

    T must be of a type which can be legally stored as a value in + java.util.HashMap. + +

    If a given aggregate or window function is called multiple times + in a single SQL statement, e.g. SELECT MYFUNC(A), MYFUNC(B)..., + then the clients need some way of knowing which call is which so + that they can map their state between their various UDF callbacks + and reset it via xFinal(). This class takes care of such + mappings. + +

    This class works by mapping + sqlite3_context.getAggregateContext() to a single piece of + state, of a client-defined type (the T part of this class), which + persists across a "matching set" of the UDF's callbacks. + +

    This class is a helper providing commonly-needed functionality + - it is not required for use with aggregate or window functions. + Client UDFs are free to perform such mappings using custom + approaches. The provided {@link AggregateFunction} and {@link + WindowFunction} classes use this. + */ + public static final class PerContextState { + private final java.util.Map> map + = new java.util.HashMap<>(); + + /** + Should be called from a UDF's xStep(), xValue(), and xInverse() + methods, passing it that method's first argument and an initial + value for the persistent state. If there is currently no + mapping for the given context within the map, one is created + using the given initial value, else the existing one is used + and the 2nd argument is ignored. It returns a ValueHolder + which can be used to modify that state directly without + requiring that the client update the underlying map's entry. + +
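As an illustration of the mapping just described (an editorial sketch, not part of this file), a SUM()-like aggregate might use this class as follows. `MySum` is a hypothetical client class, and the snippet assumes the generic `AggregateFunction<T>`/`ValueHolder<T>` signatures from this package:

~~~
// Hypothetical client code, shown only to illustrate the state mapping.
class MySum extends AggregateFunction<Long> {
  @Override public void xStep(SqlFunction.Arguments args){
    // One ValueHolder per aggregate context, created on first use with 0.
    final ValueHolder<Long> v = getAggregateState(args, 0L);
    for( SqlFunction.Arguments.Arg a : args ) v.value += a.getInt64();
  }
  @Override public void xFinal(SqlFunction.Arguments args){
    // Removes the mapping; null means xStep() was never called
    // (e.g. an aggregate over zero rows).
    final Long sum = takeAggregateState(args);
    args.resultInt64( null==sum ? 0L : sum );
  }
}
~~~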

    The caller is obligated to eventually call + takeAggregateState() to clear the mapping. + */ + public ValueHolder getAggregateState(SqlFunction.Arguments args, T initialValue){ + final Long key = args.getContext().getAggregateContext(true); + ValueHolder rc = null==key ? null : map.get(key); + if( null==rc ){ + map.put(key, rc = new ValueHolder<>(initialValue)); + } + return rc; + } + + /** + Should be called from a UDF's xFinal() method and passed that + method's first argument. This function removes the value + associated with with the arguments' aggregate context from the + map and returns it, returning null if no other UDF method has + been called to set up such a mapping. The latter condition will + be the case if a UDF is used in a statement which has no result + rows. + */ + public T takeAggregateState(SqlFunction.Arguments args){ + final ValueHolder h = map.remove(args.getContext().getAggregateContext(false)); + return null==h ? null : h.value; + } + } + + /** Per-invocation state for the UDF. */ + private final PerContextState map = new PerContextState<>(); + + /** + To be called from the implementation's xStep() method, as well + as the xValue() and xInverse() methods of the {@link WindowFunction} + subclass, to fetch the current per-call UDF state. On the + first call to this method for any given sqlite3_context + argument, the context is set to the given initial value. On all other + calls, the 2nd argument is ignored. + + @see SQLFunction.PerContextState#getAggregateState + */ + protected final ValueHolder getAggregateState(SqlFunction.Arguments args, T initialValue){ + return map.getAggregateState(args, initialValue); + } + + /** + To be called from the implementation's xFinal() method to fetch + the final state of the UDF and remove its mapping. + + see SQLFunction.PerContextState#takeAggregateState + */ + protected final T takeAggregateState(SqlFunction.Arguments args){ + return map.takeAggregateState(args); + } + +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/ScalarFunction.java b/ext/jni/src/org/sqlite/jni/wrapper1/ScalarFunction.java new file mode 100644 index 0000000000..c616ae7393 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/ScalarFunction.java @@ -0,0 +1,33 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. +*/ +package org.sqlite.jni.wrapper1; + +/** + The SqlFunction type for scalar SQL functions. +*/ +public abstract class ScalarFunction implements SqlFunction { + /** + As for the xFunc() argument of the C API's + sqlite3_create_function(). If this function throws, it is + translated into an sqlite3_result_error(). + */ + public abstract void xFunc(SqlFunction.Arguments args); + + /** + Optionally override to be notified when the UDF is finalized by + SQLite. This default implementation does nothing. 
+ */ + public void xDestroy() {} + +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java b/ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java new file mode 100644 index 0000000000..bb0fd0ccd4 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/SqlFunction.java @@ -0,0 +1,318 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. +*/ +package org.sqlite.jni.wrapper1; +import org.sqlite.jni.capi.CApi; +import org.sqlite.jni.capi.sqlite3_context; +import org.sqlite.jni.capi.sqlite3_value; + +/** + Base marker interface for SQLite's three types of User-Defined SQL + Functions (UDFs): Scalar, Aggregate, and Window functions. +*/ +public interface SqlFunction { + + int DETERMINISTIC = CApi.SQLITE_DETERMINISTIC; + int INNOCUOUS = CApi.SQLITE_INNOCUOUS; + int DIRECTONLY = CApi.SQLITE_DIRECTONLY; + int SUBTYPE = CApi.SQLITE_SUBTYPE; + int RESULT_SUBTYPE = CApi.SQLITE_RESULT_SUBTYPE; + int UTF8 = CApi.SQLITE_UTF8; + int UTF16 = CApi.SQLITE_UTF16; + + /** + The Arguments type is an abstraction on top of the lower-level + UDF function argument types. It provides _most_ of the functionality + of the lower-level interface, insofar as possible without "leaking" + those types into this API. + */ + final class Arguments implements Iterable{ + private final sqlite3_context cx; + private final sqlite3_value args[]; + public final int length; + + /** + Must be passed the context and arguments for the UDF call this + object is wrapping. Intended to be used by internal proxy + classes which "convert" the lower-level interface into this + package's higher-level interface, e.g. ScalarAdapter and + AggregateAdapter. + + Passing null for the args is equivalent to passing a length-0 + array. + */ + Arguments(sqlite3_context cx, sqlite3_value args[]){ + this.cx = cx; + this.args = args==null ? new sqlite3_value[0] : args; + this.length = this.args.length; + } + + /** + Returns the sqlite3_value at the given argument index or throws + an IllegalArgumentException exception if ndx is out of range. + */ + private sqlite3_value valueAt(int ndx){ + if(ndx<0 || ndx>=args.length){ + throw new IllegalArgumentException( + "SQL function argument index "+ndx+" is out of range." + ); + } + return args[ndx]; + } + + //! Returns the underlying sqlite3_context for these arguments. + sqlite3_context getContext(){return cx;} + + /** + Returns the Sqlite (db) object associated with this UDF call, + or null if the UDF is somehow called without such an object or + the db has been closed in an untimely manner (e.g. closed by a + UDF call). 
+ */ + public Sqlite getDb(){ + return Sqlite.fromNative( CApi.sqlite3_context_db_handle(cx) ); + } + + public int getArgCount(){ return args.length; } + + public int getInt(int argNdx){return CApi.sqlite3_value_int(valueAt(argNdx));} + public long getInt64(int argNdx){return CApi.sqlite3_value_int64(valueAt(argNdx));} + public double getDouble(int argNdx){return CApi.sqlite3_value_double(valueAt(argNdx));} + public byte[] getBlob(int argNdx){return CApi.sqlite3_value_blob(valueAt(argNdx));} + public byte[] getText(int argNdx){return CApi.sqlite3_value_text(valueAt(argNdx));} + public String getText16(int argNdx){return CApi.sqlite3_value_text16(valueAt(argNdx));} + public int getBytes(int argNdx){return CApi.sqlite3_value_bytes(valueAt(argNdx));} + public int getBytes16(int argNdx){return CApi.sqlite3_value_bytes16(valueAt(argNdx));} + public Object getObject(int argNdx){return CApi.sqlite3_value_java_object(valueAt(argNdx));} + public T getObject(int argNdx, Class type){ + return CApi.sqlite3_value_java_object(valueAt(argNdx), type); + } + + public int getType(int argNdx){return CApi.sqlite3_value_type(valueAt(argNdx));} + public int getSubtype(int argNdx){return CApi.sqlite3_value_subtype(valueAt(argNdx));} + public int getNumericType(int argNdx){return CApi.sqlite3_value_numeric_type(valueAt(argNdx));} + public int getNoChange(int argNdx){return CApi.sqlite3_value_nochange(valueAt(argNdx));} + public boolean getFromBind(int argNdx){return CApi.sqlite3_value_frombind(valueAt(argNdx));} + public int getEncoding(int argNdx){return CApi.sqlite3_value_encoding(valueAt(argNdx));} + + public void resultInt(int v){ CApi.sqlite3_result_int(cx, v); } + public void resultInt64(long v){ CApi.sqlite3_result_int64(cx, v); } + public void resultDouble(double v){ CApi.sqlite3_result_double(cx, v); } + public void resultError(String msg){CApi.sqlite3_result_error(cx, msg);} + public void resultError(Exception e){CApi.sqlite3_result_error(cx, e);} + public void resultErrorTooBig(){CApi.sqlite3_result_error_toobig(cx);} + public void resultErrorCode(int rc){CApi.sqlite3_result_error_code(cx, rc);} + public void resultObject(Object o){CApi.sqlite3_result_java_object(cx, o);} + public void resultNull(){CApi.sqlite3_result_null(cx);} + /** + Analog to sqlite3_result_value(), using the Value object at the + given argument index. + */ + public void resultArg(int argNdx){CApi.sqlite3_result_value(cx, valueAt(argNdx));} + public void resultSubtype(int subtype){CApi.sqlite3_result_subtype(cx, subtype);} + public void resultZeroBlob(long n){ + // Throw on error? If n is too big, + // sqlite3_result_error_toobig() is automatically called. + CApi.sqlite3_result_zeroblob64(cx, n); + } + + public void resultBlob(byte[] blob){CApi.sqlite3_result_blob(cx, blob);} + public void resultText(byte[] utf8){CApi.sqlite3_result_text(cx, utf8);} + public void resultText(String txt){CApi.sqlite3_result_text(cx, txt);} + public void resultText16(byte[] utf16){CApi.sqlite3_result_text16(cx, utf16);} + public void resultText16(String txt){CApi.sqlite3_result_text16(cx, txt);} + + /** + Callbacks should invoke this on OOM errors, instead of throwing + OutOfMemoryError, because the latter cannot be propagated + through the C API. + */ + public void resultNoMem(){CApi.sqlite3_result_error_nomem(cx);} + + /** + Analog to sqlite3_set_auxdata() but throws if argNdx is out of + range. 
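Taken together, the accessors and result setters listed above are usually all a UDF implementation needs. A small editorial sketch (not part of this commit; `Half` is a hypothetical client class) of a scalar function written against this interface:

~~~
// Hypothetical client code illustrating the Arguments accessors above.
class Half extends ScalarFunction {
  @Override public void xFunc(SqlFunction.Arguments args){
    if( 1 != args.getArgCount() ){
      args.resultError("half() expects exactly one argument");
    }else if( CApi.SQLITE_NULL == args.getType(0) ){
      args.resultNull();        // propagate SQL NULL
    }else{
      args.resultDouble( args.getDouble(0) / 2.0 );
    }
  }
}
~~~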
+ */ + public void setAuxData(int argNdx, Object o){ + /* From the API docs: https://sqlite.org/c3ref/get_auxdata.html + + The value of the N parameter to these interfaces should be + non-negative. Future enhancements may make use of negative N + values to define new kinds of function caching behavior. + */ + valueAt(argNdx); + CApi.sqlite3_set_auxdata(cx, argNdx, o); + } + + /** + Analog to sqlite3_get_auxdata() but throws if argNdx is out of + range. + */ + public Object getAuxData(int argNdx){ + valueAt(argNdx); + return CApi.sqlite3_get_auxdata(cx, argNdx); + } + + /** + Represents a single SqlFunction argument. Primarily intended + for use with the Arguments class's Iterable interface. + */ + public final static class Arg { + private final Arguments a; + private final int ndx; + /* Only for use by the Arguments class. */ + private Arg(Arguments a, int ndx){ + this.a = a; + this.ndx = ndx; + } + /** Returns this argument's index in its parent argument list. */ + public int getIndex(){return ndx;} + public int getInt(){return a.getInt(ndx);} + public long getInt64(){return a.getInt64(ndx);} + public double getDouble(){return a.getDouble(ndx);} + public byte[] getBlob(){return a.getBlob(ndx);} + public byte[] getText(){return a.getText(ndx);} + public String getText16(){return a.getText16(ndx);} + public int getBytes(){return a.getBytes(ndx);} + public int getBytes16(){return a.getBytes16(ndx);} + public Object getObject(){return a.getObject(ndx);} + public T getObject(Class type){ return a.getObject(ndx, type); } + public int getType(){return a.getType(ndx);} + public Object getAuxData(){return a.getAuxData(ndx);} + public void setAuxData(Object o){a.setAuxData(ndx, o);} + } + + @Override + public java.util.Iterator iterator(){ + final Arg[] proxies = new Arg[args.length]; + for( int i = 0; i < args.length; ++i ){ + proxies[i] = new Arg(this, i); + } + return java.util.Arrays.stream(proxies).iterator(); + } + + } + + /** + Internal-use adapter for wrapping this package's ScalarFunction + for use with the org.sqlite.jni.capi.ScalarFunction interface. + */ + final class ScalarAdapter extends org.sqlite.jni.capi.ScalarFunction { + private final ScalarFunction impl; + ScalarAdapter(ScalarFunction impl){ + this.impl = impl; + } + /** + Proxies this.impl.xFunc(), adapting the call arguments to that + function's signature. If the proxy throws, it's translated to + sqlite_result_error() with the exception's message. + */ + public void xFunc(sqlite3_context cx, sqlite3_value[] args){ + try{ + impl.xFunc( new SqlFunction.Arguments(cx, args) ); + }catch(Exception e){ + CApi.sqlite3_result_error(cx, e); + } + } + + public void xDestroy(){ + impl.xDestroy(); + } + } + + /** + Internal-use adapter for wrapping this package's AggregateFunction + for use with the org.sqlite.jni.capi.AggregateFunction interface. + */ + class AggregateAdapter extends org.sqlite.jni.capi.AggregateFunction { + /*cannot be final without duplicating the whole body in WindowAdapter*/ + private final AggregateFunction impl; + AggregateAdapter(AggregateFunction impl){ + this.impl = impl; + } + + /** + Proxies this.impl.xStep(), adapting the call arguments to that + function's signature. If the proxied function throws, it is + translated to sqlite_result_error() with the exception's + message. 
+ */ + public void xStep(sqlite3_context cx, sqlite3_value[] args){ + try{ + impl.xStep( new SqlFunction.Arguments(cx, args) ); + }catch(Exception e){ + CApi.sqlite3_result_error(cx, e); + } + } + + /** + As for the xFinal() argument of the C API's + sqlite3_create_function(). If the proxied function throws, it + is translated into a sqlite3_result_error(). + */ + public void xFinal(sqlite3_context cx){ + try{ + impl.xFinal( new SqlFunction.Arguments(cx, null) ); + }catch(Exception e){ + CApi.sqlite3_result_error(cx, e); + } + } + + public void xDestroy(){ + impl.xDestroy(); + } + } + + /** + Internal-use adapter for wrapping this package's WindowFunction + for use with the org.sqlite.jni.capi.WindowFunction interface. + */ + final class WindowAdapter extends AggregateAdapter { + private final WindowFunction impl; + WindowAdapter(WindowFunction impl){ + super(impl); + this.impl = impl; + } + + /** + Proxies this.impl.xInverse(), adapting the call arguments to that + function's signature. If the proxied function throws, it is + translated to sqlite_result_error() with the exception's + message. + */ + public void xInverse(sqlite3_context cx, sqlite3_value[] args){ + try{ + impl.xInverse( new SqlFunction.Arguments(cx, args) ); + }catch(Exception e){ + CApi.sqlite3_result_error(cx, e); + } + } + + /** + As for the xValue() argument of the C API's sqlite3_create_window_function(). + If the proxied function throws, it is translated into a sqlite3_result_error(). + */ + public void xValue(sqlite3_context cx){ + try{ + impl.xValue( new SqlFunction.Arguments(cx, null) ); + }catch(Exception e){ + CApi.sqlite3_result_error(cx, e); + } + } + + public void xDestroy(){ + impl.xDestroy(); + } + } + +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java b/ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java new file mode 100644 index 0000000000..d259e0ce62 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/Sqlite.java @@ -0,0 +1,1994 @@ +/* +** 2023-10-09 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. +*/ +package org.sqlite.jni.wrapper1; +import java.nio.charset.StandardCharsets; +import org.sqlite.jni.capi.CApi; +import org.sqlite.jni.capi.sqlite3; +import org.sqlite.jni.capi.sqlite3_stmt; +import org.sqlite.jni.capi.sqlite3_backup; +import org.sqlite.jni.capi.sqlite3_blob; +import org.sqlite.jni.capi.OutputPointer; + +/** + This class represents a database connection, analog to the C-side + sqlite3 class but with added argument validation, exceptions, and + similar "smoothing of sharp edges" to make the API safe to use from + Java. It also acts as a namespace for other types for which + individual instances are tied to a specific database connection. 
+*/ +public final class Sqlite implements AutoCloseable { + private sqlite3 db; + private static final boolean JNI_SUPPORTS_NIO = + CApi.sqlite3_jni_supports_nio(); + + // Result codes + public static final int OK = CApi.SQLITE_OK; + public static final int ERROR = CApi.SQLITE_ERROR; + public static final int INTERNAL = CApi.SQLITE_INTERNAL; + public static final int PERM = CApi.SQLITE_PERM; + public static final int ABORT = CApi.SQLITE_ABORT; + public static final int BUSY = CApi.SQLITE_BUSY; + public static final int LOCKED = CApi.SQLITE_LOCKED; + public static final int NOMEM = CApi.SQLITE_NOMEM; + public static final int READONLY = CApi.SQLITE_READONLY; + public static final int INTERRUPT = CApi.SQLITE_INTERRUPT; + public static final int IOERR = CApi.SQLITE_IOERR; + public static final int CORRUPT = CApi.SQLITE_CORRUPT; + public static final int NOTFOUND = CApi.SQLITE_NOTFOUND; + public static final int FULL = CApi.SQLITE_FULL; + public static final int CANTOPEN = CApi.SQLITE_CANTOPEN; + public static final int PROTOCOL = CApi.SQLITE_PROTOCOL; + public static final int EMPTY = CApi.SQLITE_EMPTY; + public static final int SCHEMA = CApi.SQLITE_SCHEMA; + public static final int TOOBIG = CApi.SQLITE_TOOBIG; + public static final int CONSTRAINT = CApi. SQLITE_CONSTRAINT; + public static final int MISMATCH = CApi.SQLITE_MISMATCH; + public static final int MISUSE = CApi.SQLITE_MISUSE; + public static final int NOLFS = CApi.SQLITE_NOLFS; + public static final int AUTH = CApi.SQLITE_AUTH; + public static final int FORMAT = CApi.SQLITE_FORMAT; + public static final int RANGE = CApi.SQLITE_RANGE; + public static final int NOTADB = CApi.SQLITE_NOTADB; + public static final int NOTICE = CApi.SQLITE_NOTICE; + public static final int WARNING = CApi.SQLITE_WARNING; + public static final int ROW = CApi.SQLITE_ROW; + public static final int DONE = CApi.SQLITE_DONE; + public static final int ERROR_MISSING_COLLSEQ = CApi.SQLITE_ERROR_MISSING_COLLSEQ; + public static final int ERROR_RETRY = CApi.SQLITE_ERROR_RETRY; + public static final int ERROR_SNAPSHOT = CApi.SQLITE_ERROR_SNAPSHOT; + public static final int IOERR_READ = CApi.SQLITE_IOERR_READ; + public static final int IOERR_SHORT_READ = CApi.SQLITE_IOERR_SHORT_READ; + public static final int IOERR_WRITE = CApi.SQLITE_IOERR_WRITE; + public static final int IOERR_FSYNC = CApi.SQLITE_IOERR_FSYNC; + public static final int IOERR_DIR_FSYNC = CApi.SQLITE_IOERR_DIR_FSYNC; + public static final int IOERR_TRUNCATE = CApi.SQLITE_IOERR_TRUNCATE; + public static final int IOERR_FSTAT = CApi.SQLITE_IOERR_FSTAT; + public static final int IOERR_UNLOCK = CApi.SQLITE_IOERR_UNLOCK; + public static final int IOERR_RDLOCK = CApi.SQLITE_IOERR_RDLOCK; + public static final int IOERR_DELETE = CApi.SQLITE_IOERR_DELETE; + public static final int IOERR_BLOCKED = CApi.SQLITE_IOERR_BLOCKED; + public static final int IOERR_NOMEM = CApi.SQLITE_IOERR_NOMEM; + public static final int IOERR_ACCESS = CApi.SQLITE_IOERR_ACCESS; + public static final int IOERR_CHECKRESERVEDLOCK = CApi.SQLITE_IOERR_CHECKRESERVEDLOCK; + public static final int IOERR_LOCK = CApi.SQLITE_IOERR_LOCK; + public static final int IOERR_CLOSE = CApi.SQLITE_IOERR_CLOSE; + public static final int IOERR_DIR_CLOSE = CApi.SQLITE_IOERR_DIR_CLOSE; + public static final int IOERR_SHMOPEN = CApi.SQLITE_IOERR_SHMOPEN; + public static final int IOERR_SHMSIZE = CApi.SQLITE_IOERR_SHMSIZE; + public static final int IOERR_SHMLOCK = CApi.SQLITE_IOERR_SHMLOCK; + public static final int IOERR_SHMMAP = CApi.SQLITE_IOERR_SHMMAP; + 
public static final int IOERR_SEEK = CApi.SQLITE_IOERR_SEEK; + public static final int IOERR_DELETE_NOENT = CApi.SQLITE_IOERR_DELETE_NOENT; + public static final int IOERR_MMAP = CApi.SQLITE_IOERR_MMAP; + public static final int IOERR_GETTEMPPATH = CApi.SQLITE_IOERR_GETTEMPPATH; + public static final int IOERR_CONVPATH = CApi.SQLITE_IOERR_CONVPATH; + public static final int IOERR_VNODE = CApi.SQLITE_IOERR_VNODE; + public static final int IOERR_AUTH = CApi.SQLITE_IOERR_AUTH; + public static final int IOERR_BEGIN_ATOMIC = CApi.SQLITE_IOERR_BEGIN_ATOMIC; + public static final int IOERR_COMMIT_ATOMIC = CApi.SQLITE_IOERR_COMMIT_ATOMIC; + public static final int IOERR_ROLLBACK_ATOMIC = CApi.SQLITE_IOERR_ROLLBACK_ATOMIC; + public static final int IOERR_DATA = CApi.SQLITE_IOERR_DATA; + public static final int IOERR_CORRUPTFS = CApi.SQLITE_IOERR_CORRUPTFS; + public static final int LOCKED_SHAREDCACHE = CApi.SQLITE_LOCKED_SHAREDCACHE; + public static final int LOCKED_VTAB = CApi.SQLITE_LOCKED_VTAB; + public static final int BUSY_RECOVERY = CApi.SQLITE_BUSY_RECOVERY; + public static final int BUSY_SNAPSHOT = CApi.SQLITE_BUSY_SNAPSHOT; + public static final int BUSY_TIMEOUT = CApi.SQLITE_BUSY_TIMEOUT; + public static final int CANTOPEN_NOTEMPDIR = CApi.SQLITE_CANTOPEN_NOTEMPDIR; + public static final int CANTOPEN_ISDIR = CApi.SQLITE_CANTOPEN_ISDIR; + public static final int CANTOPEN_FULLPATH = CApi.SQLITE_CANTOPEN_FULLPATH; + public static final int CANTOPEN_CONVPATH = CApi.SQLITE_CANTOPEN_CONVPATH; + public static final int CANTOPEN_SYMLINK = CApi.SQLITE_CANTOPEN_SYMLINK; + public static final int CORRUPT_VTAB = CApi.SQLITE_CORRUPT_VTAB; + public static final int CORRUPT_SEQUENCE = CApi.SQLITE_CORRUPT_SEQUENCE; + public static final int CORRUPT_INDEX = CApi.SQLITE_CORRUPT_INDEX; + public static final int READONLY_RECOVERY = CApi.SQLITE_READONLY_RECOVERY; + public static final int READONLY_CANTLOCK = CApi.SQLITE_READONLY_CANTLOCK; + public static final int READONLY_ROLLBACK = CApi.SQLITE_READONLY_ROLLBACK; + public static final int READONLY_DBMOVED = CApi.SQLITE_READONLY_DBMOVED; + public static final int READONLY_CANTINIT = CApi.SQLITE_READONLY_CANTINIT; + public static final int READONLY_DIRECTORY = CApi.SQLITE_READONLY_DIRECTORY; + public static final int ABORT_ROLLBACK = CApi.SQLITE_ABORT_ROLLBACK; + public static final int CONSTRAINT_CHECK = CApi.SQLITE_CONSTRAINT_CHECK; + public static final int CONSTRAINT_COMMITHOOK = CApi.SQLITE_CONSTRAINT_COMMITHOOK; + public static final int CONSTRAINT_FOREIGNKEY = CApi.SQLITE_CONSTRAINT_FOREIGNKEY; + public static final int CONSTRAINT_FUNCTION = CApi.SQLITE_CONSTRAINT_FUNCTION; + public static final int CONSTRAINT_NOTNULL = CApi.SQLITE_CONSTRAINT_NOTNULL; + public static final int CONSTRAINT_PRIMARYKEY = CApi.SQLITE_CONSTRAINT_PRIMARYKEY; + public static final int CONSTRAINT_TRIGGER = CApi.SQLITE_CONSTRAINT_TRIGGER; + public static final int CONSTRAINT_UNIQUE = CApi.SQLITE_CONSTRAINT_UNIQUE; + public static final int CONSTRAINT_VTAB = CApi.SQLITE_CONSTRAINT_VTAB; + public static final int CONSTRAINT_ROWID = CApi.SQLITE_CONSTRAINT_ROWID; + public static final int CONSTRAINT_PINNED = CApi.SQLITE_CONSTRAINT_PINNED; + public static final int CONSTRAINT_DATATYPE = CApi.SQLITE_CONSTRAINT_DATATYPE; + public static final int NOTICE_RECOVER_WAL = CApi.SQLITE_NOTICE_RECOVER_WAL; + public static final int NOTICE_RECOVER_ROLLBACK = CApi.SQLITE_NOTICE_RECOVER_ROLLBACK; + public static final int WARNING_AUTOINDEX = CApi.SQLITE_WARNING_AUTOINDEX; + public static final int 
AUTH_USER = CApi.SQLITE_AUTH_USER; + public static final int OK_LOAD_PERMANENTLY = CApi.SQLITE_OK_LOAD_PERMANENTLY; + + // sqlite3_open() flags + public static final int OPEN_READWRITE = CApi.SQLITE_OPEN_READWRITE; + public static final int OPEN_CREATE = CApi.SQLITE_OPEN_CREATE; + public static final int OPEN_EXRESCODE = CApi.SQLITE_OPEN_EXRESCODE; + + // transaction state + public static final int TXN_NONE = CApi.SQLITE_TXN_NONE; + public static final int TXN_READ = CApi.SQLITE_TXN_READ; + public static final int TXN_WRITE = CApi.SQLITE_TXN_WRITE; + + // sqlite3_status() ops + public static final int STATUS_MEMORY_USED = CApi.SQLITE_STATUS_MEMORY_USED; + public static final int STATUS_PAGECACHE_USED = CApi.SQLITE_STATUS_PAGECACHE_USED; + public static final int STATUS_PAGECACHE_OVERFLOW = CApi.SQLITE_STATUS_PAGECACHE_OVERFLOW; + public static final int STATUS_MALLOC_SIZE = CApi.SQLITE_STATUS_MALLOC_SIZE; + public static final int STATUS_PARSER_STACK = CApi.SQLITE_STATUS_PARSER_STACK; + public static final int STATUS_PAGECACHE_SIZE = CApi.SQLITE_STATUS_PAGECACHE_SIZE; + public static final int STATUS_MALLOC_COUNT = CApi.SQLITE_STATUS_MALLOC_COUNT; + + // sqlite3_db_status() ops + public static final int DBSTATUS_LOOKASIDE_USED = CApi.SQLITE_DBSTATUS_LOOKASIDE_USED; + public static final int DBSTATUS_CACHE_USED = CApi.SQLITE_DBSTATUS_CACHE_USED; + public static final int DBSTATUS_SCHEMA_USED = CApi.SQLITE_DBSTATUS_SCHEMA_USED; + public static final int DBSTATUS_STMT_USED = CApi.SQLITE_DBSTATUS_STMT_USED; + public static final int DBSTATUS_LOOKASIDE_HIT = CApi.SQLITE_DBSTATUS_LOOKASIDE_HIT; + public static final int DBSTATUS_LOOKASIDE_MISS_SIZE = CApi.SQLITE_DBSTATUS_LOOKASIDE_MISS_SIZE; + public static final int DBSTATUS_LOOKASIDE_MISS_FULL = CApi.SQLITE_DBSTATUS_LOOKASIDE_MISS_FULL; + public static final int DBSTATUS_CACHE_HIT = CApi.SQLITE_DBSTATUS_CACHE_HIT; + public static final int DBSTATUS_CACHE_MISS = CApi.SQLITE_DBSTATUS_CACHE_MISS; + public static final int DBSTATUS_CACHE_WRITE = CApi.SQLITE_DBSTATUS_CACHE_WRITE; + public static final int DBSTATUS_DEFERRED_FKS = CApi.SQLITE_DBSTATUS_DEFERRED_FKS; + public static final int DBSTATUS_CACHE_USED_SHARED = CApi.SQLITE_DBSTATUS_CACHE_USED_SHARED; + public static final int DBSTATUS_CACHE_SPILL = CApi.SQLITE_DBSTATUS_CACHE_SPILL; + + // Limits + public static final int LIMIT_LENGTH = CApi.SQLITE_LIMIT_LENGTH; + public static final int LIMIT_SQL_LENGTH = CApi.SQLITE_LIMIT_SQL_LENGTH; + public static final int LIMIT_COLUMN = CApi.SQLITE_LIMIT_COLUMN; + public static final int LIMIT_EXPR_DEPTH = CApi.SQLITE_LIMIT_EXPR_DEPTH; + public static final int LIMIT_COMPOUND_SELECT = CApi.SQLITE_LIMIT_COMPOUND_SELECT; + public static final int LIMIT_VDBE_OP = CApi.SQLITE_LIMIT_VDBE_OP; + public static final int LIMIT_FUNCTION_ARG = CApi.SQLITE_LIMIT_FUNCTION_ARG; + public static final int LIMIT_ATTACHED = CApi.SQLITE_LIMIT_ATTACHED; + public static final int LIMIT_LIKE_PATTERN_LENGTH = CApi.SQLITE_LIMIT_LIKE_PATTERN_LENGTH; + public static final int LIMIT_VARIABLE_NUMBER = CApi.SQLITE_LIMIT_VARIABLE_NUMBER; + public static final int LIMIT_TRIGGER_DEPTH = CApi.SQLITE_LIMIT_TRIGGER_DEPTH; + public static final int LIMIT_WORKER_THREADS = CApi.SQLITE_LIMIT_WORKER_THREADS; + + // sqlite3_prepare_v3() flags + public static final int PREPARE_PERSISTENT = CApi.SQLITE_PREPARE_PERSISTENT; + public static final int PREPARE_NO_VTAB = CApi.SQLITE_PREPARE_NO_VTAB; + + // sqlite3_trace_v2() flags + public static final int TRACE_STMT = CApi.SQLITE_TRACE_STMT; + public 
static final int TRACE_PROFILE = CApi.SQLITE_TRACE_PROFILE; + public static final int TRACE_ROW = CApi.SQLITE_TRACE_ROW; + public static final int TRACE_CLOSE = CApi.SQLITE_TRACE_CLOSE; + public static final int TRACE_ALL = TRACE_STMT | TRACE_PROFILE | TRACE_ROW | TRACE_CLOSE; + + // sqlite3_db_config() ops + public static final int DBCONFIG_ENABLE_FKEY = CApi.SQLITE_DBCONFIG_ENABLE_FKEY; + public static final int DBCONFIG_ENABLE_TRIGGER = CApi.SQLITE_DBCONFIG_ENABLE_TRIGGER; + public static final int DBCONFIG_ENABLE_FTS3_TOKENIZER = CApi.SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER; + public static final int DBCONFIG_ENABLE_LOAD_EXTENSION = CApi.SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION; + public static final int DBCONFIG_NO_CKPT_ON_CLOSE = CApi.SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE; + public static final int DBCONFIG_ENABLE_QPSG = CApi.SQLITE_DBCONFIG_ENABLE_QPSG; + public static final int DBCONFIG_TRIGGER_EQP = CApi.SQLITE_DBCONFIG_TRIGGER_EQP; + public static final int DBCONFIG_RESET_DATABASE = CApi.SQLITE_DBCONFIG_RESET_DATABASE; + public static final int DBCONFIG_DEFENSIVE = CApi.SQLITE_DBCONFIG_DEFENSIVE; + public static final int DBCONFIG_WRITABLE_SCHEMA = CApi.SQLITE_DBCONFIG_WRITABLE_SCHEMA; + public static final int DBCONFIG_LEGACY_ALTER_TABLE = CApi.SQLITE_DBCONFIG_LEGACY_ALTER_TABLE; + public static final int DBCONFIG_DQS_DML = CApi.SQLITE_DBCONFIG_DQS_DML; + public static final int DBCONFIG_DQS_DDL = CApi.SQLITE_DBCONFIG_DQS_DDL; + public static final int DBCONFIG_ENABLE_VIEW = CApi.SQLITE_DBCONFIG_ENABLE_VIEW; + public static final int DBCONFIG_LEGACY_FILE_FORMAT = CApi.SQLITE_DBCONFIG_LEGACY_FILE_FORMAT; + public static final int DBCONFIG_TRUSTED_SCHEMA = CApi.SQLITE_DBCONFIG_TRUSTED_SCHEMA; + public static final int DBCONFIG_STMT_SCANSTATUS = CApi.SQLITE_DBCONFIG_STMT_SCANSTATUS; + public static final int DBCONFIG_REVERSE_SCANORDER = CApi.SQLITE_DBCONFIG_REVERSE_SCANORDER; + + // sqlite3_config() ops + public static final int CONFIG_SINGLETHREAD = CApi.SQLITE_CONFIG_SINGLETHREAD; + public static final int CONFIG_MULTITHREAD = CApi.SQLITE_CONFIG_MULTITHREAD; + public static final int CONFIG_SERIALIZED = CApi.SQLITE_CONFIG_SERIALIZED; + + // Encodings + public static final int UTF8 = CApi.SQLITE_UTF8; + public static final int UTF16 = CApi.SQLITE_UTF16; + public static final int UTF16LE = CApi.SQLITE_UTF16LE; + public static final int UTF16BE = CApi.SQLITE_UTF16BE; + /* We elide the UTF16_ALIGNED from this interface because it + is irrelevant for the Java interface. */ + + // SQL data type IDs + public static final int INTEGER = CApi.SQLITE_INTEGER; + public static final int FLOAT = CApi.SQLITE_FLOAT; + public static final int TEXT = CApi.SQLITE_TEXT; + public static final int BLOB = CApi.SQLITE_BLOB; + public static final int NULL = CApi.SQLITE_NULL; + + // Authorizer codes. 
+ public static final int DENY = CApi.SQLITE_DENY; + public static final int IGNORE = CApi.SQLITE_IGNORE; + public static final int CREATE_INDEX = CApi.SQLITE_CREATE_INDEX; + public static final int CREATE_TABLE = CApi.SQLITE_CREATE_TABLE; + public static final int CREATE_TEMP_INDEX = CApi.SQLITE_CREATE_TEMP_INDEX; + public static final int CREATE_TEMP_TABLE = CApi.SQLITE_CREATE_TEMP_TABLE; + public static final int CREATE_TEMP_TRIGGER = CApi.SQLITE_CREATE_TEMP_TRIGGER; + public static final int CREATE_TEMP_VIEW = CApi.SQLITE_CREATE_TEMP_VIEW; + public static final int CREATE_TRIGGER = CApi.SQLITE_CREATE_TRIGGER; + public static final int CREATE_VIEW = CApi.SQLITE_CREATE_VIEW; + public static final int DELETE = CApi.SQLITE_DELETE; + public static final int DROP_INDEX = CApi.SQLITE_DROP_INDEX; + public static final int DROP_TABLE = CApi.SQLITE_DROP_TABLE; + public static final int DROP_TEMP_INDEX = CApi.SQLITE_DROP_TEMP_INDEX; + public static final int DROP_TEMP_TABLE = CApi.SQLITE_DROP_TEMP_TABLE; + public static final int DROP_TEMP_TRIGGER = CApi.SQLITE_DROP_TEMP_TRIGGER; + public static final int DROP_TEMP_VIEW = CApi.SQLITE_DROP_TEMP_VIEW; + public static final int DROP_TRIGGER = CApi.SQLITE_DROP_TRIGGER; + public static final int DROP_VIEW = CApi.SQLITE_DROP_VIEW; + public static final int INSERT = CApi.SQLITE_INSERT; + public static final int PRAGMA = CApi.SQLITE_PRAGMA; + public static final int READ = CApi.SQLITE_READ; + public static final int SELECT = CApi.SQLITE_SELECT; + public static final int TRANSACTION = CApi.SQLITE_TRANSACTION; + public static final int UPDATE = CApi.SQLITE_UPDATE; + public static final int ATTACH = CApi.SQLITE_ATTACH; + public static final int DETACH = CApi.SQLITE_DETACH; + public static final int ALTER_TABLE = CApi.SQLITE_ALTER_TABLE; + public static final int REINDEX = CApi.SQLITE_REINDEX; + public static final int ANALYZE = CApi.SQLITE_ANALYZE; + public static final int CREATE_VTABLE = CApi.SQLITE_CREATE_VTABLE; + public static final int DROP_VTABLE = CApi.SQLITE_DROP_VTABLE; + public static final int FUNCTION = CApi.SQLITE_FUNCTION; + public static final int SAVEPOINT = CApi.SQLITE_SAVEPOINT; + public static final int RECURSIVE = CApi.SQLITE_RECURSIVE; + + //! Used only by the open() factory functions. + private Sqlite(sqlite3 db){ + this.db = db; + } + + /** Maps org.sqlite.jni.capi.sqlite3 to Sqlite instances. */ + private static final java.util.Map nativeToWrapper + = new java.util.HashMap<>(); + + + /** + When any given thread is done using the SQLite library, calling + this will free up any native-side resources which may be + associated specifically with that thread. This is not strictly + necessary, in particular in applications which only use SQLite + from a single thread, but may help free some otherwise errant + resources. + + Calling into SQLite from a given thread after this has been + called in that thread is harmless. The library will simply start + to re-cache certain state for that thread. + + Contrariwise, failing to call this will effectively leak a small + amount of cached state for the thread, which may add up to + significant amounts if the application uses SQLite from many + threads. + + This must never be called while actively using SQLite from this + thread, e.g. from within a query loop or a callback which is + operating on behalf of the library. 
+ */ + static void uncacheThread(){ + CApi.sqlite3_java_uncache_thread(); + } + + /** + Returns the Sqlite object associated with the given sqlite3 + object, or null if there is no such mapping. + */ + static Sqlite fromNative(sqlite3 low){ + synchronized(nativeToWrapper){ + return nativeToWrapper.get(low); + } + } + + /** + Returns a newly-opened db connection or throws SqliteException if + opening fails. All arguments are as documented for + sqlite3_open_v2(). + + Design question: do we want static factory functions or should + this be reformulated as a constructor? + */ + public static Sqlite open(String filename, int flags, String vfsName){ + final OutputPointer.sqlite3 out = new OutputPointer.sqlite3(); + final int rc = CApi.sqlite3_open_v2(filename, out, flags, vfsName); + final sqlite3 n = out.take(); + if( 0!=rc ){ + if( null==n ) throw new SqliteException(rc); + final SqliteException ex = new SqliteException(n); + n.close(); + throw ex; + } + final Sqlite rv = new Sqlite(n); + synchronized(nativeToWrapper){ + nativeToWrapper.put(n, rv); + } + runAutoExtensions(rv); + return rv; + } + + public static Sqlite open(String filename, int flags){ + return open(filename, flags, null); + } + + public static Sqlite open(String filename){ + return open(filename, OPEN_READWRITE|OPEN_CREATE, null); + } + + public static String libVersion(){ + return CApi.sqlite3_libversion(); + } + + public static int libVersionNumber(){ + return CApi.sqlite3_libversion_number(); + } + + public static String libSourceId(){ + return CApi.sqlite3_sourceid(); + } + + /** + Returns the value of the native library's build-time value of the + SQLITE_THREADSAFE build option. + */ + public static int libThreadsafe(){ + return CApi.sqlite3_threadsafe(); + } + + /** + Analog to sqlite3_compileoption_get(). + */ + public static String compileOptionGet(int n){ + return CApi.sqlite3_compileoption_get(n); + } + + /** + Analog to sqlite3_compileoption_used(). + */ + public static boolean compileOptionUsed(String optName){ + return CApi.sqlite3_compileoption_used(optName); + } + + private static final boolean hasNormalizeSql = + compileOptionUsed("ENABLE_NORMALIZE"); + + private static final boolean hasSqlLog = + compileOptionUsed("ENABLE_SQLLOG"); + + /** + Throws UnsupportedOperationException if check is false. + flag is expected to be the name of an SQLITE_ENABLE_... + build flag. + */ + private static void checkSupported(boolean check, String flag){ + if( !check ){ + throw new UnsupportedOperationException( + "Library was built without "+flag + ); + } + } + + /** + Analog to sqlite3_complete(). + */ + public static boolean isCompleteStatement(String sql){ + switch(CApi.sqlite3_complete(sql)){ + case 0: return false; + case CApi.SQLITE_MISUSE: + throw new IllegalArgumentException("Input may not be null."); + case CApi.SQLITE_NOMEM: + throw new OutOfMemoryError(); + default: + return true; + } + } + + public static int keywordCount(){ + return CApi.sqlite3_keyword_count(); + } + + public static boolean keywordCheck(String word){ + return CApi.sqlite3_keyword_check(word); + } + + public static String keywordName(int index){ + return CApi.sqlite3_keyword_name(index); + } + + public static boolean strglob(String glob, String txt){ + return 0==CApi.sqlite3_strglob(glob, txt); + } + + public static boolean strlike(String glob, String txt, char escChar){ + return 0==CApi.sqlite3_strlike(glob, txt, escChar); + } + + /** + Output object for use with status() and libStatus(). 
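A short usage sketch for the factory functions above (editorial, not part of this commit; the file name is arbitrary). Because Sqlite implements AutoCloseable, try-with-resources releases the native handle deterministically:

~~~
try( Sqlite db = Sqlite.open("example.db",
                             Sqlite.OPEN_READWRITE | Sqlite.OPEN_CREATE) ){
  // Use db here, e.g. db.prepare(...) or db.createFunction(...).
}catch( SqliteException e ){
  // open() and most member functions report failures as SqliteException.
  System.err.println("SQLite error: " + e.getMessage());
}
~~~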
+ */ + public static final class Status { + /** The current value for the requested status() or libStatus() metric. */ + long current; + /** The peak value for the requested status() or libStatus() metric. */ + long peak; + } + + /** + As per sqlite3_status64(), but returns its current and high-water + results as a Status object. Throws if the first argument is + not one of the STATUS_... constants. + */ + public static Status libStatus(int op, boolean resetStats){ + org.sqlite.jni.capi.OutputPointer.Int64 pCurrent = + new org.sqlite.jni.capi.OutputPointer.Int64(); + org.sqlite.jni.capi.OutputPointer.Int64 pHighwater = + new org.sqlite.jni.capi.OutputPointer.Int64(); + checkRcStatic( CApi.sqlite3_status64(op, pCurrent, pHighwater, resetStats) ); + final Status s = new Status(); + s.current = pCurrent.value; + s.peak = pHighwater.value; + return s; + } + + /** + As per sqlite3_db_status(), but returns its current and + high-water results as a Status object. Throws if the first + argument is not one of the DBSTATUS_... constants or on any other + misuse. + */ + public Status status(int op, boolean resetStats){ + org.sqlite.jni.capi.OutputPointer.Int32 pCurrent = + new org.sqlite.jni.capi.OutputPointer.Int32(); + org.sqlite.jni.capi.OutputPointer.Int32 pHighwater = + new org.sqlite.jni.capi.OutputPointer.Int32(); + checkRc( CApi.sqlite3_db_status(thisDb(), op, pCurrent, pHighwater, resetStats) ); + final Status s = new Status(); + s.current = pCurrent.value; + s.peak = pHighwater.value; + return s; + } + + @Override public void close(){ + if(null!=this.db){ + synchronized(nativeToWrapper){ + nativeToWrapper.remove(this.db); + } + this.db.close(); + this.db = null; + } + } + + /** + Returns this object's underlying native db handle, or null if + this instance has been closed. This is very specifically not + public. + */ + sqlite3 nativeHandle(){ return this.db; } + + private sqlite3 thisDb(){ + if( null==db || 0==db.getNativePointer() ){ + throw new IllegalArgumentException("This database instance is closed."); + } + return this.db; + } + + // private byte[] stringToUtf8(String s){ + // return s==null ? null : s.getBytes(StandardCharsets.UTF_8); + // } + + /** + If rc!=0, throws an SqliteException. If this db is currently + opened and has non-0 sqlite3_errcode(), the error state is + extracted from it, else only the string form of rc is used. It is + the caller's responsibility to filter out non-error codes such as + SQLITE_ROW and SQLITE_DONE before calling this. + + As a special case, if rc is SQLITE_NOMEM, an OutOfMemoryError is + thrown. + */ + private void checkRc(int rc){ + if( 0!=rc ){ + if( CApi.SQLITE_NOMEM==rc ){ + throw new OutOfMemoryError(); + }else if( null==db || 0==CApi.sqlite3_errcode(db) ){ + throw new SqliteException(rc); + }else{ + throw new SqliteException(db); + } + } + } + + /** + Like checkRc() but behaves as if that function were + called with a null db object. + */ + private static void checkRcStatic(int rc){ + if( 0!=rc ){ + if( CApi.SQLITE_NOMEM==rc ){ + throw new OutOfMemoryError(); + }else{ + throw new SqliteException(rc); + } + } + } + + /** + Toggles the use of extended result codes on or off. By default + they are turned off, but they can be enabled by default by + including the OPEN_EXRESCODE flag when opening a database. + + Because this API reports db-side errors using exceptions, + enabling this may change the values returned by + SqliteException.errcode(). 
+ */ + public void useExtendedResultCodes(boolean on){ + checkRc( CApi.sqlite3_extended_result_codes(thisDb(), on) ); + } + + /** + Analog to sqlite3_prepare_v3(), this prepares the first SQL + statement from the given input string and returns it as a + Stmt. It throws an SqliteException if preparation fails or an + IllegalArgumentException if the input is empty (e.g. contains + only comments or whitespace). + + The first argument must be SQL input in UTF-8 encoding. + + prepFlags must be 0 or a bitmask of the PREPARE_... constants. + + For processing multiple statements from a single input, use + prepareMulti(). + + Design note: though the C-level API succeeds with a null + statement object for empty inputs, that approach is cumbersome to + use in higher-level APIs because every prepared statement has to + be checked for null before using it. + */ + public Stmt prepare(byte utf8Sql[], int prepFlags){ + final OutputPointer.sqlite3_stmt out = new OutputPointer.sqlite3_stmt(); + final int rc = CApi.sqlite3_prepare_v3(thisDb(), utf8Sql, prepFlags, out); + checkRc(rc); + final sqlite3_stmt q = out.take(); + if( null==q ){ + /* The C-level API treats input which is devoid of SQL + statements (e.g. all comments or an empty string) as success + but returns a NULL sqlite3_stmt object. In higher-level APIs, + wrapping a "successful NULL" object that way is tedious to + use because it forces clients and/or wrapper-level code to + check for that unusual case. In practice, higher-level + bindings are generally better-served by treating empty SQL + input as an error. */ + throw new IllegalArgumentException("Input contains no SQL statements."); + } + return new Stmt(this, q); + } + + /** + Equivalent to prepare(X, prepFlags), where X is + sql.getBytes(StandardCharsets.UTF_8). + */ + public Stmt prepare(String sql, int prepFlags){ + return prepare( sql.getBytes(StandardCharsets.UTF_8), prepFlags ); + } + + /** + Equivalent to prepare(sql, 0). + */ + public Stmt prepare(String sql){ + return prepare(sql, 0); + } + + + /** + Callback type for use with prepareMulti(). + */ + public interface PrepareMulti { + /** + Gets passed a Stmt which it may handle in arbitrary ways. + Ownership of st is passed to this function. It must throw on + error. + */ + void call(Sqlite.Stmt st); + } + + /** + A PrepareMulti implementation which calls another PrepareMulti + object and then finalizes its statement. + */ + public static class PrepareMultiFinalize implements PrepareMulti { + private final PrepareMulti pm; + /** + Proxies the given PrepareMulti via this object's call() method. + */ + public PrepareMultiFinalize(PrepareMulti proxy){ + this.pm = proxy; + } + /** + Passes st to the call() method of the object this one proxies, + then finalizes st, propagating any exceptions from call() after + finalizing st. + */ + @Override public void call(Stmt st){ + try{ pm.call(st); } + finally{ st.finalizeStmt(); } + } + } + + /** + Equivalent to prepareMulti(sql,0,visitor). + */ + public void prepareMulti(String sql, PrepareMulti visitor){ + prepareMulti( sql, 0, visitor ); + } + + /** + Equivalent to prepareMulti(X,prepFlags,visitor), where X is + sql.getBytes(StandardCharsets.UTF_8). + */ + public void prepareMulti(String sql, int prepFlags, PrepareMulti visitor){ + prepareMulti(sql.getBytes(StandardCharsets.UTF_8), prepFlags, visitor); + } + + /** + A variant of prepare() which can handle multiple SQL statements + in a single input string. 
For each statement in the given string, + the statement is passed to visitor.call() a single time, passing + ownership of the statement to that function. This function does + not step() or close() statements - those operations are left to + caller or the visitor function. + + Unlike prepare(), this function does not fail if the input + contains only whitespace or SQL comments. In that case it is up + to the caller to arrange for that to be an error (if desired). + + PrepareMultiFinalize offers a proxy which finalizes each + statement after it is passed to another client-defined visitor. + + Be aware that certain legal SQL constructs may fail in the + preparation phase, before the corresponding statement can be + stepped. Most notably, authorizer checks which disallow access to + something in a statement behave that way. + */ + public void prepareMulti(byte sqlUtf8[], int prepFlags, PrepareMulti visitor){ + int pos = 0, n = 1; + byte[] sqlChunk = sqlUtf8; + final org.sqlite.jni.capi.OutputPointer.sqlite3_stmt outStmt = + new org.sqlite.jni.capi.OutputPointer.sqlite3_stmt(); + final org.sqlite.jni.capi.OutputPointer.Int32 oTail = + new org.sqlite.jni.capi.OutputPointer.Int32(); + while( pos < sqlChunk.length ){ + sqlite3_stmt stmt; + if( pos>0 ){ + sqlChunk = java.util.Arrays.copyOfRange(sqlChunk, pos, sqlChunk.length); + } + if( 0==sqlChunk.length ) break; + checkRc( + CApi.sqlite3_prepare_v3(db, sqlChunk, prepFlags, outStmt, oTail) + ); + pos = oTail.value; + stmt = outStmt.take(); + if( null==stmt ){ + /* empty statement, e.g. only comments or whitespace, was parsed. */ + continue; + } + visitor.call(new Stmt(this, stmt)); + } + } + + public void createFunction(String name, int nArg, int eTextRep, ScalarFunction f){ + int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep, + new SqlFunction.ScalarAdapter(f)); + if( 0!=rc ) throw new SqliteException(db); + } + + public void createFunction(String name, int nArg, ScalarFunction f){ + this.createFunction(name, nArg, CApi.SQLITE_UTF8, f); + } + + public void createFunction(String name, int nArg, int eTextRep, AggregateFunction f){ + int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep, + new SqlFunction.AggregateAdapter(f)); + if( 0!=rc ) throw new SqliteException(db); + } + + public void createFunction(String name, int nArg, AggregateFunction f){ + this.createFunction(name, nArg, CApi.SQLITE_UTF8, f); + } + + public void createFunction(String name, int nArg, int eTextRep, WindowFunction f){ + int rc = CApi.sqlite3_create_function(thisDb(), name, nArg, eTextRep, + new SqlFunction.WindowAdapter(f)); + if( 0!=rc ) throw new SqliteException(db); + } + + public void createFunction(String name, int nArg, WindowFunction f){ + this.createFunction(name, nArg, CApi.SQLITE_UTF8, f); + } + + public long changes(){ + return CApi.sqlite3_changes64(thisDb()); + } + + public long totalChanges(){ + return CApi.sqlite3_total_changes64(thisDb()); + } + + public long lastInsertRowId(){ + return CApi.sqlite3_last_insert_rowid(thisDb()); + } + + public void setLastInsertRowId(long rowId){ + CApi.sqlite3_set_last_insert_rowid(thisDb(), rowId); + } + + public void interrupt(){ + CApi.sqlite3_interrupt(thisDb()); + } + + public boolean isInterrupted(){ + return CApi.sqlite3_is_interrupted(thisDb()); + } + + public boolean isAutoCommit(){ + return CApi.sqlite3_get_autocommit(thisDb()); + } + + /** + Analog to sqlite3_txn_state(). 
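A brief editorial sketch (not part of this commit) of registering user-defined functions with the overloads above and reading the change counters, given an open connection `db`; `Half` and `MySum` are the hypothetical classes sketched earlier:

~~~
db.createFunction("half", 1, new Half());    // scalar UDF, UTF-8 by default
db.createFunction("mysum", 1, new MySum());  // aggregate UDF
// ... after stepping some INSERT statements prepared on db ...
long changedByLastStatement = db.changes();
long changedSinceOpen       = db.totalChanges();
~~~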
Returns one of TXN_NONE, TXN_READ, + or TXN_WRITE to denote this database's current transaction state + for the given schema name (or the most restrictive state of any + schema if zSchema is null). + */ + public int transactionState(String zSchema){ + return CApi.sqlite3_txn_state(thisDb(), zSchema); + } + + /** + Analog to sqlite3_db_name(). Returns null if passed an unknown + index. + */ + public String dbName(int dbNdx){ + return CApi.sqlite3_db_name(thisDb(), dbNdx); + } + + /** + Analog to sqlite3_db_filename(). Returns null if passed an + unknown db name. + */ + public String dbFileName(String dbName){ + return CApi.sqlite3_db_filename(thisDb(), dbName); + } + + /** + Analog to sqlite3_db_config() for the call forms which take one + of the boolean-type db configuration flags (namely the + DBCONFIG_... constants defined in this class). On success it + returns the result of that underlying call. Throws on error. + */ + public boolean dbConfig(int op, boolean on){ + org.sqlite.jni.capi.OutputPointer.Int32 pOut = + new org.sqlite.jni.capi.OutputPointer.Int32(); + checkRc( CApi.sqlite3_db_config(thisDb(), op, on ? 1 : 0, pOut) ); + return pOut.get()!=0; + } + + /** + Analog to the variant of sqlite3_db_config() for configuring the + SQLITE_DBCONFIG_MAINDBNAME option. Throws on error. + */ + public void setMainDbName(String name){ + checkRc( + CApi.sqlite3_db_config(thisDb(), CApi.SQLITE_DBCONFIG_MAINDBNAME, + name) + ); + } + + /** + Analog to sqlite3_db_readonly() but throws an SqliteException + with result code SQLITE_NOTFOUND if given an unknown database + name. + */ + public boolean readOnly(String dbName){ + final int rc = CApi.sqlite3_db_readonly(thisDb(), dbName); + if( 0==rc ) return false; + else if( rc>0 ) return true; + throw new SqliteException(CApi.SQLITE_NOTFOUND); + } + + /** + Analog to sqlite3_db_release_memory(). + */ + public void releaseMemory(){ + CApi.sqlite3_db_release_memory(thisDb()); + } + + /** + Analog to sqlite3_release_memory(). + */ + public static int libReleaseMemory(int n){ + return CApi.sqlite3_release_memory(n); + } + + /** + Analog to sqlite3_limit(). limitId must be one of the + LIMIT_... constants. + + Returns the old limit for the given option. If newLimit is + negative, it returns the old limit without modifying the limit. + + If sqlite3_limit() returns a negative value, this function throws + an SqliteException with the SQLITE_RANGE result code but no + further error info (because that case does not qualify as a + db-level error). Such errors may indicate an invalid argument + value or an invalid range for newLimit (the underlying function + does not differentiate between those). + */ + public int limit(int limitId, int newLimit){ + final int rc = CApi.sqlite3_limit(thisDb(), limitId, newLimit); + if( rc<0 ){ + throw new SqliteException(CApi.SQLITE_RANGE); + } + return rc; + } + + /** + Analog to sqlite3_errstr(). + */ + static String errstr(int resultCode){ + return CApi.sqlite3_errstr(resultCode); + } + + /** + A wrapper object for use with tableColumnMetadata(). They are + created and populated only via that interface. 
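+
+     An illustrative sketch from within this package, assuming the
+     database contains a table "t" with a column "a" (both names are
+     hypothetical):
+
+     <pre>{@code
+     TableColumnMetadata md = db.tableColumnMetadata("main", "t", "a");
+     String declType = md.getDataType();   // e.g. "INTEGER", possibly null
+     boolean isPk    = md.isPrimaryKey();
+     boolean notNull = md.isNotNull();
+     }</pre>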
+ */ + public final class TableColumnMetadata { + Boolean pNotNull = null; + Boolean pPrimaryKey = null; + Boolean pAutoinc = null; + String pzCollSeq = null; + String pzDataType = null; + + private TableColumnMetadata(){} + + public String getDataType(){ return pzDataType; } + public String getCollation(){ return pzCollSeq; } + public boolean isNotNull(){ return pNotNull; } + public boolean isPrimaryKey(){ return pPrimaryKey; } + public boolean isAutoincrement(){ return pAutoinc; } + } + + /** + Returns data about a database, table, and (optionally) column + (which may be null), as per sqlite3_table_column_metadata(). + Throws if passed invalid arguments, else returns the result as a + new TableColumnMetadata object. + */ + TableColumnMetadata tableColumnMetadata( + String zDbName, String zTableName, String zColumnName + ){ + org.sqlite.jni.capi.OutputPointer.String pzDataType + = new org.sqlite.jni.capi.OutputPointer.String(); + org.sqlite.jni.capi.OutputPointer.String pzCollSeq + = new org.sqlite.jni.capi.OutputPointer.String(); + org.sqlite.jni.capi.OutputPointer.Bool pNotNull + = new org.sqlite.jni.capi.OutputPointer.Bool(); + org.sqlite.jni.capi.OutputPointer.Bool pPrimaryKey + = new org.sqlite.jni.capi.OutputPointer.Bool(); + org.sqlite.jni.capi.OutputPointer.Bool pAutoinc + = new org.sqlite.jni.capi.OutputPointer.Bool(); + final int rc = CApi.sqlite3_table_column_metadata( + thisDb(), zDbName, zTableName, zColumnName, + pzDataType, pzCollSeq, pNotNull, pPrimaryKey, pAutoinc + ); + checkRc(rc); + TableColumnMetadata rv = new TableColumnMetadata(); + rv.pzDataType = pzDataType.value; + rv.pzCollSeq = pzCollSeq.value; + rv.pNotNull = pNotNull.value; + rv.pPrimaryKey = pPrimaryKey.value; + rv.pAutoinc = pAutoinc.value; + return rv; + } + + public interface TraceCallback { + /** + Called by sqlite3 for various tracing operations, as per + sqlite3_trace_v2(). Note that this interface elides the 2nd + argument to the native trace callback, as that role is better + filled by instance-local state. + +

    These callbacks may throw, in which case their exceptions are + converted to C-level error information. + +

    The 2nd argument to this function, if non-null, will be a + Sqlite or Sqlite.Stmt object, depending on the first argument + (see below). + +

    The final argument to this function is the "X" argument + documented for sqlite3_trace() and sqlite3_trace_v2(). Its type + depends on the value of the first argument: + +

    - SQLITE_TRACE_STMT: pNative is a Sqlite.Stmt. pX is a String + containing the prepared SQL. + +

    - SQLITE_TRACE_PROFILE: pNative is a Sqlite.Stmt. pX is a Long + holding an approximate number of nanoseconds the statement took + to run. + +

    - SQLITE_TRACE_ROW: pNative is a Sqlite.Stmt. pX is null. + +

    - SQLITE_TRACE_CLOSE: pNative is a sqlite3. pX is null. + */ + void call(int traceFlag, Object pNative, Object pX); + } + + /** + Analog to sqlite3_trace_v2(). traceMask must be a mask of the + TRACE_... constants. Pass a null callback to remove tracing. + + Throws on error. + */ + public void trace(int traceMask, TraceCallback callback){ + final Sqlite self = this; + final org.sqlite.jni.capi.TraceV2Callback tc = + (null==callback) ? null : new org.sqlite.jni.capi.TraceV2Callback(){ + @SuppressWarnings("unchecked") + @Override public int call(int flag, Object pNative, Object pX){ + switch(flag){ + case TRACE_ROW: + case TRACE_PROFILE: + case TRACE_STMT: + callback.call(flag, Sqlite.Stmt.fromNative((sqlite3_stmt)pNative), pX); + break; + case TRACE_CLOSE: + callback.call(flag, self, pX); + break; + } + return 0; + } + }; + checkRc( CApi.sqlite3_trace_v2(thisDb(), traceMask, tc) ); + } + + /** + Corresponds to the sqlite3_stmt class. Use Sqlite.prepare() to + create new instances. + */ + public static final class Stmt implements AutoCloseable { + private Sqlite _db; + private sqlite3_stmt stmt; + + /** Only called by the prepare() factory functions. */ + Stmt(Sqlite db, sqlite3_stmt stmt){ + this._db = db; + this.stmt = stmt; + synchronized(nativeToWrapper){ + nativeToWrapper.put(this.stmt, this); + } + } + + sqlite3_stmt nativeHandle(){ + return stmt; + } + + /** Maps org.sqlite.jni.capi.sqlite3_stmt to Stmt instances. */ + private static final java.util.Map nativeToWrapper + = new java.util.HashMap<>(); + + /** + Returns the Stmt object associated with the given sqlite3_stmt + object, or null if there is no such mapping. + */ + static Stmt fromNative(sqlite3_stmt low){ + synchronized(nativeToWrapper){ + return nativeToWrapper.get(low); + } + } + + /** + If this statement is still opened, its low-level handle is + returned, else an IllegalArgumentException is thrown. + */ + private sqlite3_stmt thisStmt(){ + if( null==stmt || 0==stmt.getNativePointer() ){ + throw new IllegalArgumentException("This Stmt has been finalized."); + } + return stmt; + } + + /** Throws if n is out of range of this statement's result column + count. Intended to be used by the columnXyz() methods. */ + private sqlite3_stmt checkColIndex(int n){ + if(n<0 || n>=columnCount()){ + throw new IllegalArgumentException("Column index "+n+" is out of range."); + } + return thisStmt(); + } + + /** + Corresponds to sqlite3_finalize(), but we cannot override the + name finalize() here because this one requires a different + signature. It does not throw on error here because "destructors + do not throw." If it returns non-0, the object is still + finalized, but the result code is an indication that something + went wrong in a prior call into the statement's API, as + documented for sqlite3_finalize(). + */ + public int finalizeStmt(){ + int rc = 0; + if( null!=stmt ){ + synchronized(nativeToWrapper){ + nativeToWrapper.remove(this.stmt); + } + CApi.sqlite3_finalize(stmt); + stmt = null; + _db = null; + } + return rc; + } + + @Override public void close(){ + finalizeStmt(); + } + + /** + Throws if rc is any value other than 0, SQLITE_ROW, or + SQLITE_DONE, else returns rc. Error state for the exception is + extracted from this statement object (if it's opened) or the + string form of rc. 
+ */ + private int checkRc(int rc){ + switch(rc){ + case 0: + case CApi.SQLITE_ROW: + case CApi.SQLITE_DONE: return rc; + default: + if( null==stmt ) throw new SqliteException(rc); + else throw new SqliteException(this); + } + } + + /** + Works like sqlite3_step() but returns true for SQLITE_ROW, + false for SQLITE_DONE, and throws SqliteException for any other + result. + */ + public boolean step(){ + switch(checkRc(CApi.sqlite3_step(thisStmt()))){ + case CApi.SQLITE_ROW: return true; + case CApi.SQLITE_DONE: return false; + default: + throw new IllegalStateException( + "This \"cannot happen\": all possible result codes were checked already." + ); + } + } + + /** + Works like sqlite3_step(), returning the same result codes as + that function unless throwOnError is true, in which case it + will throw an SqliteException for any result codes other than + Sqlite.ROW or Sqlite.DONE. + + The utility of this overload over the no-argument one is the + ability to handle BUSY and LOCKED errors more easily. + */ + public int step(boolean throwOnError){ + final int rc = (null==stmt) + ? Sqlite.MISUSE + : CApi.sqlite3_step(stmt); + return throwOnError ? checkRc(rc) : rc; + } + + /** + Returns the Sqlite which prepared this statement, or null if + this statement has been finalized. + */ + public Sqlite getDb(){ return this._db; } + + /** + Works like sqlite3_reset() but throws on error. + */ + public void reset(){ + checkRc(CApi.sqlite3_reset(thisStmt())); + } + + public boolean isBusy(){ + return CApi.sqlite3_stmt_busy(thisStmt()); + } + + public boolean isReadOnly(){ + return CApi.sqlite3_stmt_readonly(thisStmt()); + } + + public String sql(){ + return CApi.sqlite3_sql(thisStmt()); + } + + public String expandedSql(){ + return CApi.sqlite3_expanded_sql(thisStmt()); + } + + /** + Analog to sqlite3_stmt_explain() but throws if op is invalid. + */ + public void explain(int op){ + checkRc(CApi.sqlite3_stmt_explain(thisStmt(), op)); + } + + /** + Analog to sqlite3_stmt_isexplain(). + */ + public int isExplain(){ + return CApi.sqlite3_stmt_isexplain(thisStmt()); + } + + /** + Analog to sqlite3_normalized_sql(), but throws + UnsupportedOperationException if the library was built without + the SQLITE_ENABLE_NORMALIZE flag. 
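+
+     An illustrative sketch, assuming this copy of the library was
+     compiled with SQLITE_ENABLE_NORMALIZE (the SQL is hypothetical):
+
+     <pre>{@code
+     Sqlite.Stmt st = db.prepare("SELECT a FROM t WHERE a=?1");
+     String norm = st.normalizedSql(); // normalized form of the input SQL
+     st.finalizeStmt();
+     }</pre>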
+ */ + public String normalizedSql(){ + Sqlite.checkSupported(hasNormalizeSql, "SQLITE_ENABLE_NORMALIZE"); + return CApi.sqlite3_normalized_sql(thisStmt()); + } + + public void clearBindings(){ + CApi.sqlite3_clear_bindings( thisStmt() ); + } + public void bindInt(int ndx, int val){ + checkRc(CApi.sqlite3_bind_int(thisStmt(), ndx, val)); + } + public void bindInt64(int ndx, long val){ + checkRc(CApi.sqlite3_bind_int64(thisStmt(), ndx, val)); + } + public void bindDouble(int ndx, double val){ + checkRc(CApi.sqlite3_bind_double(thisStmt(), ndx, val)); + } + public void bindObject(int ndx, Object o){ + checkRc(CApi.sqlite3_bind_java_object(thisStmt(), ndx, o)); + } + public void bindNull(int ndx){ + checkRc(CApi.sqlite3_bind_null(thisStmt(), ndx)); + } + public int bindParameterCount(){ + return CApi.sqlite3_bind_parameter_count(thisStmt()); + } + public int bindParameterIndex(String paramName){ + return CApi.sqlite3_bind_parameter_index(thisStmt(), paramName); + } + public String bindParameterName(int ndx){ + return CApi.sqlite3_bind_parameter_name(thisStmt(), ndx); + } + public void bindText(int ndx, byte[] utf8){ + checkRc(CApi.sqlite3_bind_text(thisStmt(), ndx, utf8)); + } + public void bindText(int ndx, String asUtf8){ + checkRc(CApi.sqlite3_bind_text(thisStmt(), ndx, asUtf8)); + } + public void bindText16(int ndx, byte[] utf16){ + checkRc(CApi.sqlite3_bind_text16(thisStmt(), ndx, utf16)); + } + public void bindText16(int ndx, String asUtf16){ + checkRc(CApi.sqlite3_bind_text16(thisStmt(), ndx, asUtf16)); + } + public void bindZeroBlob(int ndx, int n){ + checkRc(CApi.sqlite3_bind_zeroblob(thisStmt(), ndx, n)); + } + public void bindBlob(int ndx, byte[] bytes){ + checkRc(CApi.sqlite3_bind_blob(thisStmt(), ndx, bytes)); + } + + public byte[] columnBlob(int ndx){ + return CApi.sqlite3_column_blob( checkColIndex(ndx), ndx ); + } + public byte[] columnText(int ndx){ + return CApi.sqlite3_column_text( checkColIndex(ndx), ndx ); + } + public String columnText16(int ndx){ + return CApi.sqlite3_column_text16( checkColIndex(ndx), ndx ); + } + public int columnBytes(int ndx){ + return CApi.sqlite3_column_bytes( checkColIndex(ndx), ndx ); + } + public int columnBytes16(int ndx){ + return CApi.sqlite3_column_bytes16( checkColIndex(ndx), ndx ); + } + public int columnInt(int ndx){ + return CApi.sqlite3_column_int( checkColIndex(ndx), ndx ); + } + public long columnInt64(int ndx){ + return CApi.sqlite3_column_int64( checkColIndex(ndx), ndx ); + } + public double columnDouble(int ndx){ + return CApi.sqlite3_column_double( checkColIndex(ndx), ndx ); + } + public int columnType(int ndx){ + return CApi.sqlite3_column_type( checkColIndex(ndx), ndx ); + } + public String columnDeclType(int ndx){ + return CApi.sqlite3_column_decltype( checkColIndex(ndx), ndx ); + } + /** + Analog to sqlite3_column_count() but throws if this statement + has been finalized. + */ + public int columnCount(){ + /* We cannot reliably cache the column count in a class + member because an ALTER TABLE from a separate statement + can invalidate that count and we have no way, short of + installing a COMMIT handler or the like, of knowing when + to re-read it. We cannot install such a handler without + interfering with a client's ability to do so. 
*/ + return CApi.sqlite3_column_count(thisStmt()); + } + public int columnDataCount(){ + return CApi.sqlite3_data_count( thisStmt() ); + } + public Object columnObject(int ndx){ + return CApi.sqlite3_column_java_object( checkColIndex(ndx), ndx ); + } + public T columnObject(int ndx, Class type){ + return CApi.sqlite3_column_java_object( checkColIndex(ndx), ndx, type ); + } + public String columnName(int ndx){ + return CApi.sqlite3_column_name( checkColIndex(ndx), ndx ); + } + public String columnDatabaseName(int ndx){ + return CApi.sqlite3_column_database_name( checkColIndex(ndx), ndx ); + } + public String columnOriginName(int ndx){ + return CApi.sqlite3_column_origin_name( checkColIndex(ndx), ndx ); + } + public String columnTableName(int ndx){ + return CApi.sqlite3_column_table_name( checkColIndex(ndx), ndx ); + } + } /* Stmt class */ + + /** + Interface for auto-extensions, as per the + sqlite3_auto_extension() API. + + Design note: the chicken/egg timing of auto-extension execution + requires that this feature be entirely re-implemented in Java + because the C-level API has no access to the Sqlite type so + cannot pass on an object of that type while the database is being + opened. One side effect of this reimplementation is that this + class's list of auto-extensions is 100% independent of the + C-level list so, e.g., clearAutoExtensions() will have no effect + on auto-extensions added via the C-level API and databases opened + from that level of API will not be passed to this level's + AutoExtension instances. + */ + public interface AutoExtension { + public void call(Sqlite db); + } + + private static final java.util.Set autoExtensions = + new java.util.LinkedHashSet<>(); + + /** + Passes db to all auto-extensions. If any one of them throws, + db.close() is called before the exception is propagated. + */ + private static void runAutoExtensions(Sqlite db){ + AutoExtension list[]; + synchronized(autoExtensions){ + /* Avoid that modifications to the AutoExtension list from within + auto-extensions affect this execution of this list. */ + list = autoExtensions.toArray(new AutoExtension[0]); + } + try { + for( AutoExtension ax : list ) ax.call(db); + }catch(Exception e){ + db.close(); + throw e; + } + } + + /** + Analog to sqlite3_auto_extension(), adds the given object to the + list of auto-extensions if it is not already in that list. The + given object will be run as part of Sqlite.open(), and passed the + being-opened database. If the extension throws then open() will + fail. + + This API does not guaranty whether or not manipulations made to + the auto-extension list from within auto-extension callbacks will + affect the current traversal of the auto-extension list. Whether + or not they do is unspecified and subject to change between + versions. e.g. if an AutoExtension calls addAutoExtension(), + whether or not the new extension will be run on the being-opened + database is undefined. + + Note that calling Sqlite.open() from an auto-extension will + necessarily result in recursion loop and (eventually) a stack + overflow. + */ + public static void addAutoExtension( AutoExtension e ){ + if( null==e ){ + throw new IllegalArgumentException("AutoExtension may not be null."); + } + synchronized(autoExtensions){ + autoExtensions.add(e); + } + } + + /** + Removes the given object from the auto-extension list if it is in + that list, otherwise this has no side-effects beyond briefly + locking that list. 
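+
+     An illustrative register/unregister round trip (the extension body
+     is hypothetical):
+
+     <pre>{@code
+     Sqlite.AutoExtension ax = new Sqlite.AutoExtension(){
+       @Override public void call(Sqlite db){
+         // e.g. install application-specific functions on each new db
+       }
+     };
+     Sqlite.addAutoExtension(ax);
+     // ...subsequent Sqlite.open() calls will invoke ax...
+     Sqlite.removeAutoExtension(ax);
+     }</pre>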
+ */ + public static void removeAutoExtension( AutoExtension e ){ + synchronized(autoExtensions){ + autoExtensions.remove(e); + } + } + + /** + Removes all auto-extensions which were added via addAutoExtension(). + */ + public static void clearAutoExtensions(){ + synchronized(autoExtensions){ + autoExtensions.clear(); + } + } + + /** + Encapsulates state related to the sqlite3 backup API. Use + Sqlite.initBackup() to create new instances. + */ + public static final class Backup implements AutoCloseable { + private sqlite3_backup b; + private Sqlite dbTo; + private Sqlite dbFrom; + + Backup(Sqlite dbDest, String schemaDest,Sqlite dbSrc, String schemaSrc){ + this.dbTo = dbDest; + this.dbFrom = dbSrc; + b = CApi.sqlite3_backup_init(dbDest.nativeHandle(), schemaDest, + dbSrc.nativeHandle(), schemaSrc); + if(null==b) toss(); + } + + private void toss(){ + int rc = CApi.sqlite3_errcode(dbTo.nativeHandle()); + if(0!=rc) throw new SqliteException(dbTo); + rc = CApi.sqlite3_errcode(dbFrom.nativeHandle()); + if(0!=rc) throw new SqliteException(dbFrom); + throw new SqliteException(CApi.SQLITE_ERROR); + } + + private sqlite3_backup getNative(){ + if( null==b ) throw new IllegalStateException("This Backup is already closed."); + return b; + } + /** + If this backup is still active, this completes the backup and + frees its native resources, otherwise it this is a no-op. + */ + public void finish(){ + if( null!=b ){ + CApi.sqlite3_backup_finish(b); + b = null; + dbTo = null; + dbFrom = null; + } + } + + /** Equivalent to finish(). */ + @Override public void close(){ + this.finish(); + } + + /** + Analog to sqlite3_backup_step(). Returns 0 if stepping succeeds + or, Sqlite.DONE if the end is reached, Sqlite.BUSY if one of + the databases is busy, Sqlite.LOCKED if one of the databases is + locked, and throws for any other result code or if this object + has been closed. Note that BUSY and LOCKED are not necessarily + permanent errors, so do not trigger an exception. + */ + public int step(int pageCount){ + final int rc = CApi.sqlite3_backup_step(getNative(), pageCount); + switch(rc){ + case 0: + case Sqlite.DONE: + case Sqlite.BUSY: + case Sqlite.LOCKED: + return rc; + default: + toss(); + return CApi.SQLITE_ERROR/*not reached*/; + } + } + + /** + Analog to sqlite3_backup_pagecount(). + */ + public int pageCount(){ + return CApi.sqlite3_backup_pagecount(getNative()); + } + + /** + Analog to sqlite3_backup_remaining(). + */ + public int remaining(){ + return CApi.sqlite3_backup_remaining(getNative()); + } + } + + /** + Analog to sqlite3_backup_init(). If schemaSrc is null, "main" is + assumed. Throws if either this db or dbSrc (the source db) are + not opened, if either of schemaDest or schemaSrc are null, or if + the underlying call to sqlite3_backup_init() fails. + + The returned object must eventually be cleaned up by either + arranging for it to be auto-closed (e.g. using + try-with-resources) or by calling its finish() method. + */ + public Backup initBackup(String schemaDest, Sqlite dbSrc, String schemaSrc){ + thisDb(); + dbSrc.thisDb(); + if( null==schemaSrc || null==schemaDest ){ + throw new IllegalArgumentException( + "Neither the source nor destination schema name may be null." + ); + } + return new Backup(this, schemaDest, dbSrc, schemaSrc); + } + + + /** + Callback type for use with createCollation(). + */ + public interface Collation { + /** + Called by the SQLite core to compare inputs. Implementations + must compare its two arguments using memcmp(3) semantics. 
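+
+     An illustrative sketch of a collation which reverses the default
+     byte-wise ordering (adapted from this binding's test code; the
+     UTF8 encoding constant is assumed to be this class's flag):
+
+     <pre>{@code
+     final Sqlite.Collation reversi = new Sqlite.Collation(){
+       @Override public int call(byte[] lhs, byte[] rhs){
+         final int len = Math.min(lhs.length, rhs.length);
+         for( int i = 0; i < len; ++i ){
+           final int c = lhs[i] - rhs[i];
+           if( 0 != c ) return -c;
+         }
+         return -(lhs.length - rhs.length);
+       }
+     };
+     db.createCollation("reversi", Sqlite.UTF8, reversi);
+     }</pre>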
+ + Warning: the SQLite core has no mechanism for reporting errors + from custom collations and its workflow does not accommodate + propagation of exceptions from callbacks. Any exceptions thrown + from collations will be silently suppressed and sorting results + will be unpredictable. + */ + int call(byte[] lhs, byte[] rhs); + } + + /** + Analog to sqlite3_create_collation(). + + Throws if name is null or empty, c is null, or the encoding flag + is invalid. The encoding must be one of the UTF8, UTF16, UTF16LE, + or UTF16BE constants. + */ + public void createCollation(String name, int encoding, Collation c){ + thisDb(); + if( null==name || name.isEmpty()){ + throw new IllegalArgumentException("Collation name may not be null or empty."); + } + if( null==c ){ + throw new IllegalArgumentException("Collation may not be null."); + } + switch(encoding){ + case UTF8: + case UTF16: + case UTF16LE: + case UTF16BE: + break; + default: + throw new IllegalArgumentException("Invalid Collation encoding."); + } + checkRc( + CApi.sqlite3_create_collation( + thisDb(), name, encoding, new org.sqlite.jni.capi.CollationCallback(){ + @Override public int call(byte[] lhs, byte[] rhs){ + try{return c.call(lhs, rhs);} + catch(Exception e){return 0;} + } + @Override public void xDestroy(){} + } + ) + ); + } + + /** + Callback for use with onCollationNeeded(). + */ + public interface CollationNeeded { + /** + Must behave as documented for the callback for + sqlite3_collation_needed(). + + Warning: the C API has no mechanism for reporting or + propagating errors from this callback, so any exceptions it + throws are suppressed. + */ + void call(Sqlite db, int encoding, String collationName); + } + + /** + Sets up the given object to be called by the SQLite core when it + encounters a collation name which it does not know. Pass a null + object to disconnect the object from the core. This replaces any + existing collation-needed loader, or is a no-op if the given + object is already registered. Throws if registering the loader + fails. + */ + public void onCollationNeeded( CollationNeeded cn ){ + org.sqlite.jni.capi.CollationNeededCallback cnc = null; + if( null!=cn ){ + cnc = new org.sqlite.jni.capi.CollationNeededCallback(){ + @Override public void call(sqlite3 db, int encoding, String collationName){ + final Sqlite xdb = Sqlite.fromNative(db); + if(null!=xdb) cn.call(xdb, encoding, collationName); + } + }; + } + checkRc( CApi.sqlite3_collation_needed(thisDb(), cnc) ); + } + + /** + Callback for use with busyHandler(). + */ + public interface BusyHandler { + /** + Must function as documented for the C-level + sqlite3_busy_handler() callback argument, minus the (void*) + argument the C-level function requires. + + If this function throws, it is translated to a database-level + error. + */ + int call(int n); + } + + /** + Analog to sqlite3_busy_timeout(). + */ + public void setBusyTimeout(int ms){ + checkRc(CApi.sqlite3_busy_timeout(thisDb(), ms)); + } + + /** + Analog to sqlite3_busy_handler(). If b is null then any + current handler is cleared. + */ + public void setBusyHandler( BusyHandler b ){ + org.sqlite.jni.capi.BusyHandlerCallback bhc = null; + if( null!=b ){ + /*bhc = new org.sqlite.jni.capi.BusyHandlerCallback(){ + @Override public int call(int n){ + return b.call(n); + } + };*/ + bhc = b::call; + } + checkRc( CApi.sqlite3_busy_handler(thisDb(), bhc) ); + } + + public interface CommitHook { + /** + Must behave as documented for the C-level sqlite3_commit_hook() + callback. 
If it throws, the exception is translated into + a db-level error. + */ + int call(); + } + + /** + A level of indirection to permit setCommitHook() to have similar + semantics as the C API, returning the previous hook. The caveat + is that if the low-level API is used to install a hook, it will + have a different hook type than Sqlite.CommitHook so + setCommitHook() will return null instead of that object. + */ + private static class CommitHookProxy + implements org.sqlite.jni.capi.CommitHookCallback { + final CommitHook commitHook; + CommitHookProxy(CommitHook ch){ + this.commitHook = ch; + } + @Override public int call(){ + return commitHook.call(); + } + } + + /** + Analog to sqlite3_commit_hook(). Returns the previous hook, if + any (else null). Throws if this db is closed. + + Minor caveat: if a commit hook is set on this object's underlying + db handle using the lower-level SQLite API, this function may + return null when replacing it, despite there being a hook, + because it will have a different callback type. So long as the + handle is only manipulated via the high-level API, this caveat + does not apply. + */ + public CommitHook setCommitHook( CommitHook c ){ + CommitHookProxy chp = null; + if( null!=c ){ + chp = new CommitHookProxy(c); + } + final org.sqlite.jni.capi.CommitHookCallback rv = + CApi.sqlite3_commit_hook(thisDb(), chp); + return (rv instanceof CommitHookProxy) + ? ((CommitHookProxy)rv).commitHook + : null; + } + + + public interface RollbackHook { + /** + Must behave as documented for the C-level sqlite3_rollback_hook() + callback. If it throws, the exception is translated into + a db-level error. + */ + void call(); + } + + /** + A level of indirection to permit setRollbackHook() to have similar + semantics as the C API, returning the previous hook. The caveat + is that if the low-level API is used to install a hook, it will + have a different hook type than Sqlite.RollbackHook so + setRollbackHook() will return null instead of that object. + */ + private static class RollbackHookProxy + implements org.sqlite.jni.capi.RollbackHookCallback { + final RollbackHook rollbackHook; + RollbackHookProxy(RollbackHook ch){ + this.rollbackHook = ch; + } + @Override public void call(){rollbackHook.call();} + } + + /** + Analog to sqlite3_rollback_hook(). Returns the previous hook, if + any (else null). Throws if this db is closed. + + Minor caveat: if a rollback hook is set on this object's underlying + db handle using the lower-level SQLite API, this function may + return null when replacing it, despite there being a hook, + because it will have a different callback type. So long as the + handle is only manipulated via the high-level API, this caveat + does not apply. + */ + public RollbackHook setRollbackHook( RollbackHook c ){ + RollbackHookProxy chp = null; + if( null!=c ){ + chp = new RollbackHookProxy(c); + } + final org.sqlite.jni.capi.RollbackHookCallback rv = + CApi.sqlite3_rollback_hook(thisDb(), chp); + return (rv instanceof RollbackHookProxy) + ? ((RollbackHookProxy)rv).rollbackHook + : null; + } + + public interface UpdateHook { + /** + Must function as described for the C-level sqlite3_update_hook() + callback. + */ + void call(int opId, String dbName, String tableName, long rowId); + } + + /** + A level of indirection to permit setUpdateHook() to have similar + semantics as the C API, returning the previous hook. 
The caveat + is that if the low-level API is used to install a hook, it will + have a different hook type than Sqlite.UpdateHook so + setUpdateHook() will return null instead of that object. + */ + private static class UpdateHookProxy + implements org.sqlite.jni.capi.UpdateHookCallback { + final UpdateHook updateHook; + UpdateHookProxy(UpdateHook ch){ + this.updateHook = ch; + } + @Override public void call(int opId, String dbName, String tableName, long rowId){ + updateHook.call(opId, dbName, tableName, rowId); + } + } + + /** + Analog to sqlite3_update_hook(). Returns the previous hook, if + any (else null). Throws if this db is closed. + + Minor caveat: if a update hook is set on this object's underlying + db handle using the lower-level SQLite API, this function may + return null when replacing it, despite there being a hook, + because it will have a different callback type. So long as the + handle is only manipulated via the high-level API, this caveat + does not apply. + */ + public UpdateHook setUpdateHook( UpdateHook c ){ + UpdateHookProxy chp = null; + if( null!=c ){ + chp = new UpdateHookProxy(c); + } + final org.sqlite.jni.capi.UpdateHookCallback rv = + CApi.sqlite3_update_hook(thisDb(), chp); + return (rv instanceof UpdateHookProxy) + ? ((UpdateHookProxy)rv).updateHook + : null; + } + + + /** + Callback interface for use with setProgressHandler(). + */ + public interface ProgressHandler { + /** + Must behave as documented for the C-level sqlite3_progress_handler() + callback. If it throws, the exception is translated into + a db-level error. + */ + int call(); + } + + /** + Analog to sqlite3_progress_handler(), sets the current progress + handler or clears it if p is null. + + Note that this API, in contrast to setUpdateHook(), + setRollbackHook(), and setCommitHook(), cannot return the + previous handler. That inconsistency is part of the lower-level C + API. + */ + public void setProgressHandler( int n, ProgressHandler p ){ + org.sqlite.jni.capi.ProgressHandlerCallback phc = null; + if( null!=p ){ + /*phc = new org.sqlite.jni.capi.ProgressHandlerCallback(){ + @Override public int call(){ return p.call(); } + };*/ + phc = p::call; + } + CApi.sqlite3_progress_handler( thisDb(), n, phc ); + } + + + /** + Callback for use with setAuthorizer(). + */ + public interface Authorizer { + /** + Must function as described for the C-level + sqlite3_set_authorizer() callback. If it throws, the error is + converted to a db-level error and the exception is suppressed. + */ + int call(int opId, String s1, String s2, String s3, String s4); + } + + /** + Analog to sqlite3_set_authorizer(), this sets the current + authorizer callback, or clears if it passed null. + */ + public void setAuthorizer( Authorizer a ) { + org.sqlite.jni.capi.AuthorizerCallback ac = null; + if( null!=a ){ + /*ac = new org.sqlite.jni.capi.AuthorizerCallback(){ + @Override public int call(int opId, String s1, String s2, String s3, String s4){ + return a.call(opId, s1, s2, s3, s4); + } + };*/ + ac = a::call; + } + checkRc( CApi.sqlite3_set_authorizer( thisDb(), ac ) ); + } + + /** + Object type for use with blobOpen() + */ + public final class Blob implements AutoCloseable { + private Sqlite db; + private sqlite3_blob b; + Blob(Sqlite db, sqlite3_blob b){ + this.db = db; + this.b = b; + } + + /** + If this blob is still opened, its low-level handle is + returned, else an IllegalArgumentException is thrown. 
+ */ + private sqlite3_blob thisBlob(){ + if( null==b || 0==b.getNativePointer() ){ + throw new IllegalArgumentException("This Blob has been finalized."); + } + return b; + } + + /** + Analog to sqlite3_blob_close(). + */ + @Override public void close(){ + if( null!=b ){ + CApi.sqlite3_blob_close(b); + b = null; + db = null; + } + } + + /** + Throws if the JVM does not have JNI-level support for + ByteBuffer. + */ + private void checkNio(){ + if( !Sqlite.JNI_SUPPORTS_NIO ){ + throw new UnsupportedOperationException( + "This JVM does not support JNI access to ByteBuffer." + ); + } + } + /** + Analog to sqlite3_blob_reopen() but throws on error. + */ + public void reopen(long newRowId){ + db.checkRc( CApi.sqlite3_blob_reopen(thisBlob(), newRowId) ); + } + + /** + Analog to sqlite3_blob_write() but throws on error. + */ + public void write( byte[] bytes, int atOffset ){ + db.checkRc( CApi.sqlite3_blob_write(thisBlob(), bytes, atOffset) ); + } + + /** + Analog to sqlite3_blob_read() but throws on error. + */ + public void read( byte[] dest, int atOffset ){ + db.checkRc( CApi.sqlite3_blob_read(thisBlob(), dest, atOffset) ); + } + + /** + Analog to sqlite3_blob_bytes(). + */ + public int bytes(){ + return CApi.sqlite3_blob_bytes(thisBlob()); + } + } + + /** + Analog to sqlite3_blob_open(). Returns a Blob object for the + given database, table, column, and rowid. The blob is opened for + read-write mode if writeable is true, else it is read-only. + + The returned object must eventually be freed, before this + database is closed, by either arranging for it to be auto-closed + or calling its close() method. + + Throws on error. + */ + public Blob blobOpen(String dbName, String tableName, String columnName, + long iRow, boolean writeable){ + final OutputPointer.sqlite3_blob out = new OutputPointer.sqlite3_blob(); + checkRc( + CApi.sqlite3_blob_open(thisDb(), dbName, tableName, columnName, + iRow, writeable ? 1 : 0, out) + ); + return new Blob(this, out.take()); + } + + /** + Callback for use with libConfigLog(). + */ + public interface ConfigLog { + /** + Must function as described for a C-level callback for + sqlite3_config()'s SQLITE_CONFIG_LOG callback, with the slight + signature change. Any exceptions thrown from this callback are + necessarily suppressed. + */ + void call(int errCode, String msg); + } + + /** + Analog to sqlite3_config() with the SQLITE_CONFIG_LOG option, + this sets or (if log is null) clears the current logger. + */ + public static void libConfigLog(ConfigLog log){ + final org.sqlite.jni.capi.ConfigLogCallback l = + null==log + ? null + /*: new org.sqlite.jni.capi.ConfigLogCallback() { + @Override public void call(int errCode, String msg){ + log.call(errCode, msg); + } + };*/ + : log::call; + checkRcStatic(CApi.sqlite3_config(l)); + } + + /** + Callback for use with libConfigSqlLog(). + */ + public interface ConfigSqlLog { + /** + Must function as described for a C-level callback for + sqlite3_config()'s SQLITE_CONFIG_SQLLOG callback, with the + slight signature change. Any exceptions thrown from this + callback are necessarily suppressed. + */ + void call(Sqlite db, String msg, int msgType); + } + + /** + Analog to sqlite3_config() with the SQLITE_CONFIG_SQLLOG option, + this sets or (if log is null) clears the current logger. + + If SQLite is built without SQLITE_ENABLE_SQLLOG defined then this + will throw an UnsupportedOperationException. 
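+
+     An illustrative sketch, assuming an SQLITE_ENABLE_SQLLOG-capable
+     build:
+
+     <pre>{@code
+     Sqlite.libConfigSqlLog(new Sqlite.ConfigSqlLog(){
+       @Override public void call(Sqlite db, String msg, int msgType){
+         // e.g. forward msg to the application's own logging facility
+       }
+     });
+     // Pass null to clear the logger again:
+     Sqlite.libConfigSqlLog(null);
+     }</pre>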
+ */ + public static void libConfigSqlLog(ConfigSqlLog log){ + Sqlite.checkSupported(hasNormalizeSql, "SQLITE_ENABLE_SQLLOG"); + final org.sqlite.jni.capi.ConfigSqlLogCallback l = + null==log + ? null + : new org.sqlite.jni.capi.ConfigSqlLogCallback() { + @Override public void call(sqlite3 db, String msg, int msgType){ + try{ + log.call(fromNative(db), msg, msgType); + }catch(Exception e){ + /* Suppressed */ + } + } + }; + checkRcStatic(CApi.sqlite3_config(l)); + } + + /** + Analog to the C-level sqlite3_config() with one of the + SQLITE_CONFIG_... constants defined as CONFIG_... in this + class. Throws on error, including passing of an unknown option or + if a specified option is not supported by the underlying build of + the SQLite library. + */ + public static void libConfigOp( int op ){ + checkRcStatic(CApi.sqlite3_config(op)); + } + +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java b/ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java new file mode 100644 index 0000000000..9b4440f190 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/SqliteException.java @@ -0,0 +1,85 @@ +/* +** 2023-10-09 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. +*/ +package org.sqlite.jni.wrapper1; +import org.sqlite.jni.capi.CApi; +import org.sqlite.jni.capi.sqlite3; + +/** + A wrapper for communicating C-level (sqlite3*) instances with + Java. These wrappers do not own their associated pointer, they + simply provide a type-safe way to communicate it between Java + and C via JNI. +*/ +public final class SqliteException extends java.lang.RuntimeException { + private int errCode = CApi.SQLITE_ERROR; + private int xerrCode = CApi.SQLITE_ERROR; + private int errOffset = -1; + private int sysErrno = 0; + + /** + Records the given error string and uses SQLITE_ERROR for both the + error code and extended error code. + */ + public SqliteException(String msg){ + super(msg); + } + + /** + Uses sqlite3_errstr(sqlite3ResultCode) for the error string and + sets both the error code and extended error code to the given + value. This approach includes no database-level information and + systemErrno() will be 0, so is intended only for use with sqlite3 + APIs for which a result code is not an error but which the + higher-level wrapper should treat as one. + */ + public SqliteException(int sqlite3ResultCode){ + super(CApi.sqlite3_errstr(sqlite3ResultCode)); + errCode = xerrCode = sqlite3ResultCode; + } + + /** + Records the current error state of db (which must not be null and + must refer to an opened db object). Note that this does not close + the db. + + Design note: closing the db on error is really only useful during + a failed db-open operation, and the place(s) where that can + happen are inside this library, not client-level code. + */ + SqliteException(sqlite3 db){ + super(CApi.sqlite3_errmsg(db)); + errCode = CApi.sqlite3_errcode(db); + xerrCode = CApi.sqlite3_extended_errcode(db); + errOffset = CApi.sqlite3_error_offset(db); + sysErrno = CApi.sqlite3_system_errno(db); + } + + /** + Records the current error state of db (which must not be null and + must refer to an open database). 
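+
+     Illustrative use from client code (the SQL is hypothetical):
+
+     <pre>{@code
+     try {
+       db.prepare("SELECT * FROM no_such_table");
+     } catch(SqliteException e){
+       int rc  = e.errcode();          // primary result code
+       int xrc = e.extendedErrcode();  // extended result code
+     }
+     }</pre>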
+ */ + public SqliteException(Sqlite db){ + this(db.nativeHandle()); + } + + public SqliteException(Sqlite.Stmt stmt){ + this(stmt.getDb()); + } + + public int errcode(){ return errCode; } + public int extendedErrcode(){ return xerrCode; } + public int errorOffset(){ return errOffset; } + public int systemErrno(){ return sysErrno; } + +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java b/ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java new file mode 100644 index 0000000000..528e1f61c6 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/Tester2.java @@ -0,0 +1,1212 @@ +/* +** 2023-10-09 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains a set of tests for the sqlite3 JNI bindings. +*/ +package org.sqlite.jni.wrapper1; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import org.sqlite.jni.capi.CApi; + +/** + An annotation for Tester2 tests which we do not want to run in + reflection-driven test mode because either they are not suitable + for multi-threaded threaded mode or we have to control their execution + order. +*/ +@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD}) +@interface ManualTest{} +/** + Annotation for Tester2 tests which mark those which must be skipped + in multi-threaded mode. +*/ +@java.lang.annotation.Retention(java.lang.annotation.RetentionPolicy.RUNTIME) +@java.lang.annotation.Target({java.lang.annotation.ElementType.METHOD}) +@interface SingleThreadOnly{} + +public class Tester2 implements Runnable { + //! True when running in multi-threaded mode. + private static boolean mtMode = false; + //! True to sleep briefly between tests. + private static boolean takeNaps = false; + //! True to shuffle the order of the tests. + private static boolean shuffle = false; + //! True to dump the list of to-run tests to stdout. + private static int listRunTests = 0; + //! True to squelch all out() and outln() output. + private static boolean quietMode = false; + //! Total number of runTests() calls. + private static int nTestRuns = 0; + //! List of test*() methods to run. + private static List testMethods = null; + //! List of exceptions collected by run() + private static final List listErrors = new ArrayList<>(); + private static final class Metrics { + //! Number of times createNewDb() (or equivalent) is invoked. + volatile int dbOpen = 0; + } + + //! Instance ID. + private final Integer tId; + + Tester2(Integer id){ + tId = id; + } + + static final Metrics metrics = new Metrics(); + + public static synchronized void outln(){ + if( !quietMode ){ + System.out.println(); + } + } + + public static synchronized void outPrefix(){ + if( !quietMode ){ + System.out.print(Thread.currentThread().getName()+": "); + } + } + + public static synchronized void outln(Object val){ + if( !quietMode ){ + outPrefix(); + System.out.println(val); + } + } + + public static synchronized void out(Object val){ + if( !quietMode ){ + System.out.print(val); + } + } + + @SuppressWarnings("unchecked") + public static synchronized void out(Object... 
vals){ + if( !quietMode ){ + outPrefix(); + for(Object v : vals) out(v); + } + } + + @SuppressWarnings("unchecked") + public static synchronized void outln(Object... vals){ + if( !quietMode ){ + out(vals); out("\n"); + } + } + + static volatile int affirmCount = 0; + public static synchronized int affirm(Boolean v, String comment){ + ++affirmCount; + if( false ) assert( v /* prefer assert over exception if it's enabled because + the JNI layer sometimes has to suppress exceptions, + so they might be squelched on their way back to the + top. */); + if( !v ) throw new RuntimeException(comment); + return affirmCount; + } + + public static void affirm(Boolean v){ + affirm(v, "Affirmation failed."); + } + + + public static void execSql(Sqlite db, String sql[]){ + execSql(db, String.join("", sql)); + } + + /** + Executes all SQL statements in the given string. If throwOnError + is true then it will throw for any prepare/step errors, else it + will return the corresponding non-0 result code. + */ + public static int execSql(Sqlite dbw, boolean throwOnError, String sql){ + final ValueHolder rv = new ValueHolder<>(0); + final Sqlite.PrepareMulti pm = new Sqlite.PrepareMulti(){ + @Override public void call(Sqlite.Stmt stmt){ + try{ + while( Sqlite.ROW == (rv.value = stmt.step(throwOnError)) ){} + } + finally{ stmt.finalizeStmt(); } + } + }; + try { + dbw.prepareMulti(sql, pm); + }catch(SqliteException se){ + if( throwOnError ){ + throw se; + }else{ + /* This error (likely) happened in the prepare() phase and we + need to preempt it. */ + rv.value = se.errcode(); + } + } + return (rv.value==Sqlite.DONE) ? 0 : rv.value; + } + + static void execSql(Sqlite db, String sql){ + execSql(db, true, sql); + } + + @SingleThreadOnly /* because it's thread-agnostic */ + private void test1(){ + affirm(Sqlite.libVersionNumber() == CApi.SQLITE_VERSION_NUMBER); + } + + private void nap() throws InterruptedException { + if( takeNaps ){ + Thread.sleep(java.util.concurrent.ThreadLocalRandom.current().nextInt(3, 17), 0); + } + } + + Sqlite openDb(String name){ + final Sqlite db = Sqlite.open(name, Sqlite.OPEN_READWRITE| + Sqlite.OPEN_CREATE| + Sqlite.OPEN_EXRESCODE); + ++metrics.dbOpen; + return db; + } + + Sqlite openDb(){ return openDb(":memory:"); } + + void testOpenDb1(){ + Sqlite db = openDb(); + affirm( 0!=db.nativeHandle().getNativePointer() ); + affirm( "main".equals( db.dbName(0) ) ); + db.setMainDbName("foo"); + affirm( "foo".equals( db.dbName(0) ) ); + affirm( db.dbConfig(Sqlite.DBCONFIG_DEFENSIVE, true) + /* The underlying function has different mangled names in jdk8 + vs jdk19, and this call is here to ensure that the build + fails if it cannot find both names. 
*/ ); + affirm( !db.dbConfig(Sqlite.DBCONFIG_DEFENSIVE, false) ); + SqliteException ex = null; + try{ db.dbConfig(0, false); } + catch(SqliteException e){ ex = e; } + affirm( null!=ex ); + ex = null; + db.close(); + affirm( null==db.nativeHandle() ); + + try{ db = openDb("/no/such/dir/.../probably"); } + catch(SqliteException e){ ex = e; } + affirm( ex!=null ); + affirm( ex.errcode() != 0 ); + affirm( ex.extendedErrcode() != 0 ); + affirm( ex.errorOffset() < 0 ); + // there's no reliable way to predict what ex.systemErrno() might be + } + + void testPrepare1(){ + try (Sqlite db = openDb()) { + Sqlite.Stmt stmt = db.prepare("SELECT ?1"); + Exception e = null; + affirm( null!=stmt.nativeHandle() ); + affirm( db == stmt.getDb() ); + affirm( 1==stmt.bindParameterCount() ); + affirm( "?1".equals(stmt.bindParameterName(1)) ); + affirm( null==stmt.bindParameterName(2) ); + stmt.bindInt64(1, 1); + stmt.bindDouble(1, 1.1); + stmt.bindObject(1, db); + stmt.bindNull(1); + stmt.bindText(1, new byte[] {32,32,32}); + stmt.bindText(1, "123"); + stmt.bindText16(1, "123".getBytes(StandardCharsets.UTF_16)); + stmt.bindText16(1, "123"); + stmt.bindZeroBlob(1, 8); + stmt.bindBlob(1, new byte[] {1,2,3,4}); + stmt.bindInt(1, 17); + try{ stmt.bindInt(2,1); } + catch(Exception ex){ e = ex; } + affirm( null!=e ); + e = null; + affirm( stmt.step() ); + try{ stmt.columnInt(1); } + catch(Exception ex){ e = ex; } + affirm( null!=e ); + e = null; + affirm( 17 == stmt.columnInt(0) ); + affirm( 17L == stmt.columnInt64(0) ); + affirm( 17.0 == stmt.columnDouble(0) ); + affirm( "17".equals(stmt.columnText16(0)) ); + affirm( !stmt.step() ); + stmt.reset(); + affirm( Sqlite.ROW==stmt.step(false) ); + affirm( !stmt.step() ); + affirm( 0 == stmt.finalizeStmt() ); + affirm( null==stmt.nativeHandle() ); + + stmt = db.prepare("SELECT ?"); + stmt.bindObject(1, db); + affirm( Sqlite.ROW == stmt.step(false) ); + affirm( db==stmt.columnObject(0) ); + affirm( db==stmt.columnObject(0, Sqlite.class ) ); + affirm( null==stmt.columnObject(0, Sqlite.Stmt.class ) ); + affirm( 0==stmt.finalizeStmt() ) + /* getting a non-0 out of sqlite3_finalize() is tricky */; + affirm( null==stmt.nativeHandle() ); + } + } + + void testUdfScalar(){ + final ValueHolder xDestroyCalled = new ValueHolder<>(0); + try (Sqlite db = openDb()) { + execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)"); + final ValueHolder vh = new ValueHolder<>(0); + final ScalarFunction f = new ScalarFunction(){ + public void xFunc(SqlFunction.Arguments args){ + affirm( db == args.getDb() ); + for( SqlFunction.Arguments.Arg arg : args ){ + vh.value += arg.getInt(); + } + args.resultInt(vh.value); + } + public void xDestroy(){ + ++xDestroyCalled.value; + } + }; + db.createFunction("myfunc", -1, f); + Sqlite.Stmt q = db.prepare("select myfunc(1,2,3)"); + affirm( q.step() ); + affirm( 6 == vh.value ); + affirm( 6 == q.columnInt(0) ); + q.finalizeStmt(); + affirm( 0 == xDestroyCalled.value ); + vh.value = 0; + q = db.prepare("select myfunc(-1,-2,-3)"); + affirm( q.step() ); + affirm( -6 == vh.value ); + affirm( -6 == q.columnInt(0) ); + affirm( 0 == xDestroyCalled.value ); + q.finalizeStmt(); + } + affirm( 1 == xDestroyCalled.value ); + } + + void testUdfAggregate(){ + final ValueHolder xDestroyCalled = new ValueHolder<>(0); + Sqlite.Stmt q = null; + try (Sqlite db = openDb()) { + execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)"); + final AggregateFunction f = new AggregateFunction(){ + public void xStep(SqlFunction.Arguments args){ + final ValueHolder 
agg = this.getAggregateState(args, 0); + for( SqlFunction.Arguments.Arg arg : args ){ + agg.value += arg.getInt(); + } + } + public void xFinal(SqlFunction.Arguments args){ + final Integer v = this.takeAggregateState(args); + if( null==v ) args.resultNull(); + else args.resultInt(v); + } + public void xDestroy(){ + ++xDestroyCalled.value; + } + }; + db.createFunction("summer", 1, f); + q = db.prepare( + "with cte(v) as ("+ + "select 3 union all select 5 union all select 7"+ + ") select summer(v), summer(v+1) from cte" + /* ------------------^^^^^^^^^^^ ensures that we're handling + sqlite3_aggregate_context() properly. */ + ); + affirm( q.step() ); + affirm( 15==q.columnInt(0) ); + q.finalizeStmt(); + q = null; + affirm( 0 == xDestroyCalled.value ); + db.createFunction("summerN", -1, f); + + q = db.prepare("select summerN(1,8,9), summerN(2,3,4)"); + affirm( q.step() ); + affirm( 18==q.columnInt(0) ); + affirm( 9==q.columnInt(1) ); + q.finalizeStmt(); + q = null; + + }/*db*/ + finally{ + if( null!=q ) q.finalizeStmt(); + } + affirm( 2 == xDestroyCalled.value + /* because we've bound the same instance twice */ ); + } + + private void testUdfWindow(){ + final Sqlite db = openDb(); + /* Example window function, table, and results taken from: + https://sqlite.org/windowfunctions.html#udfwinfunc */ + final WindowFunction func = new WindowFunction(){ + //! Impl of xStep() and xInverse() + private void xStepInverse(SqlFunction.Arguments args, int v){ + this.getAggregateState(args,0).value += v; + } + @Override public void xStep(SqlFunction.Arguments args){ + this.xStepInverse(args, args.getInt(0)); + } + @Override public void xInverse(SqlFunction.Arguments args){ + this.xStepInverse(args, -args.getInt(0)); + } + //! 
Impl of xFinal() and xValue() + private void xFinalValue(SqlFunction.Arguments args, Integer v){ + if(null == v) args.resultNull(); + else args.resultInt(v); + } + @Override public void xFinal(SqlFunction.Arguments args){ + xFinalValue(args, this.takeAggregateState(args)); + affirm( null == this.getAggregateState(args,null).value ); + } + @Override public void xValue(SqlFunction.Arguments args){ + xFinalValue(args, this.getAggregateState(args,null).value); + } + }; + db.createFunction("winsumint", 1, func); + execSql(db, new String[] { + "CREATE TEMP TABLE twin(x, y); INSERT INTO twin VALUES", + "('a', 4),('b', 5),('c', 3),('d', 8),('e', 1)" + }); + final Sqlite.Stmt stmt = db.prepare( + "SELECT x, winsumint(y) OVER ("+ + "ORDER BY x ROWS BETWEEN 1 PRECEDING AND 1 FOLLOWING"+ + ") AS sum_y "+ + "FROM twin ORDER BY x;" + ); + int n = 0; + while( stmt.step() ){ + final String s = stmt.columnText16(0); + final int i = stmt.columnInt(1); + switch(++n){ + case 1: affirm( "a".equals(s) && 9==i ); break; + case 2: affirm( "b".equals(s) && 12==i ); break; + case 3: affirm( "c".equals(s) && 16==i ); break; + case 4: affirm( "d".equals(s) && 12==i ); break; + case 5: affirm( "e".equals(s) && 9==i ); break; + default: affirm( false /* cannot happen */ ); + } + } + stmt.close(); + affirm( 5 == n ); + db.close(); + } + + private void testKeyword(){ + final int n = Sqlite.keywordCount(); + affirm( n>0 ); + affirm( !Sqlite.keywordCheck("_nope_") ); + affirm( Sqlite.keywordCheck("seLect") ); + affirm( null!=Sqlite.keywordName(0) ); + affirm( null!=Sqlite.keywordName(n-1) ); + affirm( null==Sqlite.keywordName(n) ); + } + + + private void testExplain(){ + final Sqlite db = openDb(); + Sqlite.Stmt q = db.prepare("SELECT 1"); + affirm( 0 == q.isExplain() ); + q.explain(0); + affirm( 0 == q.isExplain() ); + q.explain(1); + affirm( 1 == q.isExplain() ); + q.explain(2); + affirm( 2 == q.isExplain() ); + Exception ex = null; + try{ + q.explain(-1); + }catch(Exception e){ + ex = e; + } + affirm( ex instanceof SqliteException ); + q.finalizeStmt(); + db.close(); + } + + + private void testTrace(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + /* Ensure that characters outside of the UTF BMP survive the trip + from Java to sqlite3 and back to Java. (At no small efficiency + penalty.) 
*/ + final String nonBmpChar = "😃"; + db.trace( + Sqlite.TRACE_ALL, + new Sqlite.TraceCallback(){ + @Override public void call(int traceFlag, Object pNative, Object x){ + ++counter.value; + //outln("TRACE "+traceFlag+" pNative = "+pNative.getClass().getName()); + switch(traceFlag){ + case Sqlite.TRACE_STMT: + affirm(pNative instanceof Sqlite.Stmt); + //outln("TRACE_STMT sql = "+x); + affirm(x instanceof String); + affirm( ((String)x).indexOf(nonBmpChar) > 0 ); + break; + case Sqlite.TRACE_PROFILE: + affirm(pNative instanceof Sqlite.Stmt); + affirm(x instanceof Long); + //outln("TRACE_PROFILE time = "+x); + break; + case Sqlite.TRACE_ROW: + affirm(pNative instanceof Sqlite.Stmt); + affirm(null == x); + //outln("TRACE_ROW = "+sqlite3_column_text16((sqlite3_stmt)pNative, 0)); + break; + case Sqlite.TRACE_CLOSE: + affirm(pNative instanceof Sqlite); + affirm(null == x); + break; + default: + affirm(false /*cannot happen*/); + break; + } + } + }); + execSql(db, "SELECT coalesce(null,null,'"+nonBmpChar+"'); "+ + "SELECT 'w"+nonBmpChar+"orld'"); + affirm( 6 == counter.value ); + db.close(); + affirm( 7 == counter.value ); + } + + private void testStatus(){ + final Sqlite db = openDb(); + execSql(db, "create table t(a); insert into t values(1),(2),(3)"); + + Sqlite.Status s = Sqlite.libStatus(Sqlite.STATUS_MEMORY_USED, false); + affirm( s.current > 0 ); + affirm( s.peak >= s.current ); + + s = db.status(Sqlite.DBSTATUS_SCHEMA_USED, false); + affirm( s.current > 0 ); + affirm( s.peak == 0 /* always 0 for SCHEMA_USED */ ); + + db.close(); + } + + @SingleThreadOnly /* because multiple threads legitimately make these + results unpredictable */ + private synchronized void testAutoExtension(){ + final ValueHolder val = new ValueHolder<>(0); + final ValueHolder toss = new ValueHolder<>(null); + final Sqlite.AutoExtension ax = new Sqlite.AutoExtension(){ + @Override public void call(Sqlite db){ + ++val.value; + if( null!=toss.value ){ + throw new RuntimeException(toss.value); + } + } + }; + Sqlite.addAutoExtension(ax); + openDb().close(); + affirm( 1==val.value ); + openDb().close(); + affirm( 2==val.value ); + Sqlite.clearAutoExtensions(); + openDb().close(); + affirm( 2==val.value ); + + Sqlite.addAutoExtension( ax ); + Sqlite.addAutoExtension( ax ); // Must not add a second entry + Sqlite.addAutoExtension( ax ); // or a third one + openDb().close(); + affirm( 3==val.value ); + + Sqlite db = openDb(); + affirm( 4==val.value ); + execSql(db, "ATTACH ':memory:' as foo"); + affirm( 4==val.value, "ATTACH uses the same connection, not sub-connections." 
); + db.close(); + db = null; + + Sqlite.removeAutoExtension(ax); + openDb().close(); + affirm( 4==val.value ); + Sqlite.addAutoExtension(ax); + Exception err = null; + toss.value = "Throwing from auto_extension."; + try{ + openDb(); + }catch(Exception e){ + err = e; + } + affirm( err!=null ); + affirm( err.getMessage().contains(toss.value) ); + toss.value = null; + + val.value = 0; + final Sqlite.AutoExtension ax2 = new Sqlite.AutoExtension(){ + @Override public void call(Sqlite db){ + ++val.value; + } + }; + Sqlite.addAutoExtension(ax2); + openDb().close(); + affirm( 2 == val.value ); + Sqlite.removeAutoExtension(ax); + openDb().close(); + affirm( 3 == val.value ); + Sqlite.addAutoExtension(ax); + openDb().close(); + affirm( 5 == val.value ); + Sqlite.removeAutoExtension(ax2); + openDb().close(); + affirm( 6 == val.value ); + Sqlite.addAutoExtension(ax2); + openDb().close(); + affirm( 8 == val.value ); + + Sqlite.clearAutoExtensions(); + openDb().close(); + affirm( 8 == val.value ); + } + + private void testBackup(){ + final Sqlite dbDest = openDb(); + + try (Sqlite dbSrc = openDb()) { + execSql(dbSrc, new String[]{ + "pragma page_size=512; VACUUM;", + "create table t(a);", + "insert into t(a) values(1),(2),(3);" + }); + Exception e = null; + try { + dbSrc.initBackup("main",dbSrc,"main"); + }catch(Exception x){ + e = x; + } + affirm( e instanceof SqliteException ); + e = null; + try (Sqlite.Backup b = dbDest.initBackup("main",dbSrc,"main")) { + affirm( null!=b ); + int rc; + while( Sqlite.DONE!=(rc = b.step(1)) ){ + affirm( 0==rc ); + } + affirm( b.pageCount() > 0 ); + b.finish(); + } + } + + try (Sqlite.Stmt q = dbDest.prepare("SELECT sum(a) from t")) { + q.step(); + affirm( q.columnInt(0) == 6 ); + } + dbDest.close(); + } + + private void testCollation(){ + final Sqlite db = openDb(); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + final Sqlite.Collation myCollation = new Sqlite.Collation() { + private final String myState = + "this is local state. There is much like it, but this is mine."; + @Override + // Reverse-sorts its inputs... + public int call(byte[] lhs, byte[] rhs){ + int len = lhs.length > rhs.length ? 
rhs.length : lhs.length; + int c = 0, i = 0; + for(i = 0; i < len; ++i){ + c = lhs[i] - rhs[i]; + if(0 != c) break; + } + if(0==c){ + if(i < lhs.length) c = 1; + else if(i < rhs.length) c = -1; + } + return -c; + } + }; + final Sqlite.CollationNeeded collLoader = new Sqlite.CollationNeeded(){ + @Override + public void call(Sqlite dbArg, int eTextRep, String collationName){ + affirm(dbArg == db); + db.createCollation("reversi", eTextRep, myCollation); + } + }; + db.onCollationNeeded(collLoader); + Sqlite.Stmt stmt = db.prepare("SELECT a FROM t ORDER BY a COLLATE reversi"); + int counter = 0; + while( stmt.step() ){ + final String val = stmt.columnText16(0); + ++counter; + switch(counter){ + case 1: affirm("c".equals(val)); break; + case 2: affirm("b".equals(val)); break; + case 3: affirm("a".equals(val)); break; + } + } + affirm(3 == counter); + stmt.finalizeStmt(); + stmt = db.prepare("SELECT a FROM t ORDER BY a"); + counter = 0; + while( stmt.step() ){ + final String val = stmt.columnText16(0); + ++counter; + //outln("Non-REVERSI'd row#"+counter+": "+val); + switch(counter){ + case 3: affirm("c".equals(val)); break; + case 2: affirm("b".equals(val)); break; + case 1: affirm("a".equals(val)); break; + } + } + affirm(3 == counter); + stmt.finalizeStmt(); + db.onCollationNeeded(null); + db.close(); + } + + @SingleThreadOnly /* because threads inherently break this test */ + private void testBusy(){ + final String dbName = "_busy-handler.db"; + try{ + Sqlite db1 = openDb(dbName); + ++metrics.dbOpen; + execSql(db1, "CREATE TABLE IF NOT EXISTS t(a)"); + Sqlite db2 = openDb(dbName); + ++metrics.dbOpen; + + final ValueHolder xBusyCalled = new ValueHolder<>(0); + Sqlite.BusyHandler handler = new Sqlite.BusyHandler(){ + @Override public int call(int n){ + return n > 2 ? 0 : ++xBusyCalled.value; + } + }; + db2.setBusyHandler(handler); + + // Force a locked condition... 
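+      // db1 takes an exclusive lock below, so db2's prepare() will invoke
+      // the busy handler registered above. A non-zero return value asks
+      // SQLite to retry the lock; returning 0 gives up, so the statement
+      // ultimately fails with Sqlite.BUSY, as the affirms below expect.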
+ execSql(db1, "BEGIN EXCLUSIVE"); + int rc = 0; + SqliteException ex = null; + try{ + db2.prepare("SELECT * from t"); + }catch(SqliteException x){ + ex = x; + } + affirm( null!=ex ); + affirm( Sqlite.BUSY == ex.errcode() ); + affirm( 3 == xBusyCalled.value ); + db1.close(); + db2.close(); + }finally{ + try{(new java.io.File(dbName)).delete();} + catch(Exception e){/* ignore */} + } + } + + private void testCommitHook(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder hookResult = new ValueHolder<>(0); + final Sqlite.CommitHook theHook = new Sqlite.CommitHook(){ + @Override public int call(){ + ++counter.value; + return hookResult.value; + } + }; + Sqlite.CommitHook oldHook = db.setCommitHook(theHook); + affirm( null == oldHook ); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 2 == counter.value ); + execSql(db, "BEGIN; SELECT 1; SELECT 2; COMMIT;"); + affirm( 2 == counter.value /* NOT invoked if no changes are made */ ); + execSql(db, "BEGIN; update t set a='d' where a='c'; COMMIT;"); + affirm( 3 == counter.value ); + oldHook = db.setCommitHook(theHook); + affirm( theHook == oldHook ); + execSql(db, "BEGIN; update t set a='e' where a='d'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = db.setCommitHook(null); + affirm( theHook == oldHook ); + execSql(db, "BEGIN; update t set a='f' where a='e'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = db.setCommitHook(null); + affirm( null == oldHook ); + execSql(db, "BEGIN; update t set a='g' where a='f'; COMMIT;"); + affirm( 4 == counter.value ); + + final Sqlite.CommitHook newHook = new Sqlite.CommitHook(){ + @Override public int call(){return 0;} + }; + oldHook = db.setCommitHook(newHook); + affirm( null == oldHook ); + execSql(db, "BEGIN; update t set a='h' where a='g'; COMMIT;"); + affirm( 4 == counter.value ); + oldHook = db.setCommitHook(theHook); + affirm( newHook == oldHook ); + execSql(db, "BEGIN; update t set a='i' where a='h'; COMMIT;"); + affirm( 5 == counter.value ); + hookResult.value = Sqlite.ERROR; + int rc = execSql(db, false, "BEGIN; update t set a='j' where a='i'; COMMIT;"); + affirm( Sqlite.CONSTRAINT_COMMITHOOK == rc ); + affirm( 6 == counter.value ); + db.close(); + } + + private void testRollbackHook(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + final Sqlite.RollbackHook theHook = new Sqlite.RollbackHook(){ + @Override public void call(){ + ++counter.value; + } + }; + Sqlite.RollbackHook oldHook = db.setRollbackHook(theHook); + affirm( null == oldHook ); + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 0 == counter.value ); + execSql(db, false, "BEGIN; SELECT 1; SELECT 2; ROLLBACK;"); + affirm( 1 == counter.value /* contra to commit hook, is invoked if no changes are made */ ); + + final Sqlite.RollbackHook newHook = new Sqlite.RollbackHook(){ + @Override public void call(){} + }; + oldHook = db.setRollbackHook(newHook); + affirm( theHook == oldHook ); + execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 1 == counter.value ); + oldHook = db.setRollbackHook(theHook); + affirm( newHook == oldHook ); + execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 2 == counter.value ); + int rc = execSql(db, false, "BEGIN; SELECT 1; ROLLBACK;"); + affirm( 0 == rc ); + affirm( 3 == counter.value ); + db.close(); + } + + private void testUpdateHook(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + 
final ValueHolder expectedOp = new ValueHolder<>(0); + final Sqlite.UpdateHook theHook = new Sqlite.UpdateHook(){ + @Override + public void call(int opId, String dbName, String tableName, long rowId){ + ++counter.value; + if( 0!=expectedOp.value ){ + affirm( expectedOp.value == opId ); + } + } + }; + Sqlite.UpdateHook oldHook = db.setUpdateHook(theHook); + affirm( null == oldHook ); + expectedOp.value = Sqlite.INSERT; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + affirm( 3 == counter.value ); + expectedOp.value = Sqlite.UPDATE; + execSql(db, "update t set a='d' where a='c';"); + affirm( 4 == counter.value ); + oldHook = db.setUpdateHook(theHook); + affirm( theHook == oldHook ); + expectedOp.value = Sqlite.DELETE; + execSql(db, "DELETE FROM t where a='d'"); + affirm( 5 == counter.value ); + oldHook = db.setUpdateHook(null); + affirm( theHook == oldHook ); + execSql(db, "update t set a='e' where a='b';"); + affirm( 5 == counter.value ); + oldHook = db.setUpdateHook(null); + affirm( null == oldHook ); + + final Sqlite.UpdateHook newHook = new Sqlite.UpdateHook(){ + @Override public void call(int opId, String dbName, String tableName, long rowId){ + } + }; + oldHook = db.setUpdateHook(newHook); + affirm( null == oldHook ); + execSql(db, "update t set a='h' where a='a'"); + affirm( 5 == counter.value ); + oldHook = db.setUpdateHook(theHook); + affirm( newHook == oldHook ); + expectedOp.value = Sqlite.UPDATE; + execSql(db, "update t set a='i' where a='h'"); + affirm( 6 == counter.value ); + db.close(); + } + + private void testProgress(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + db.setProgressHandler(1, new Sqlite.ProgressHandler(){ + @Override public int call(){ + ++counter.value; + return 0; + } + }); + execSql(db, "SELECT 1; SELECT 2;"); + affirm( counter.value > 0 ); + int nOld = counter.value; + db.setProgressHandler(0, null); + execSql(db, "SELECT 1; SELECT 2;"); + affirm( nOld == counter.value ); + db.close(); + } + + private void testAuthorizer(){ + final Sqlite db = openDb(); + final ValueHolder counter = new ValueHolder<>(0); + final ValueHolder authRc = new ValueHolder<>(0); + final Sqlite.Authorizer auth = new Sqlite.Authorizer(){ + public int call(int op, String s0, String s1, String s2, String s3){ + ++counter.value; + //outln("xAuth(): "+s0+" "+s1+" "+s2+" "+s3); + return authRc.value; + } + }; + execSql(db, "CREATE TABLE t(a); INSERT INTO t(a) VALUES('a'),('b'),('c')"); + db.setAuthorizer(auth); + execSql(db, "UPDATE t SET a=1"); + affirm( 1 == counter.value ); + authRc.value = Sqlite.DENY; + int rc = execSql(db, false, "UPDATE t SET a=2"); + affirm( Sqlite.AUTH==rc ); + db.setAuthorizer(null); + rc = execSql(db, false, "UPDATE t SET a=2"); + affirm( 0==rc ); + db.close(); + } + + private void testBlobOpen(){ + final Sqlite db = openDb(); + + execSql(db, "CREATE TABLE T(a BLOB);" + +"INSERT INTO t(rowid,a) VALUES(1, 'def'),(2, 'XYZ');" + ); + Sqlite.Blob b = db.blobOpen("main", "t", "a", + db.lastInsertRowId(), true); + affirm( 3==b.bytes() ); + b.write(new byte[] {100, 101, 102 /*"DEF"*/}, 0); + b.close(); + Sqlite.Stmt stmt = db.prepare("SELECT length(a), a FROM t ORDER BY a"); + affirm( stmt.step() ); + affirm( 3 == stmt.columnInt(0) ); + affirm( "def".equals(stmt.columnText16(1)) ); + stmt.finalizeStmt(); + + b = db.blobOpen("main", "t", "a", db.lastInsertRowId(), false); + final byte[] tgt = new byte[3]; + b.read( tgt, 0 ); + affirm( 100==tgt[0] && 101==tgt[1] && 102==tgt[2], "DEF" ); + 
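+    // An open blob handle cannot change the size of the underlying cell,
+    // so the next statement first resizes the value with zeroblob(10);
+    // a fresh read/write handle is then opened on it for the write/read
+    // round-trip below.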
execSql(db,"UPDATE t SET a=zeroblob(10) WHERE rowid=2"); + b.close(); + b = db.blobOpen("main", "t", "a", db.lastInsertRowId(), true); + byte[] bw = new byte[]{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9 + }; + b.write(bw, 0); + byte[] br = new byte[10]; + b.read(br, 0); + for( int i = 0; i < br.length; ++i ){ + affirm(bw[i] == br[i]); + } + b.close(); + db.close(); + } + + void testPrepareMulti(){ + final ValueHolder fCount = new ValueHolder<>(0); + final ValueHolder mCount = new ValueHolder<>(0); + try (Sqlite db = openDb()) { + execSql(db, "create table t(a); insert into t(a) values(1),(2),(3)"); + db.createFunction("counter", -1, new ScalarFunction(){ + @Override public void xFunc(SqlFunction.Arguments args){ + ++fCount.value; + args.resultNull(); + } + } + ); + final Sqlite.PrepareMulti pm = new Sqlite.PrepareMultiFinalize( + new Sqlite.PrepareMulti() { + @Override public void call(Sqlite.Stmt q){ + ++mCount.value; + while(q.step()){} + } + } + ); + final String sql = "select counter(*) from t;"+ + "select counter(*) from t; /* comment */"+ + "select counter(*) from t; -- comment\n" + ; + db.prepareMulti(sql, pm); + } + affirm( 3 == mCount.value ); + affirm( 9 == fCount.value ); + } + + + /* Copy/paste/rename this to add new tests. */ + private void _testTemplate(){ + try (Sqlite db = openDb()) { + Sqlite.Stmt stmt = db.prepare("SELECT 1"); + stmt.finalizeStmt(); + } + } + + private void runTests(boolean fromThread) throws Exception { + List mlist = testMethods; + affirm( null!=mlist ); + if( shuffle ){ + mlist = new ArrayList<>( testMethods.subList(0, testMethods.size()) ); + java.util.Collections.shuffle(mlist); + } + if( (!fromThread && listRunTests>0) || listRunTests>1 ){ + synchronized(this.getClass()){ + if( !fromThread ){ + out("Initial test"," list: "); + for(java.lang.reflect.Method m : testMethods){ + out(m.getName()+" "); + } + outln(); + outln("(That list excludes some which are hard-coded to run.)"); + } + out("Running"," tests: "); + for(java.lang.reflect.Method m : mlist){ + out(m.getName()+" "); + } + outln(); + } + } + for(java.lang.reflect.Method m : mlist){ + nap(); + try{ + m.invoke(this); + }catch(java.lang.reflect.InvocationTargetException e){ + outln("FAILURE: ",m.getName(),"(): ", e.getCause()); + throw e; + } + } + synchronized( this.getClass() ){ + ++nTestRuns; + } + } + + public void run() { + try { + runTests(0!=this.tId); + }catch(Exception e){ + synchronized( listErrors ){ + listErrors.add(e); + } + }finally{ + Sqlite.uncacheThread(); + } + } + + /** + Runs the basic sqlite3 JNI binding sanity-check suite. + + CLI flags: + + -q|-quiet: disables most test output. + + -t|-thread N: runs the tests in N threads + concurrently. Default=1. + + -r|-repeat N: repeats the tests in a loop N times, each one + consisting of the -thread value's threads. + + -shuffle: randomizes the order of most of the test functions. + + -naps: sleep small random intervals between tests in order to add + some chaos for cross-thread contention. + + -list-tests: outputs the list of tests being run, minus some + which are hard-coded. In multi-threaded mode, use this twice to + to emit the list run by each thread (which may differ from the initial + list, in particular if -shuffle is used). + + -fail: forces an exception to be thrown during the test run. Use + with -shuffle to make its appearance unpredictable. + + -v: emit some developer-mode info at the end. 
+ */ + public static void main(String[] args) throws Exception { + int nThread = 1; + int nRepeat = 1; + boolean doSomethingForDev = false; + boolean forceFail = false; + boolean sqlLog = false; + boolean configLog = false; + boolean squelchTestOutput = false; + for( int i = 0; i < args.length; ){ + String arg = args[i++]; + if(arg.startsWith("-")){ + arg = arg.replaceFirst("-+",""); + if(arg.equals("v")){ + doSomethingForDev = true; + //listBoundMethods(); + }else if(arg.equals("t") || arg.equals("thread")){ + nThread = Integer.parseInt(args[i++]); + }else if(arg.equals("r") || arg.equals("repeat")){ + nRepeat = Integer.parseInt(args[i++]); + }else if(arg.equals("shuffle")){ + shuffle = true; + }else if(arg.equals("list-tests")){ + ++listRunTests; + }else if(arg.equals("fail")){ + forceFail = true; + }else if(arg.equals("sqllog")){ + sqlLog = true; + }else if(arg.equals("configlog")){ + configLog = true; + }else if(arg.equals("naps")){ + takeNaps = true; + }else if(arg.equals("q") || arg.equals("quiet")){ + squelchTestOutput = true; + }else{ + throw new IllegalArgumentException("Unhandled flag:"+arg); + } + } + } + + if( sqlLog ){ + if( Sqlite.compileOptionUsed("ENABLE_SQLLOG") ){ + Sqlite.libConfigSqlLog( new Sqlite.ConfigSqlLog() { + @Override public void call(Sqlite db, String msg, int op){ + switch(op){ + case 0: outln("Opening db: ",db); break; + case 1: outln("SQL ",db,": ",msg); break; + case 2: outln("Closing db: ",db); break; + } + } + } + ); + }else{ + outln("WARNING: -sqllog is not active because library was built ", + "without SQLITE_ENABLE_SQLLOG."); + } + } + if( configLog ){ + Sqlite.libConfigLog( new Sqlite.ConfigLog() { + @Override public void call(int code, String msg){ + outln("ConfigLog: ",Sqlite.errstr(code),": ", msg); + } + } + ); + } + + quietMode = squelchTestOutput; + outln("If you just saw warning messages regarding CallStaticObjectMethod, ", + "you are very likely seeing the side effects of a known openjdk8 ", + "bug. It is unsightly but does not affect the library."); + + { + // Build list of tests to run from the methods named test*(). + testMethods = new ArrayList<>(); + int nSkipped = 0; + for(final java.lang.reflect.Method m : Tester2.class.getDeclaredMethods()){ + final String name = m.getName(); + if( name.equals("testFail") ){ + if( forceFail ){ + testMethods.add(m); + } + }else if( !m.isAnnotationPresent( ManualTest.class ) ){ + if( nThread>1 && m.isAnnotationPresent( SingleThreadOnly.class ) ){ + if( 0==nSkipped++ ){ + out("Skipping tests in multi-thread mode:"); + } + out(" "+name+"()"); + }else if( name.startsWith("test") ){ + testMethods.add(m); + } + } + } + if( nSkipped>0 ) out("\n"); + } + + final long timeStart = System.currentTimeMillis(); + outln("libversion_number: ", + Sqlite.libVersionNumber(),"\n", + Sqlite.libVersion(),"\n",Sqlite.libSourceId(),"\n", + "SQLITE_THREADSAFE=",CApi.sqlite3_threadsafe()); + final boolean showLoopCount = (nRepeat>1 && nThread>1); + if( showLoopCount ){ + outln("Running ",nRepeat," loop(s) with ",nThread," thread(s) each."); + } + if( takeNaps ) outln("Napping between tests is enabled."); + int nLoop = 0; + for( int n = 0; n < nRepeat; ++n ){ + ++nLoop; + if( showLoopCount ) out((1==nLoop ? 
"" : " ")+nLoop); + if( nThread<=1 ){ + new Tester2(0).runTests(false); + continue; + } + Tester2.mtMode = true; + final ExecutorService ex = Executors.newFixedThreadPool( nThread ); + for( int i = 0; i < nThread; ++i ){ + ex.submit( new Tester2(i), i ); + } + ex.shutdown(); + try{ + ex.awaitTermination(nThread*200, java.util.concurrent.TimeUnit.MILLISECONDS); + ex.shutdownNow(); + }catch (InterruptedException ie){ + ex.shutdownNow(); + Thread.currentThread().interrupt(); + } + if( !listErrors.isEmpty() ){ + quietMode = false; + outln("TEST ERRORS:"); + Exception err = null; + for( Exception e : listErrors ){ + e.printStackTrace(); + if( null==err ) err = e; + } + if( null!=err ) throw err; + } + } + if( showLoopCount ) outln(); + quietMode = false; + + final long timeEnd = System.currentTimeMillis(); + outln("Tests done. Metrics across ",nTestRuns," total iteration(s):"); + outln("\tAssertions checked: ",affirmCount); + outln("\tDatabases opened: ",metrics.dbOpen); + if( doSomethingForDev ){ + CApi.sqlite3_jni_internal_details(); + } + affirm( 0==Sqlite.libReleaseMemory(1) ); + CApi.sqlite3_shutdown(); + int nMethods = 0; + int nNatives = 0; + int nCanonical = 0; + final java.lang.reflect.Method[] declaredMethods = + CApi.class.getDeclaredMethods(); + for(java.lang.reflect.Method m : declaredMethods){ + final int mod = m.getModifiers(); + if( 0!=(mod & java.lang.reflect.Modifier.STATIC) ){ + final String name = m.getName(); + if(name.startsWith("sqlite3_")){ + ++nMethods; + if( 0!=(mod & java.lang.reflect.Modifier.NATIVE) ){ + ++nNatives; + } + } + } + } + outln("\tCApi.sqlite3_*() methods: "+ + nMethods+" total, with "+ + nNatives+" native, "+ + (nMethods - nNatives)+" Java" + ); + outln("\tTotal test time = " + +(timeEnd - timeStart)+"ms"); + } +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java b/ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java new file mode 100644 index 0000000000..7549bb97b2 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/ValueHolder.java @@ -0,0 +1,25 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file contains the ValueHolder utility class. +*/ +package org.sqlite.jni.wrapper1; + +/** + A helper class which simply holds a single value. Its primary use + is for communicating values out of anonymous callbacks, as doing so + requires a "final" reference. +*/ +public class ValueHolder { + public T value; + public ValueHolder(){} + public ValueHolder(T v){value = v;} +} diff --git a/ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java b/ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java new file mode 100644 index 0000000000..a3905567d4 --- /dev/null +++ b/ext/jni/src/org/sqlite/jni/wrapper1/WindowFunction.java @@ -0,0 +1,42 @@ +/* +** 2023-10-16 +** +** The author disclaims copyright to this source code. In place of +** a legal notice, here is a blessing: +** +** May you do good and not evil. +** May you find forgiveness for yourself and forgive others. +** May you share freely, never taking more than you give. +** +************************************************************************* +** This file is part of the wrapper1 interface for sqlite3. 
+*/ +package org.sqlite.jni.wrapper1; + +/** + A SqlFunction implementation for window functions. The T type + represents the type of data accumulated by this function while it + works. e.g. a SUM()-like UDF might use Integer or Long and a + CONCAT()-like UDF might use a StringBuilder or a List. +*/ +public abstract class WindowFunction extends AggregateFunction { + + /** + As for the xInverse() argument of the C API's + sqlite3_create_window_function(). If this function throws, the + exception is reported via sqlite3_result_error(). + */ + public abstract void xInverse(SqlFunction.Arguments args); + + /** + As for the xValue() argument of the C API's + sqlite3_create_window_function(). If this function throws, it is + translated into sqlite3_result_error(). + + Note that the passed-in object will not actually contain any + arguments for xValue() but will contain the context object needed + for setting the call's result or error state. + */ + public abstract void xValue(SqlFunction.Arguments args); + +} diff --git a/ext/jni/src/tests/000-000-sanity.test b/ext/jni/src/tests/000-000-sanity.test new file mode 100644 index 0000000000..4ccbece31c --- /dev/null +++ b/ext/jni/src/tests/000-000-sanity.test @@ -0,0 +1,53 @@ +/* +** This is a comment. There are many like it but this one is mine. +** +** SCRIPT_MODULE_NAME: sanity-check +** xMIXED_MODULE_NAME: mixed-module +** xMODULE_NAME: module-name +** xREQUIRED_PROPERTIES: small fast reliable +** xREQUIRED_PROPERTIES: RECURSIVE_TRIGGERS +** xREQUIRED_PROPERTIES: TEMPSTORE_FILE TEMPSTORE_MEM +** xREQUIRED_PROPERTIES: AUTOVACUUM INCRVACUUM +** +*/ +--print starting up 😃 +--close all +--oom +--db 0 +--new my.db +--null zilch +--testcase 1.0 +SELECT 1, null; +--result 1 zilch +--glob *zil* +--notglob *ZIL* +SELECT 1, 2; +intentional error; +--run +--testcase json-1 +SELECT json_array(1,2,3) +--json [1,2,3] +--testcase tableresult-1 + select 1, 'a'; + select 2, 'b'; +--tableresult + # [a-z] + 2 b +--end +--testcase json-block-1 + select json_array(1,2,3); + select json_object('a',1,'b',2); +--json-block + [1,2,3] + {"a":1,"b":2} +--end +--testcase col-names-on +--column-names 1 + select 1 as 'a', 2 as 'b'; +--result a 1 b 2 +--testcase col-names-off +--column-names 0 + select 1 as 'a', 2 as 'b'; +--result 1 2 +--close +--print reached the end 😃 diff --git a/ext/jni/src/tests/000-001-ignored.test b/ext/jni/src/tests/000-001-ignored.test new file mode 100644 index 0000000000..5af852e197 --- /dev/null +++ b/ext/jni/src/tests/000-001-ignored.test @@ -0,0 +1,9 @@ +/* +** This script must be marked as ignored because it contains +** content which triggers that condition. +** +** SCRIPT_MODULE_NAME: ignored +** +*/ + +| diff --git a/ext/jni/src/tests/900-001-fts.test b/ext/jni/src/tests/900-001-fts.test new file mode 100644 index 0000000000..65285e86b0 --- /dev/null +++ b/ext/jni/src/tests/900-001-fts.test @@ -0,0 +1,12 @@ +/* +** SCRIPT_MODULE_NAME: fts5-sanity-checks +** xREQUIRED_PROPERTIES: FTS5 +** +*/ + +--testcase 1.0 +CREATE VIRTUAL TABLE email USING fts5(sender, title, body); +insert into email values('fred','Help!','Dear Sir...'); +insert into email values('barney','Assistance','Dear Madam...'); +select * from email where email match 'assistance'; +--result barney Assistance {Dear Madam...} diff --git a/ext/lsm1/Makefile b/ext/lsm1/Makefile deleted file mode 100644 index 7056432d2d..0000000000 --- a/ext/lsm1/Makefile +++ /dev/null @@ -1,56 +0,0 @@ -# -# This Makefile is designed for use with main.mk in the root directory of -# this project. 
After including main.mk, the users makefile should contain: -# -# LSMDIR=$(TOP)/ext/lsm1/ -# LSMOPTS=-fPIC -# include $(LSMDIR)/Makefile -# -# The most useful targets are [lsmtest] and [lsm.so]. -# - -LSMOBJ = \ - lsm_ckpt.o \ - lsm_file.o \ - lsm_log.o \ - lsm_main.o \ - lsm_mem.o \ - lsm_mutex.o \ - lsm_shared.o \ - lsm_sorted.o \ - lsm_str.o \ - lsm_tree.o \ - lsm_unix.o \ - lsm_win32.o \ - lsm_varint.o \ - lsm_vtab.o - -LSMHDR = \ - $(LSMDIR)/lsm.h \ - $(LSMDIR)/lsmInt.h - -LSMTESTSRC = $(LSMDIR)/lsm-test/lsmtest1.c $(LSMDIR)/lsm-test/lsmtest2.c \ - $(LSMDIR)/lsm-test/lsmtest3.c $(LSMDIR)/lsm-test/lsmtest4.c \ - $(LSMDIR)/lsm-test/lsmtest5.c $(LSMDIR)/lsm-test/lsmtest6.c \ - $(LSMDIR)/lsm-test/lsmtest7.c $(LSMDIR)/lsm-test/lsmtest8.c \ - $(LSMDIR)/lsm-test/lsmtest9.c \ - $(LSMDIR)/lsm-test/lsmtest_datasource.c \ - $(LSMDIR)/lsm-test/lsmtest_func.c $(LSMDIR)/lsm-test/lsmtest_io.c \ - $(LSMDIR)/lsm-test/lsmtest_main.c $(LSMDIR)/lsm-test/lsmtest_mem.c \ - $(LSMDIR)/lsm-test/lsmtest_tdb.c $(LSMDIR)/lsm-test/lsmtest_tdb3.c \ - $(LSMDIR)/lsm-test/lsmtest_util.c $(LSMDIR)/lsm-test/lsmtest_win32.c - - -# all: lsm.so - -LSMOPTS += -fPIC -DLSM_MUTEX_PTHREADS=1 -I$(LSMDIR) -DHAVE_ZLIB - -lsm.so: $(LSMOBJ) - $(TCCX) -shared -fPIC -o lsm.so $(LSMOBJ) - -%.o: $(LSMDIR)/%.c $(LSMHDR) sqlite3.h - $(TCCX) $(LSMOPTS) -c $< - -lsmtest$(EXE): $(LSMOBJ) $(LSMTESTSRC) $(LSMTESTHDR) sqlite3.o - # $(TCPPX) -c $(TOP)/lsm-test/lsmtest_tdb2.cc - $(TCCX) $(LSMOPTS) $(LSMTESTSRC) $(LSMOBJ) sqlite3.o -o lsmtest$(EXE) $(THREADLIB) -lz diff --git a/ext/lsm1/Makefile.msc b/ext/lsm1/Makefile.msc deleted file mode 100644 index 3e5a3b3310..0000000000 --- a/ext/lsm1/Makefile.msc +++ /dev/null @@ -1,102 +0,0 @@ -# -# This Makefile is designed for use with Makefile.msc in the root directory -# of this project. The Makefile.msc should contain: -# -# LSMDIR=$(TOP)\ext\lsm1 -# !INCLUDE $(LSMDIR)\Makefile.msc -# -# The most useful targets are [lsmtest.exe] and [lsm.dll]. 
-# - -LSMOBJ = \ - lsm_ckpt.lo \ - lsm_file.lo \ - lsm_log.lo \ - lsm_main.lo \ - lsm_mem.lo \ - lsm_mutex.lo \ - lsm_shared.lo \ - lsm_sorted.lo \ - lsm_str.lo \ - lsm_tree.lo \ - lsm_unix.lo \ - lsm_win32.lo \ - lsm_varint.lo \ - lsm_vtab.lo - -LSMHDR = \ - $(LSMDIR)\lsm.h \ - $(LSMDIR)\lsmInt.h - -LSMTESTSRC = $(LSMDIR)\lsm-test\lsmtest1.c $(LSMDIR)\lsm-test\lsmtest2.c \ - $(LSMDIR)\lsm-test\lsmtest3.c $(LSMDIR)\lsm-test\lsmtest4.c \ - $(LSMDIR)\lsm-test\lsmtest5.c $(LSMDIR)\lsm-test\lsmtest6.c \ - $(LSMDIR)\lsm-test\lsmtest7.c $(LSMDIR)\lsm-test\lsmtest8.c \ - $(LSMDIR)\lsm-test\lsmtest9.c \ - $(LSMDIR)\lsm-test\lsmtest_datasource.c \ - $(LSMDIR)\lsm-test\lsmtest_func.c $(LSMDIR)\lsm-test\lsmtest_io.c \ - $(LSMDIR)\lsm-test\lsmtest_main.c $(LSMDIR)\lsm-test\lsmtest_mem.c \ - $(LSMDIR)\lsm-test\lsmtest_tdb.c $(LSMDIR)\lsm-test\lsmtest_tdb3.c \ - $(LSMDIR)\lsm-test\lsmtest_util.c $(LSMDIR)\lsm-test\lsmtest_win32.c - -# all: lsm.dll lsmtest.exe - -LSMOPTS = $(NO_WARN) -DLSM_MUTEX_WIN32=1 -I$(LSMDIR) - -!IF $(DEBUG)>2 -LSMOPTS = $(LSMOPTS) -DLSM_DEBUG=1 -!ENDIF - -!IF $(MEMDEBUG)!=0 -LSMOPTS = $(LSMOPTS) -DLSM_DEBUG_MEM=1 -!ENDIF - -lsm_ckpt.lo: $(LSMDIR)\lsm_ckpt.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_ckpt.c - -lsm_file.lo: $(LSMDIR)\lsm_file.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_file.c - -lsm_log.lo: $(LSMDIR)\lsm_log.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_log.c - -lsm_main.lo: $(LSMDIR)\lsm_main.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_main.c - -lsm_mem.lo: $(LSMDIR)\lsm_mem.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_mem.c - -lsm_mutex.lo: $(LSMDIR)\lsm_mutex.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_mutex.c - -lsm_shared.lo: $(LSMDIR)\lsm_shared.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_shared.c - -lsm_sorted.lo: $(LSMDIR)\lsm_sorted.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_sorted.c - -lsm_str.lo: $(LSMDIR)\lsm_str.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_str.c - -lsm_tree.lo: $(LSMDIR)\lsm_tree.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_tree.c - -lsm_unix.lo: $(LSMDIR)\lsm_unix.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_unix.c - -lsm_win32.lo: $(LSMDIR)\lsm_win32.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_win32.c - -lsm_varint.lo: $(LSMDIR)\lsm_varint.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_varint.c - -lsm_vtab.lo: $(LSMDIR)\lsm_vtab.c $(LSMHDR) $(SQLITE3H) - $(LTCOMPILE) $(LSMOPTS) -c $(LSMDIR)\lsm_vtab.c - -lsm.dll: $(LSMOBJ) - $(LD) $(LDFLAGS) $(LTLINKOPTS) $(LTLIBPATHS) /DLL /OUT:$@ $(LSMOBJ) - copy /Y $@ $(LSMDIR)\$@ - -lsmtest.exe: $(LSMOBJ) $(LSMTESTSRC) $(LSMTESTHDR) $(LIBOBJ) - $(LTLINK) $(LSMOPTS) $(LSMTESTSRC) /link $(LSMOBJ) $(LIBOBJ) - copy /Y $@ $(LSMDIR)\$@ diff --git a/ext/lsm1/lsm-test/README b/ext/lsm1/lsm-test/README deleted file mode 100644 index 80654ee97e..0000000000 --- a/ext/lsm1/lsm-test/README +++ /dev/null @@ -1,40 +0,0 @@ - - -Organization of test case files: - - lsmtest1.c: Data tests. Tests that perform many inserts and deletes on a - database file, then verify that the contents of the database can - be queried. - - lsmtest2.c: Crash tests. Tests that attempt to verify that the database - recovers correctly following an application or system crash. - - lsmtest3.c: Rollback tests. 
Tests that focus on the explicit rollback of - transactions and sub-transactions. - - lsmtest4.c: Multi-client tests. - - lsmtest5.c: Multi-client tests with a different thread for each client. - - lsmtest6.c: OOM injection tests. - - lsmtest7.c: API tests. - - lsmtest8.c: Writer crash tests. Tests in this file attempt to verify that - the system recovers and other clients proceed unaffected if - a process fails in the middle of a write transaction. - - The difference from lsmtest2.c is that this file tests - live-recovery (recovery from a failure that occurs while other - clients are still running) whereas lsmtest2.c tests recovery - from a system or power failure. - - lsmtest9.c: More data tests. These focus on testing that calling - lsm_work(nMerge=1) to compact the database does not corrupt it. - In other words, that databases containing block-redirects - can be read and written. - - - - - diff --git a/ext/lsm1/lsm-test/lsmtest.h b/ext/lsm1/lsm-test/lsmtest.h deleted file mode 100644 index ca60424add..0000000000 --- a/ext/lsm1/lsm-test/lsmtest.h +++ /dev/null @@ -1,303 +0,0 @@ - -#ifndef __WRAPPER_INT_H_ -#define __WRAPPER_INT_H_ - -#include "lsmtest_tdb.h" -#include "sqlite3.h" -#include "lsm.h" - -#include -#include -#include -#include -#include -#ifndef _WIN32 -# include -#endif -#include -#include -#include -#include -#include -#include - -#ifdef __cplusplus -extern "C" { -#endif - -#ifdef _WIN32 -# include "windows.h" -# define gettimeofday win32GetTimeOfDay -# define F_OK (0) -# define sleep(sec) Sleep(1000 * (sec)) -# define usleep(usec) Sleep(((usec) + 999) / 1000) -# ifdef _MSC_VER -# include -# define snprintf _snprintf -# define fsync(fd) FlushFileBuffers((HANDLE)_get_osfhandle((fd))) -# define fdatasync(fd) FlushFileBuffers((HANDLE)_get_osfhandle((fd))) -# define __va_copy(dst,src) ((dst) = (src)) -# define ftruncate(fd,sz) ((_chsize_s((fd), (sz))==0) ? 0 : -1) -# else -# error Unsupported C compiler for Windows. -# endif -int win32GetTimeOfDay(struct timeval *, void *); -#endif - -#ifndef _LSM_INT_H -typedef unsigned int u32; -typedef unsigned char u8; -typedef long long int i64; -typedef unsigned long long int u64; -#endif - - -#define ArraySize(x) ((int)(sizeof(x) / sizeof((x)[0]))) - -#define MIN(x,y) ((x)<(y) ? (x) : (y)) -#define MAX(x,y) ((x)>(y) ? (x) : (y)) - -#define unused_parameter(x) (void)(x) - -#define TESTDB_DEFAULT_PAGE_SIZE 4096 -#define TESTDB_DEFAULT_CACHE_SIZE 2048 - -#ifndef _O_BINARY -# define _O_BINARY (0) -#endif - -/* -** Ideally, these should be in wrapper.c. But they are here instead so that -** they can be used by the C++ database wrappers in wrapper2.cc. -*/ -typedef struct DatabaseMethods DatabaseMethods; -struct TestDb { - DatabaseMethods const *pMethods; /* Database methods */ - const char *zLibrary; /* Library name for tdb_open() */ -}; -struct DatabaseMethods { - int (*xClose)(TestDb *); - int (*xWrite)(TestDb *, void *, int , void *, int); - int (*xDelete)(TestDb *, void *, int); - int (*xDeleteRange)(TestDb *, void *, int, void *, int); - int (*xFetch)(TestDb *, void *, int, void **, int *); - int (*xScan)(TestDb *, void *, int, void *, int, void *, int, - void (*)(void *, void *, int , void *, int) - ); - int (*xBegin)(TestDb *, int); - int (*xCommit)(TestDb *, int); - int (*xRollback)(TestDb *, int); -}; - -/* -** Functions in wrapper2.cc (a C++ source file). wrapper2.cc contains the -** wrapper for Kyoto Cabinet. Kyoto cabinet has a C API, but -** the primary interface is the C++ API. 
-*/ -int test_kc_open(const char*, const char *zFilename, int bClear, TestDb **ppDb); -int test_kc_close(TestDb *); -int test_kc_write(TestDb *, void *, int , void *, int); -int test_kc_delete(TestDb *, void *, int); -int test_kc_delete_range(TestDb *, void *, int, void *, int); -int test_kc_fetch(TestDb *, void *, int, void **, int *); -int test_kc_scan(TestDb *, void *, int, void *, int, void *, int, - void (*)(void *, void *, int , void *, int) -); - -int test_mdb_open(const char*, const char *zFile, int bClear, TestDb **ppDb); -int test_mdb_close(TestDb *); -int test_mdb_write(TestDb *, void *, int , void *, int); -int test_mdb_delete(TestDb *, void *, int); -int test_mdb_fetch(TestDb *, void *, int, void **, int *); -int test_mdb_scan(TestDb *, void *, int, void *, int, void *, int, - void (*)(void *, void *, int , void *, int) -); - -/* -** Functions in wrapper3.c. This file contains the tdb wrapper for lsm. -** The wrapper for lsm is a bit more involved than the others, as it -** includes code for a couple of different lsm configurations, and for -** various types of fault injection and robustness testing. -*/ -int test_lsm_open(const char*, const char *zFile, int bClear, TestDb **ppDb); -int test_lsm_lomem_open(const char*, const char*, int bClear, TestDb **ppDb); -int test_lsm_lomem2_open(const char*, const char*, int bClear, TestDb **ppDb); -int test_lsm_zip_open(const char*, const char*, int bClear, TestDb **ppDb); -int test_lsm_small_open(const char*, const char*, int bClear, TestDb **ppDb); -int test_lsm_mt2(const char*, const char *zFile, int bClear, TestDb **ppDb); -int test_lsm_mt3(const char*, const char *zFile, int bClear, TestDb **ppDb); - -int tdb_lsm_configure(lsm_db *, const char *); - -/* Functions in lsmtest_tdb4.c */ -int test_bt_open(const char*, const char *zFile, int bClear, TestDb **ppDb); -int test_fbt_open(const char*, const char *zFile, int bClear, TestDb **ppDb); -int test_fbts_open(const char*, const char *zFile, int bClear, TestDb **ppDb); - - -/* Functions in testutil.c. */ -int testPrngInit(void); -u32 testPrngValue(u32 iVal); -void testPrngArray(u32 iVal, u32 *aOut, int nOut); -void testPrngString(u32 iVal, char *aOut, int nOut); - -void testErrorInit(int argc, char **); -void testPrintError(const char *zFormat, ...); -void testPrintUsage(const char *zArgs); -void testPrintFUsage(const char *zFormat, ...); -void testTimeInit(void); -int testTimeGet(void); - -/* Functions in testmem.c. 
*/ -void testMallocInstall(lsm_env *pEnv); -void testMallocUninstall(lsm_env *pEnv); -void testMallocCheck(lsm_env *pEnv, int *, int *, FILE *); -void testMallocOom(lsm_env *pEnv, int, int, void(*)(void*), void *); -void testMallocOomEnable(lsm_env *pEnv, int); - -/* lsmtest.c */ -TestDb *testOpen(const char *zSystem, int, int *pRc); -void testReopen(TestDb **ppDb, int *pRc); -void testClose(TestDb **ppDb); - -void testFetch(TestDb *, void *, int, void *, int, int *); -void testWrite(TestDb *, void *, int, void *, int, int *); -void testDelete(TestDb *, void *, int, int *); -void testDeleteRange(TestDb *, void *, int, void *, int, int *); -void testWriteStr(TestDb *, const char *, const char *zVal, int *pRc); -void testFetchStr(TestDb *, const char *, const char *, int *pRc); - -void testBegin(TestDb *pDb, int iTrans, int *pRc); -void testCommit(TestDb *pDb, int iTrans, int *pRc); - -void test_failed(void); - -char *testMallocPrintf(const char *zFormat, ...); -char *testMallocVPrintf(const char *zFormat, va_list ap); -int testGlobMatch(const char *zPattern, const char *zStr); - -void testScanCompare(TestDb *, TestDb *, int, void *, int, void *, int, int *); -void testFetchCompare(TestDb *, TestDb *, void *, int, int *); - -void *testMalloc(int); -void *testMallocCopy(void *pCopy, int nByte); -void *testRealloc(void *, int); -void testFree(void *); - -/* lsmtest_bt.c */ -int do_bt(int nArg, char **azArg); - -/* testio.c */ -int testVfsConfigureDb(TestDb *pDb); - -/* testfunc.c */ -int do_show(int nArg, char **azArg); -int do_work(int nArg, char **azArg); - -/* testio.c */ -int do_io(int nArg, char **azArg); - -/* lsmtest2.c */ -void do_crash_test(const char *zPattern, int *pRc); -int do_rollback_test(int nArg, char **azArg); - -/* test3.c */ -void test_rollback(const char *zSystem, const char *zPattern, int *pRc); - -/* test4.c */ -void test_mc(const char *zSystem, const char *zPattern, int *pRc); - -/* test5.c */ -void test_mt(const char *zSystem, const char *zPattern, int *pRc); - -/* lsmtest6.c */ -void test_oom(const char *zPattern, int *pRc); -void testDeleteLsmdb(const char *zFile); - -void testSaveDb(const char *zFile, const char *zAuxExt); -void testRestoreDb(const char *zFile, const char *zAuxExt); -void testCopyLsmdb(const char *zFrom, const char *zTo); - -/* lsmtest7.c */ -void test_api(const char *zPattern, int *pRc); - -/* lsmtest8.c */ -void do_writer_crash_test(const char *zPattern, int *pRc); - -/************************************************************************* -** Interface to functionality in test_datasource.c. -*/ -typedef struct Datasource Datasource; -typedef struct DatasourceDefn DatasourceDefn; - -struct DatasourceDefn { - int eType; /* A TEST_DATASOURCE_* value */ - int nMinKey; /* Minimum key size */ - int nMaxKey; /* Maximum key size */ - int nMinVal; /* Minimum value size */ - int nMaxVal; /* Maximum value size */ -}; - -#define TEST_DATASOURCE_RANDOM 1 -#define TEST_DATASOURCE_SEQUENCE 2 - -char *testDatasourceName(const DatasourceDefn *); -Datasource *testDatasourceNew(const DatasourceDefn *); -void testDatasourceFree(Datasource *); -void testDatasourceEntry(Datasource *, int, void **, int *, void **, int *); -/* End of test_datasource.c interface. 
-*************************************************************************/ -void testDatasourceFetch( - TestDb *pDb, /* Database handle */ - Datasource *pData, - int iKey, - int *pRc /* IN/OUT: Error code */ -); - -void testWriteDatasource(TestDb *, Datasource *, int, int *); -void testWriteDatasourceRange(TestDb *, Datasource *, int, int, int *); -void testDeleteDatasource(TestDb *, Datasource *, int, int *); -void testDeleteDatasourceRange(TestDb *, Datasource *, int, int, int *); - - -/* test1.c */ -void test_data_1(const char *, const char *, int *pRc); -void test_data_2(const char *, const char *, int *pRc); -void test_data_3(const char *, const char *, int *pRc); -void testDbContents(TestDb *, Datasource *, int, int, int, int, int, int *); -void testCaseProgress(int, int, int, int *); -int testCaseNDot(void); - -void testCompareDb(Datasource *, int, int, TestDb *, TestDb *, int *); -int testControlDb(TestDb **ppDb); - -typedef struct CksumDb CksumDb; -CksumDb *testCksumArrayNew(Datasource *, int, int, int); -char *testCksumArrayGet(CksumDb *, int); -void testCksumArrayFree(CksumDb *); -void testCaseStart(int *pRc, char *zFmt, ...); -void testCaseFinish(int rc); -void testCaseSkip(void); -int testCaseBegin(int *, const char *, const char *, ...); - -#define TEST_CKSUM_BYTES 29 -int testCksumDatabase(TestDb *pDb, char *zOut); -int testCountDatabase(TestDb *pDb); -void testCompareInt(int, int, int *); -void testCompareStr(const char *z1, const char *z2, int *pRc); - -/* lsmtest9.c */ -void test_data_4(const char *, const char *, int *pRc); - - -/* -** Similar to the Tcl_GetIndexFromObjStruct() Tcl library function. -*/ -#define testArgSelect(w,x,y,z) testArgSelectX(w,x,sizeof(w[0]),y,z) -int testArgSelectX(void *, const char *, int, const char *, int *); - -#ifdef __cplusplus -} /* End of the 'extern "C"' block */ -#endif - -#endif diff --git a/ext/lsm1/lsm-test/lsmtest1.c b/ext/lsm1/lsm-test/lsmtest1.c deleted file mode 100644 index 1ce2cc0588..0000000000 --- a/ext/lsm1/lsm-test/lsmtest1.c +++ /dev/null @@ -1,656 +0,0 @@ - -#include "lsmtest.h" - -#define DATA_SEQUENTIAL TEST_DATASOURCE_SEQUENCE -#define DATA_RANDOM TEST_DATASOURCE_RANDOM - -typedef struct Datatest1 Datatest1; -typedef struct Datatest2 Datatest2; - -/* -** An instance of the following structure contains parameters used to -** customize the test function in this file. Test procedure: -** -** 1. Create a data-source based on the "datasource definition" vars. -** -** 2. Insert nRow key value pairs into the database. -** -** 3. Delete all keys from the database. Deletes are done in the same -** order as the inserts. -** -** During steps 2 and 3 above, after each Datatest1.nVerify inserts or -** deletes, the following: -** -** a. Run Datasource.nTest key lookups and check the results are as expected. -** -** b. If Datasource.bTestScan is true, run a handful (8) of range -** queries (scanning forwards and backwards). Check that the results -** are as expected. -** -** c. Close and reopen the database. Then run (a) and (b) again. -*/ -struct Datatest1 { - /* Datasource definition */ - DatasourceDefn defn; - - /* Test procedure parameters */ - int nRow; /* Number of rows to insert then delete */ - int nVerify; /* How often to verify the db contents */ - int nTest; /* Number of keys to test (0==all) */ - int bTestScan; /* True to do scan tests */ -}; - -/* -** An instance of the following data structure is used to describe the -** second type of test case in this file. 
The chief difference between -** these tests and those described by Datatest1 is that these tests also -** experiment with range-delete operations. Tests proceed as follows: -** -** 1. Open the datasource described by Datatest2.defn. -** -** 2. Open a connection on an empty database. -** -** 3. Do this Datatest2.nIter times: -** -** a) Insert Datatest2.nWrite key-value pairs from the datasource. -** -** b) Select two pseudo-random keys and use them as the start -** and end points of a range-delete operation. -** -** c) Verify that the contents of the database are as expected (see -** below for details). -** -** d) Close and then reopen the database handle. -** -** e) Verify that the contents of the database are still as expected. -** -** The inserts and range deletes are run twice - once on the database being -** tested and once using a control system (sqlite3, kc etc. - something that -** works). In order to verify that the contents of the db being tested are -** correct, the test runs a bunch of scans and lookups on both the test and -** control databases. If the results are the same, the test passes. -*/ -struct Datatest2 { - DatasourceDefn defn; - int nRange; - int nWrite; /* Number of writes per iteration */ - int nIter; /* Total number of iterations to run */ -}; - -/* -** Generate a unique name for the test case pTest with database system -** zSystem. -*/ -static char *getName(const char *zSystem, int bRecover, Datatest1 *pTest){ - char *zRet; - char *zData; - zData = testDatasourceName(&pTest->defn); - zRet = testMallocPrintf("data.%s.%s.rec=%d.%d.%d", - zSystem, zData, bRecover, pTest->nRow, pTest->nVerify - ); - testFree(zData); - return zRet; -} - -int testControlDb(TestDb **ppDb){ -#ifdef HAVE_KYOTOCABINET - return tdb_open("kyotocabinet", "tmp.db", 1, ppDb); -#else - return tdb_open("sqlite3", "", 1, ppDb); -#endif -} - -void testDatasourceFetch( - TestDb *pDb, /* Database handle */ - Datasource *pData, - int iKey, - int *pRc /* IN/OUT: Error code */ -){ - void *pKey; int nKey; /* Database key to query for */ - void *pVal; int nVal; /* Expected result of query */ - - testDatasourceEntry(pData, iKey, &pKey, &nKey, &pVal, &nVal); - testFetch(pDb, pKey, nKey, pVal, nVal, pRc); -} - -/* -** This function is called to test that the contents of database pDb -** are as expected. In this case, expected is defined as containing -** key-value pairs iFirst through iLast, inclusive, from data source -** pData. In other words, a loop like the following could be used to -** construct a database with identical contents from scratch. -** -** for(i=iFirst; i<=iLast; i++){ -** testDatasourceEntry(pData, i, &pKey, &nKey, &pVal, &nVal); -** // insert (pKey, nKey) -> (pVal, nVal) into database -** } -** -** The key domain consists of keys 0 to (nRow-1), inclusive, from -** data source pData. For both scan and lookup tests, keys are selected -** pseudo-randomly from within this set. -** -** This function runs nLookupTest lookup tests and nScanTest scan tests. -** -** A lookup test consists of selecting a key from the domain and querying -** pDb for it. The test fails if the presence of the key and, if present, -** the associated value do not match the expectations defined above. -** -** A scan test involves selecting a key from the domain and running -** the following queries: -** -** 1. Scan all keys equal to or greater than the key, in ascending order. -** 2. Scan all keys equal to or smaller than the key, in descending order. 
-** -** Additionally, if nLookupTest is greater than zero, the following are -** run once: -** -** 1. Scan all keys in the db, in ascending order. -** 2. Scan all keys in the db, in descending order. -** -** As you would assume, the test fails if the returned values do not match -** expectations. -*/ -void testDbContents( - TestDb *pDb, /* Database handle being tested */ - Datasource *pData, /* pDb contains data from here */ - int nRow, /* Size of key domain */ - int iFirst, /* Index of first key from pData in pDb */ - int iLast, /* Index of last key from pData in pDb */ - int nLookupTest, /* Number of lookup tests to run */ - int nScanTest, /* Number of scan tests to run */ - int *pRc /* IN/OUT: Error code */ -){ - int j; - int rc = *pRc; - - if( rc==0 && nScanTest ){ - TestDb *pDb2 = 0; - - /* Open a control db (i.e. one that we assume works) */ - rc = testControlDb(&pDb2); - - for(j=iFirst; rc==0 && j<=iLast; j++){ - void *pKey; int nKey; /* Database key to insert */ - void *pVal; int nVal; /* Database value to insert */ - testDatasourceEntry(pData, j, &pKey, &nKey, &pVal, &nVal); - rc = tdb_write(pDb2, pKey, nKey, pVal, nVal); - } - - if( rc==0 ){ - int iKey1; - int iKey2; - void *pKey1; int nKey1; /* Start key */ - void *pKey2; int nKey2; /* Final key */ - - iKey1 = testPrngValue((iFirst<<8) + (iLast<<16)) % nRow; - iKey2 = testPrngValue((iLast<<8) + (iFirst<<16)) % nRow; - testDatasourceEntry(pData, iKey1, &pKey2, &nKey1, 0, 0); - pKey1 = testMalloc(nKey1+1); - memcpy(pKey1, pKey2, nKey1+1); - testDatasourceEntry(pData, iKey2, &pKey2, &nKey2, 0, 0); - - testScanCompare(pDb2, pDb, 0, 0, 0, 0, 0, &rc); - testScanCompare(pDb2, pDb, 0, 0, 0, pKey2, nKey2, &rc); - testScanCompare(pDb2, pDb, 0, pKey1, nKey1, 0, 0, &rc); - testScanCompare(pDb2, pDb, 0, pKey1, nKey1, pKey2, nKey2, &rc); - testScanCompare(pDb2, pDb, 1, 0, 0, 0, 0, &rc); - testScanCompare(pDb2, pDb, 1, 0, 0, pKey2, nKey2, &rc); - testScanCompare(pDb2, pDb, 1, pKey1, nKey1, 0, 0, &rc); - testScanCompare(pDb2, pDb, 1, pKey1, nKey1, pKey2, nKey2, &rc); - testFree(pKey1); - } - tdb_close(pDb2); - } - - /* Test some lookups. */ - for(j=0; rc==0 && j=nRow ){ - iKey = j; - }else{ - iKey = testPrngValue(j + (iFirst<<8) + (iLast<<16)) % nRow; - } - - testDatasourceEntry(pData, iKey, &pKey, &nKey, &pVal, &nVal); - if( iFirst>iKey || iKey>iLast ){ - pVal = 0; - nVal = -1; - } - - testFetch(pDb, pKey, nKey, pVal, nVal, &rc); - } - - *pRc = rc; -} - -/* -** This function should be called during long running test cases to output -** the progress dots (...) to stdout. 
-*/ -void testCaseProgress(int i, int n, int nDot, int *piDot){ - int iDot = *piDot; - while( iDot < ( ((nDot*2+1) * i) / (n*2) ) ){ - printf("."); - fflush(stdout); - iDot++; - } - *piDot = iDot; -} - -int testCaseNDot(void){ return 20; } - -#if 0 -static void printScanCb( - void *pCtx, void *pKey, int nKey, void *pVal, int nVal -){ - printf("%s\n", (char *)pKey); - fflush(stdout); -} -#endif - -void testReopenRecover(TestDb **ppDb, int *pRc){ - if( *pRc==0 ){ - const char *zLib = tdb_library_name(*ppDb); - const char *zDflt = tdb_default_db(zLib); - testCopyLsmdb(zDflt, "bak.db"); - testClose(ppDb); - testCopyLsmdb("bak.db", zDflt); - *pRc = tdb_open(zLib, 0, 0, ppDb); - } -} - - -static void doDataTest1( - const char *zSystem, /* Database system to test */ - int bRecover, - Datatest1 *p, /* Structure containing test parameters */ - int *pRc /* OUT: Error code */ -){ - int i; - int iDot; - int rc = LSM_OK; - Datasource *pData; - TestDb *pDb; - int iToggle = 0; - - /* Start the test case, open a database and allocate the datasource. */ - pDb = testOpen(zSystem, 1, &rc); - pData = testDatasourceNew(&p->defn); - - i = 0; - iDot = 0; - while( rc==LSM_OK && inRow ){ - - /* Insert some data */ - testWriteDatasourceRange(pDb, pData, i, p->nVerify, &rc); - i += p->nVerify; - - if( iToggle ) testBegin(pDb, 1, &rc); - /* Check that the db content is correct. */ - testDbContents(pDb, pData, p->nRow, 0, i-1, p->nTest, p->bTestScan, &rc); - if( iToggle ) testCommit(pDb, 0, &rc); - iToggle = (iToggle+1)%2; - - if( bRecover ){ - testReopenRecover(&pDb, &rc); - }else{ - testReopen(&pDb, &rc); - } - - /* Check that the db content is still correct. */ - testDbContents(pDb, pData, p->nRow, 0, i-1, p->nTest, p->bTestScan, &rc); - - /* Update the progress dots... */ - testCaseProgress(i, p->nRow, testCaseNDot()/2, &iDot); - } - - i = 0; - iDot = 0; - while( rc==LSM_OK && inRow ){ - - /* Delete some entries */ - testDeleteDatasourceRange(pDb, pData, i, p->nVerify, &rc); - i += p->nVerify; - - /* Check that the db content is correct. */ - testDbContents(pDb, pData, p->nRow, i, p->nRow-1,p->nTest,p->bTestScan,&rc); - - /* Close and reopen the database. */ - if( bRecover ){ - testReopenRecover(&pDb, &rc); - }else{ - testReopen(&pDb, &rc); - } - - /* Check that the db content is still correct. */ - testDbContents(pDb, pData, p->nRow, i, p->nRow-1,p->nTest,p->bTestScan,&rc); - - /* Update the progress dots... */ - testCaseProgress(i, p->nRow, testCaseNDot()/2, &iDot); - } - - /* Free the datasource, close the database and finish the test case. 
*/ - testDatasourceFree(pData); - tdb_close(pDb); - testCaseFinish(rc); - *pRc = rc; -} - - -void test_data_1( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - Datatest1 aTest[] = { - { {DATA_RANDOM, 500,600, 1000,2000}, 1000, 100, 10, 0}, - { {DATA_RANDOM, 20,25, 100,200}, 1000, 250, 1000, 1}, - { {DATA_RANDOM, 8,10, 100,200}, 1000, 250, 1000, 1}, - { {DATA_RANDOM, 8,10, 10,20}, 1000, 250, 1000, 1}, - { {DATA_RANDOM, 8,10, 1000,2000}, 1000, 250, 1000, 1}, - { {DATA_RANDOM, 8,100, 10000,20000}, 100, 25, 100, 1}, - { {DATA_RANDOM, 80,100, 10,20}, 1000, 250, 1000, 1}, - { {DATA_RANDOM, 5000,6000, 10,20}, 100, 25, 100, 1}, - { {DATA_SEQUENTIAL, 5,10, 10,20}, 1000, 250, 1000, 1}, - { {DATA_SEQUENTIAL, 5,10, 100,200}, 1000, 250, 1000, 1}, - { {DATA_SEQUENTIAL, 5,10, 1000,2000}, 1000, 250, 1000, 1}, - { {DATA_SEQUENTIAL, 5,100, 10000,20000}, 100, 25, 100, 1}, - { {DATA_RANDOM, 10,10, 100,100}, 100000, 1000, 100, 0}, - { {DATA_SEQUENTIAL, 10,10, 100,100}, 100000, 1000, 100, 0}, - }; - - int i; - int bRecover; - - for(bRecover=0; bRecover<2; bRecover++){ - if( bRecover==1 && memcmp(zSystem, "lsm", 3) ) break; - for(i=0; *pRc==LSM_OK && idefn); - rc = testControlDb(&pControl); - - if( tdb_lsm(pDb) ){ - int nBuf = 32 * 1024 * 1024; - lsm_config(tdb_lsm(pDb), LSM_CONFIG_AUTOFLUSH, &nBuf); - } - - for(i=0; rc==0 && inIter; i++){ - void *pKey1; int nKey1; - void *pKey2; int nKey2; - int ii; - int nRange = MIN(p->nIter*p->nWrite, p->nRange); - - for(ii=0; rc==0 && iinWrite; ii++){ - int iKey = (i*p->nWrite + ii) % p->nRange; - testWriteDatasource(pControl, pData, iKey, &rc); - testWriteDatasource(pDb, pData, iKey, &rc); - } - - testDatasourceEntry(pData, i+1000000, &pKey1, &nKey1, 0, 0); - pKey1 = testMallocCopy(pKey1, nKey1); - testDatasourceEntry(pData, i+2000000, &pKey2, &nKey2, 0, 0); - - testDeleteRange(pDb, pKey1, nKey1, pKey2, nKey2, &rc); - testDeleteRange(pControl, pKey1, nKey1, pKey2, nKey2, &rc); - testFree(pKey1); - - testCompareDb(pData, nRange, i, pControl, pDb, &rc); - if( bRecover ){ - testReopenRecover(&pDb, &rc); - }else{ - testReopen(&pDb, &rc); - } - testCompareDb(pData, nRange, i, pControl, pDb, &rc); - - /* Update the progress dots... 
*/ - testCaseProgress(i, p->nIter, testCaseNDot(), &iDot); - } - - testClose(&pDb); - testClose(&pControl); - testDatasourceFree(pData); - testCaseFinish(rc); - *pRc = rc; -} - -static char *getName2(const char *zSystem, int bRecover, Datatest2 *pTest){ - char *zRet; - char *zData; - zData = testDatasourceName(&pTest->defn); - zRet = testMallocPrintf("data2.%s.%s.rec=%d.%d.%d.%d", - zSystem, zData, bRecover, pTest->nRange, pTest->nWrite, pTest->nIter - ); - testFree(zData); - return zRet; -} - -void test_data_2( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - Datatest2 aTest[] = { - /* defn, nRange, nWrite, nIter */ - { {DATA_RANDOM, 20,25, 100,200}, 10000, 10, 50 }, - { {DATA_RANDOM, 20,25, 100,200}, 10000, 200, 50 }, - { {DATA_RANDOM, 20,25, 100,200}, 100, 10, 1000 }, - { {DATA_RANDOM, 20,25, 100,200}, 100, 200, 50 }, - }; - - int i; - int bRecover; - - for(bRecover=0; bRecover<2; bRecover++){ - if( bRecover==1 && memcmp(zSystem, "lsm", 3) ) break; - for(i=0; *pRc==LSM_OK && i> 24) & 0xFF; - aBuf[1] = (iVal >> 16) & 0xFF; - aBuf[2] = (iVal >> 8) & 0xFF; - aBuf[3] = (iVal >> 0) & 0xFF; -} - -void dt3PutKey(u8 *aBuf, int iKey){ - assert( iKey<100000 && iKey>=0 ); - sprintf((char *)aBuf, "%.5d", iKey); -} - -static void doDataTest3( - const char *zSystem, /* Database system to test */ - Datatest3 *p, /* Structure containing test parameters */ - int *pRc /* OUT: Error code */ -){ - int iDot = 0; - int rc = *pRc; - TestDb *pDb; - u8 *abPresent; /* Array of boolean */ - char *aVal; /* Buffer to hold values */ - int i; - u32 iSeq = 10; /* prng counter */ - - abPresent = (u8 *)testMalloc(p->nRange+1); - aVal = (char *)testMalloc(p->nValMax+1); - pDb = testOpen(zSystem, 1, &rc); - - for(i=0; inIter && rc==0; i++){ - int ii; - - testCaseProgress(i, p->nIter, testCaseNDot(), &iDot); - - /* Perform nWrite inserts */ - for(ii=0; iinWrite; ii++){ - u8 aKey[6]; - u32 iKey; - int nVal; - - iKey = (testPrngValue(iSeq++) % p->nRange) + 1; - nVal = (testPrngValue(iSeq++) % (p->nValMax - p->nValMin)) + p->nValMin; - testPrngString(testPrngValue(iSeq++), aVal, nVal); - dt3PutKey(aKey, iKey); - - testWrite(pDb, aKey, sizeof(aKey)-1, aVal, nVal, &rc); - abPresent[iKey] = 1; - } - - /* Perform nDelete deletes */ - for(ii=0; iinDelete; ii++){ - u8 aKey1[6]; - u8 aKey2[6]; - u32 iKey; - - iKey = (testPrngValue(iSeq++) % p->nRange) + 1; - dt3PutKey(aKey1, iKey-1); - dt3PutKey(aKey2, iKey+1); - - testDeleteRange(pDb, aKey1, sizeof(aKey1)-1, aKey2, sizeof(aKey2)-1, &rc); - abPresent[iKey] = 0; - } - - testReopen(&pDb, &rc); - - for(ii=1; rc==0 && ii<=p->nRange; ii++){ - int nDbVal; - void *pDbVal; - u8 aKey[6]; - int dbrc; - - dt3PutKey(aKey, ii); - dbrc = tdb_fetch(pDb, aKey, sizeof(aKey)-1, &pDbVal, &nDbVal); - testCompareInt(0, dbrc, &rc); - - if( abPresent[ii] ){ - testCompareInt(1, (nDbVal>0), &rc); - }else{ - testCompareInt(1, (nDbVal<0), &rc); - } - } - } - - testClose(&pDb); - testCaseFinish(rc); - *pRc = rc; -} - -static char *getName3(const char *zSystem, Datatest3 *p){ - return testMallocPrintf("data3.%s.%d.%d.%d.%d.(%d..%d)", - zSystem, p->nRange, p->nIter, p->nWrite, p->nDelete, - p->nValMin, p->nValMax - ); -} - -void test_data_3( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - Datatest3 aTest[] = { - /* nRange, nIter, nWrite, nDelete, nValMin, nValMax */ - { 100, 1000, 5, 5, 50, 
100 }, - { 100, 1000, 2, 2, 5, 10 }, - }; - - int i; - - for(i=0; *pRc==LSM_OK && inRow++; - for(i=0; icksum1 += ((u8 *)pKey)[i]; - p->cksum2 += p->cksum1; - } - for(i=0; icksum1 += ((u8 *)pVal)[i]; - p->cksum2 += p->cksum1; - } -} - -/* -** tdb_scan() callback used by testCountDatabase() -*/ -static void scanCountDb( - void *pCtx, - void *pKey, int nKey, - void *pVal, int nVal -){ - Cksum *p = (Cksum *)pCtx; - p->nRow++; - - unused_parameter(pKey); - unused_parameter(nKey); - unused_parameter(pVal); - unused_parameter(nVal); -} - - -/* -** Iterate through the entire contents of database pDb. Write a checksum -** string based on the db contents into buffer zOut before returning. A -** checksum string is at most 29 (TEST_CKSUM_BYTES) bytes in size: -** -** * 32-bit integer (10 bytes) -** * 1 space (1 byte) -** * 32-bit hex (8 bytes) -** * 1 space (1 byte) -** * 32-bit hex (8 bytes) -** * nul-terminator (1 byte) -** -** The number of entries in the database is returned. -*/ -int testCksumDatabase( - TestDb *pDb, /* Database handle */ - char *zOut /* Buffer to write checksum to */ -){ - Cksum cksum; - memset(&cksum, 0, sizeof(Cksum)); - tdb_scan(pDb, (void *)&cksum, 0, 0, 0, 0, 0, scanCksumDb); - sprintf(zOut, "%d %x %x", - cksum.nRow, (u32)cksum.cksum1, (u32)cksum.cksum2 - ); - assert( strlen(zOut)0 ); */ - if( testrc==0 ) testrc = lsm_checkpoint(db, 0); - } - tdb_close(pDb); - - /* Check that the database content is still correct */ - testCompareCksumLsmdb(DBNAME, - bCompress, testCksumArrayGet(pCksumDb, nRow), 0, pRc); - } - - testCksumArrayFree(pCksumDb); - testDatasourceFree(pData); -} - -/* -** This test verifies that if a system crash occurs while committing a -** transaction to the log file, no earlier transactions are lost or damaged. -*/ -static void crash_test2(int bCompress, int *pRc){ - const char *DBNAME = "testdb.lsm"; - const DatasourceDefn defn = {TEST_DATASOURCE_RANDOM, 12, 16, 1000, 1000}; - - const int nIter = 200; - const int nInsert = 20; - - int i; - int iDot = 0; - Datasource *pData; - CksumDb *pCksumDb; - TestDb *pDb; - - /* Allocate datasource. And calculate the expected checksums. */ - pData = testDatasourceNew(&defn); - pCksumDb = testCksumArrayNew(pData, 100, 100+nInsert, 1); - - /* Setup and save the initial database. */ - testSetupSavedLsmdb("", DBNAME, pData, 100, pRc); - - for(i=0; izTest) ){ - p->x(p->bCompress, pRc); - testCaseFinish(*pRc); - } - } -} diff --git a/ext/lsm1/lsm-test/lsmtest3.c b/ext/lsm1/lsm-test/lsmtest3.c deleted file mode 100644 index 760dec300f..0000000000 --- a/ext/lsm1/lsm-test/lsmtest3.c +++ /dev/null @@ -1,238 +0,0 @@ - - -/* -** This file contains tests related to the explicit rollback of database -** transactions and sub-transactions. -*/ - - -/* -** Repeat 2000 times (until the db contains 100,000 entries): -** -** 1. Open a transaction and insert 500 rows, opening a nested -** sub-transaction each 100 rows. -** -** 2. Roll back to each sub-transaction savepoint. Check the database -** checksum looks Ok. -** -** 3. Every second iteration, roll back the main transaction. Check the -** db checksum is correct. Every other iteration, commit the main -** transaction (increasing the size of the db by 100 rows). 
-*/ - - -#include "lsmtest.h" - -struct CksumDb { - int nFirst; - int nLast; - int nStep; - char **azCksum; -}; - -CksumDb *testCksumArrayNew( - Datasource *pData, - int nFirst, - int nLast, - int nStep -){ - TestDb *pDb; - CksumDb *pRet; - int i; - int nEntry; - int rc = 0; - - assert( nLast>=nFirst && ((nLast-nFirst)%nStep)==0 ); - - pRet = malloc(sizeof(CksumDb)); - memset(pRet, 0, sizeof(CksumDb)); - pRet->nFirst = nFirst; - pRet->nLast = nLast; - pRet->nStep = nStep; - nEntry = 1 + ((nLast - nFirst) / nStep); - - /* Allocate space so that azCksum is an array of nEntry pointers to - ** buffers each TEST_CKSUM_BYTES in size. */ - pRet->azCksum = (char **)malloc(nEntry * (sizeof(char *) + TEST_CKSUM_BYTES)); - for(i=0; iazCksum[nEntry]); - pRet->azCksum[i] = &pStart[i * TEST_CKSUM_BYTES]; - } - - tdb_open("lsm", "tempdb.lsm", 1, &pDb); - testWriteDatasourceRange(pDb, pData, 0, nFirst, &rc); - for(i=0; iazCksum[i]); - if( i==nEntry ) break; - testWriteDatasourceRange(pDb, pData, nFirst+i*nStep, nStep, &rc); - } - - tdb_close(pDb); - - return pRet; -} - -char *testCksumArrayGet(CksumDb *p, int nRow){ - int i; - assert( nRow>=p->nFirst ); - assert( nRow<=p->nLast ); - assert( ((nRow-p->nFirst) % p->nStep)==0 ); - - i = (nRow - p->nFirst) / p->nStep; - return p->azCksum[i]; -} - -void testCksumArrayFree(CksumDb *p){ - free(p->azCksum); - memset(p, 0x55, sizeof(*p)); - free(p); -} - -/* End of CksumDb code. -**************************************************************************/ - -/* -** Test utility function. Write key-value pair $i from datasource pData -** into database pDb. -*/ -void testWriteDatasource(TestDb *pDb, Datasource *pData, int i, int *pRc){ - void *pKey; int nKey; - void *pVal; int nVal; - testDatasourceEntry(pData, i, &pKey, &nKey, &pVal, &nVal); - testWrite(pDb, pKey, nKey, pVal, nVal, pRc); -} - -/* -** Test utility function. Delete datasource pData key $i from database pDb. -*/ -void testDeleteDatasource(TestDb *pDb, Datasource *pData, int i, int *pRc){ - void *pKey; int nKey; - testDatasourceEntry(pData, i, &pKey, &nKey, 0, 0); - testDelete(pDb, pKey, nKey, pRc); -} - -/* -** This function inserts nWrite key/value pairs into database pDb - the -** nWrite key value pairs starting at iFirst from data source pData. 
-*/ -void testWriteDatasourceRange( - TestDb *pDb, /* Database to write to */ - Datasource *pData, /* Data source to read values from */ - int iFirst, /* Index of first key/value pair */ - int nWrite, /* Number of key/value pairs to write */ - int *pRc /* IN/OUT: Error code */ -){ - int i; - for(i=0; i2 && rc==0; iTrans--){ - tdb_rollback(pDb, iTrans); - nCurrent -= 100; - testCksumDatabase(pDb, zCksum); - testCompareStr(zCksum, testCksumArrayGet(pCksum, nCurrent), &rc); - } - - if( i%2 ){ - tdb_rollback(pDb, 0); - nCurrent -= 100; - testCksumDatabase(pDb, zCksum); - testCompareStr(zCksum, testCksumArrayGet(pCksum, nCurrent), &rc); - }else{ - tdb_commit(pDb, 0); - } - } - testCaseFinish(rc); - - skip_rollback_test: - tdb_close(pDb); - testCksumArrayFree(pCksum); - return rc; -} - -void test_rollback( - const char *zSystem, - const char *zPattern, - int *pRc -){ - if( *pRc==0 ){ - int bRun = 1; - - if( zPattern ){ - char *zName = getName(zSystem); - bRun = testGlobMatch(zPattern, zName); - testFree(zName); - } - - if( bRun ){ - DatasourceDefn defn = { TEST_DATASOURCE_RANDOM, 10, 15, 50, 100 }; - Datasource *pData = testDatasourceNew(&defn); - *pRc = rollback_test_1(zSystem, pData); - testDatasourceFree(pData); - } - } -} diff --git a/ext/lsm1/lsm-test/lsmtest4.c b/ext/lsm1/lsm-test/lsmtest4.c deleted file mode 100644 index a47241db92..0000000000 --- a/ext/lsm1/lsm-test/lsmtest4.c +++ /dev/null @@ -1,127 +0,0 @@ - -/* -** This file contains test cases involving multiple database clients. -*/ - -#include "lsmtest.h" - -/* -** The following code implements test cases "mc1.*". -** -** This test case uses one writer and $nReader readers. All connections -** are driven by a single thread. All connections are opened at the start -** of the test and remain open until the test is finished. -** -** The test consists of $nStep steps. Each step the following is performed: -** -** 1. The writer inserts $nWriteStep records into the db. -** -** 2. The writer checks that the contents of the db are as expected. -** -** 3. Each reader that currently has an open read transaction also checks -** that the contents of the db are as expected (according to the snapshot -** the read transaction is reading - see below). -** -** After step 1, reader 1 opens a read transaction. After step 2, reader -** 2 opens a read transaction, and so on. At step ($nReader+1), reader 1 -** closes the current read transaction and opens a new one. And so on. -** The result is that at step N (for N > $nReader), there exists a reader -** with an open read transaction reading the snapshot committed following -** steps (N-$nReader-1) to N. 
-*/ -typedef struct Mctest Mctest; -struct Mctest { - DatasourceDefn defn; /* Datasource to use */ - int nStep; /* Total number of steps in test */ - int nWriteStep; /* Number of rows to insert each step */ - int nReader; /* Number of read connections */ -}; -static void do_mc_test( - const char *zSystem, /* Database system to test */ - Mctest *pTest, - int *pRc /* IN/OUT: return code */ -){ - const int nDomain = pTest->nStep * pTest->nWriteStep; - Datasource *pData; /* Source of data */ - TestDb *pDb; /* First database connection (writer) */ - int iReader; /* Used to iterate through aReader */ - int iStep; /* Current step in test */ - int iDot = 0; /* Current step in test */ - - /* Array of reader connections */ - struct Reader { - TestDb *pDb; /* Connection handle */ - int iLast; /* Current snapshot contains keys 0..iLast */ - } *aReader; - - /* Create a data source */ - pData = testDatasourceNew(&pTest->defn); - - /* Open the writer connection */ - pDb = testOpen(zSystem, 1, pRc); - - /* Allocate aReader */ - aReader = (struct Reader *)testMalloc(sizeof(aReader[0]) * pTest->nReader); - for(iReader=0; iReadernReader; iReader++){ - aReader[iReader].pDb = testOpen(zSystem, 0, pRc); - } - - for(iStep=0; iStepnStep; iStep++){ - int iLast; - int iBegin; /* Start read trans using aReader[iBegin] */ - - /* Insert nWriteStep more records into the database */ - int iFirst = iStep*pTest->nWriteStep; - testWriteDatasourceRange(pDb, pData, iFirst, pTest->nWriteStep, pRc); - - /* Check that the db is Ok according to the writer */ - iLast = (iStep+1) * pTest->nWriteStep - 1; - testDbContents(pDb, pData, nDomain, 0, iLast, iLast, 1, pRc); - - /* Have reader (iStep % nReader) open a read transaction here. */ - iBegin = (iStep % pTest->nReader); - if( iBeginnReader && aReader[iReader].iLast; iReader++){ - iLast = aReader[iReader].iLast; - testDbContents( - aReader[iReader].pDb, pData, nDomain, 0, iLast, iLast, 1, pRc - ); - } - - /* Report progress */ - testCaseProgress(iStep, pTest->nStep, testCaseNDot(), &iDot); - } - - /* Close all readers */ - for(iReader=0; iReadernReader; iReader++){ - testClose(&aReader[iReader].pDb); - } - testFree(aReader); - - /* Close the writer-connection and free the datasource */ - testClose(&pDb); - testDatasourceFree(pData); -} - - -void test_mc( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - int i; - Mctest aTest[] = { - { { TEST_DATASOURCE_RANDOM, 10,10, 100,100 }, 100, 10, 5 }, - }; - - for(i=0; i "k.0000000045". -** -** As well as the key/value pairs, the database also contains checksum -** entries. The checksums form a hierarchy - for every F key/value -** entries there is one level 1 checksum. And for each F level 1 checksums -** there is one level 2 checksum. And so on. -** -** Checksum keys are encoded as the two byte "c." followed by the -** checksum level, followed by a 10 digit decimal number containing -** the value of the first key that contributes to the checksum value. -** For example, assuming F==10, the level 1 checksum that spans keys -** 10 to 19 is "c.1.0000000010". -** -** Clients may perform one of two operations on the database: a read -** or a write. -** -** READ OPERATIONS: -** -** A read operation scans a range of F key/value pairs. It computes -** the expected checksum and then compares the computed value to the -** actual value stored in the level 1 checksum entry. 
It then scans -** the group of F level 1 checksums, and compares the computed checksum -** to the associated level 2 checksum value, and so on until the -** highest level checksum value has been verified. -** -** If a checksum ever fails to match the expected value, the test -** has failed. -** -** WRITE OPERATIONS: -** -** A write operation involves writing (possibly clobbering) a single -** key/value pair. The associated level 1 checksum is then recalculated -** updated. Then the level 2 checksum, and so on until the highest -** level checksum has been modified. -** -** All updates occur inside a single transaction. -** -** INTERFACE: -** -** The interface used by test cases to read and write the db consists -** of type DbParameters and the following functions: -** -** dbReadOperation() -** dbWriteOperation() -*/ - -#include "lsmtest.h" - -typedef struct DbParameters DbParameters; -struct DbParameters { - int nFanout; /* Checksum fanout (F) */ - int nKey; /* Size of key space (N) */ -}; - -#define DB_KEY_BYTES (2+5+10+1) - -/* -** Argument aBuf[] must point to a buffer at least DB_KEY_BYTES in size. -** This function populates the buffer with a nul-terminated key string -** corresponding to key iKey. -*/ -static void dbFormatKey( - DbParameters *pParam, - int iLevel, - int iKey, /* Key value */ - char *aBuf /* Write key string here */ -){ - if( iLevel==0 ){ - snprintf(aBuf, DB_KEY_BYTES, "k.%.10d", iKey); - }else{ - int f = 1; - int i; - for(i=0; inFanout; - snprintf(aBuf, DB_KEY_BYTES, "c.%d.%.10d", iLevel, f*(iKey/f)); - } -} - -/* -** Argument aBuf[] must point to a buffer at least DB_KEY_BYTES in size. -** This function populates the buffer with the string representation of -** checksum value iVal. -*/ -static void dbFormatCksumValue(u32 iVal, char *aBuf){ - snprintf(aBuf, DB_KEY_BYTES, "%.10u", iVal); -} - -/* -** Return the highest level of checksum in the database described -** by *pParam. -*/ -static int dbMaxLevel(DbParameters *pParam){ - int iMax; - int n = 1; - for(iMax=0; nnKey; iMax++){ - n = n * pParam->nFanout; - } - return iMax; -} - -static void dbCksum( - void *pCtx, /* IN/OUT: Pointer to u32 containing cksum */ - void *pKey, int nKey, /* Database key. Unused. */ - void *pVal, int nVal /* Database value. Checksum this. */ -){ - u8 *aVal = (u8 *)pVal; - u32 *pCksum = (u32 *)pCtx; - u32 cksum = *pCksum; - int i; - - unused_parameter(pKey); - unused_parameter(nKey); - - for(i=0; inFanout entries at level -** iLevel-1. 
-*/ -static u32 dbComputeCksum( - DbParameters *pParam, /* Database parameters */ - TestDb *pDb, /* Database connection handle */ - int iLevel, /* Level of checksum to compute */ - int iKey, /* Compute checksum for this key */ - int *pRc /* IN/OUT: Error code */ -){ - u32 cksum = 0; - if( *pRc==0 ){ - int nFirst; - int nLast; - int iFirst = 0; - int iLast = 0; - int i; - int f = 1; - char zFirst[DB_KEY_BYTES]; - char zLast[DB_KEY_BYTES]; - - assert( iLevel>=1 ); - for(i=0; inFanout; - - iFirst = f*(iKey/f); - iLast = iFirst + f - 1; - dbFormatKey(pParam, iLevel-1, iFirst, zFirst); - dbFormatKey(pParam, iLevel-1, iLast, zLast); - nFirst = strlen(zFirst); - nLast = strlen(zLast); - - *pRc = tdb_scan(pDb, (u32*)&cksum, 0, zFirst, nFirst, zLast, nLast,dbCksum); - } - - return cksum; -} - -static void dbReadOperation( - DbParameters *pParam, /* Database parameters */ - TestDb *pDb, /* Database connection handle */ - void (*xDelay)(void *), - void *pDelayCtx, - int iKey, /* Key to read */ - int *pRc /* IN/OUT: Error code */ -){ - const int iMax = dbMaxLevel(pParam); - int i; - - if( tdb_transaction_support(pDb) ) testBegin(pDb, 1, pRc); - for(i=1; *pRc==0 && i<=iMax; i++){ - char zCksum[DB_KEY_BYTES]; - char zKey[DB_KEY_BYTES]; - u32 iCksum = 0; - - iCksum = dbComputeCksum(pParam, pDb, i, iKey, pRc); - if( iCksum ){ - if( xDelay && i==1 ) xDelay(pDelayCtx); - dbFormatCksumValue(iCksum, zCksum); - dbFormatKey(pParam, i, iKey, zKey); - testFetchStr(pDb, zKey, zCksum, pRc); - } - } - if( tdb_transaction_support(pDb) ) testCommit(pDb, 0, pRc); -} - -static int dbWriteOperation( - DbParameters *pParam, /* Database parameters */ - TestDb *pDb, /* Database connection handle */ - int iKey, /* Key to write to */ - const char *zValue, /* Nul-terminated value to write */ - int *pRc /* IN/OUT: Error code */ -){ - const int iMax = dbMaxLevel(pParam); - char zKey[DB_KEY_BYTES]; - int i; - int rc; - - assert( iKey>=0 && iKeynKey ); - dbFormatKey(pParam, 0, iKey, zKey); - - /* Open a write transaction. This may fail - SQLITE4_BUSY */ - if( *pRc==0 && tdb_transaction_support(pDb) ){ - rc = tdb_begin(pDb, 2); - if( rc==5 ) return 0; - *pRc = rc; - } - - testWriteStr(pDb, zKey, zValue, pRc); - for(i=1; i<=iMax; i++){ - char zCksum[DB_KEY_BYTES]; - u32 iCksum = 0; - - iCksum = dbComputeCksum(pParam, pDb, i, iKey, pRc); - dbFormatCksumValue(iCksum, zCksum); - dbFormatKey(pParam, i, iKey, zKey); - testWriteStr(pDb, zKey, zCksum, pRc); - } - if( tdb_transaction_support(pDb) ) testCommit(pDb, 0, pRc); - return 1; -} - -/************************************************************************* -** The following block contains testXXX() functions that implement a -** wrapper around the systems native multi-thread support. There are no -** synchronization primitives - just functions to launch and join -** threads. 
Wrapper functions are: -** -** testThreadSupport() -** -** testThreadInit() -** testThreadShutdown() -** testThreadLaunch() -** testThreadWait() -** -** testThreadSetHalt() -** testThreadGetHalt() -** testThreadSetResult() -** testThreadGetResult() -** -** testThreadEnterMutex() -** testThreadLeaveMutex() -*/ -typedef struct ThreadSet ThreadSet; -#ifdef LSM_MUTEX_PTHREADS - -#include -#include - -typedef struct Thread Thread; -struct Thread { - int rc; - char *zMsg; - pthread_t id; - void (*xMain)(ThreadSet *, int, void *); - void *pCtx; - ThreadSet *pThreadSet; -}; - -struct ThreadSet { - int bHalt; /* Halt flag */ - int nThread; /* Number of threads */ - Thread *aThread; /* Array of Thread structures */ - pthread_mutex_t mutex; /* Mutex used for cheating */ -}; - -/* -** Return true if this build supports threads, or false otherwise. If -** this function returns false, no other testThreadXXX() functions should -** be called. -*/ -static int testThreadSupport(){ return 1; } - -/* -** Allocate and return a thread-set handle with enough space allocated -** to handle up to nMax threads. Each call to this function should be -** matched by a call to testThreadShutdown() to delete the object. -*/ -static ThreadSet *testThreadInit(int nMax){ - int nByte; /* Total space to allocate */ - ThreadSet *p; /* Return value */ - - nByte = sizeof(ThreadSet) + sizeof(struct Thread) * nMax; - p = (ThreadSet *)testMalloc(nByte); - p->nThread = nMax; - p->aThread = (Thread *)&p[1]; - pthread_mutex_init(&p->mutex, 0); - - return p; -} - -/* -** Delete a thread-set object and release all resources held by it. -*/ -static void testThreadShutdown(ThreadSet *p){ - int i; - for(i=0; inThread; i++){ - testFree(p->aThread[i].zMsg); - } - pthread_mutex_destroy(&p->mutex); - testFree(p); -} - -static void *ttMain(void *pArg){ - Thread *pThread = (Thread *)pArg; - int iThread; - iThread = (pThread - pThread->pThreadSet->aThread); - pThread->xMain(pThread->pThreadSet, iThread, pThread->pCtx); - return 0; -} - -/* -** Launch a new thread. -*/ -static int testThreadLaunch( - ThreadSet *p, - int iThread, - void (*xMain)(ThreadSet *, int, void *), - void *pCtx -){ - int rc; - Thread *pThread; - - assert( iThread>=0 && iThreadnThread ); - - pThread = &p->aThread[iThread]; - assert( pThread->pThreadSet==0 ); - pThread->xMain = xMain; - pThread->pCtx = pCtx; - pThread->pThreadSet = p; - rc = pthread_create(&pThread->id, 0, ttMain, (void *)pThread); - - return rc; -} - -/* -** Set the thread-set "halt" flag. -*/ -static void testThreadSetHalt(ThreadSet *pThreadSet){ - pThreadSet->bHalt = 1; -} - -/* -** Return the current value of the thread-set "halt" flag. -*/ -static int testThreadGetHalt(ThreadSet *pThreadSet){ - return pThreadSet->bHalt; -} - -static void testThreadSleep(ThreadSet *pThreadSet, int nMs){ - int nRem = nMs; - while( nRem>0 && testThreadGetHalt(pThreadSet)==0 ){ - usleep(50000); - nRem -= 50; - } -} - -/* -** Wait for all threads launched to finish before returning. If nMs -** is greater than zero, set the "halt" flag to tell all threads -** to halt after waiting nMs milliseconds. -*/ -static void testThreadWait(ThreadSet *pThreadSet, int nMs){ - int i; - - testThreadSleep(pThreadSet, nMs); - testThreadSetHalt(pThreadSet); - for(i=0; inThread; i++){ - Thread *pThread = &pThreadSet->aThread[i]; - if( pThread->xMain ){ - pthread_join(pThread->id, 0); - } - } -} - -/* -** Set the result for thread iThread. 
-*/ -static void testThreadSetResult( - ThreadSet *pThreadSet, /* Thread-set handle */ - int iThread, /* Set result for this thread */ - int rc, /* Result error code */ - char *zFmt, /* Result string format */ - ... /* Result string formatting args... */ -){ - va_list ap; - - testFree(pThreadSet->aThread[iThread].zMsg); - pThreadSet->aThread[iThread].rc = rc; - pThreadSet->aThread[iThread].zMsg = 0; - if( zFmt ){ - va_start(ap, zFmt); - pThreadSet->aThread[iThread].zMsg = testMallocVPrintf(zFmt, ap); - va_end(ap); - } -} - -/* -** Retrieve the result for thread iThread. -*/ -static int testThreadGetResult( - ThreadSet *pThreadSet, /* Thread-set handle */ - int iThread, /* Get result for this thread */ - const char **pzRes /* OUT: Pointer to result string */ -){ - if( pzRes ) *pzRes = pThreadSet->aThread[iThread].zMsg; - return pThreadSet->aThread[iThread].rc; -} - -/* -** Enter and leave the test case mutex. -*/ -#if 0 -static void testThreadEnterMutex(ThreadSet *p){ - pthread_mutex_lock(&p->mutex); -} -static void testThreadLeaveMutex(ThreadSet *p){ - pthread_mutex_unlock(&p->mutex); -} -#endif -#endif - -#if !defined(LSM_MUTEX_PTHREADS) -static int testThreadSupport(){ return 0; } - -#define testThreadInit(a) 0 -#define testThreadShutdown(a) -#define testThreadLaunch(a,b,c,d) 0 -#define testThreadWait(a,b) -#define testThreadSetHalt(a) -#define testThreadGetHalt(a) 0 -#define testThreadGetResult(a,b,c) 0 -#define testThreadSleep(a,b) 0 - -static void testThreadSetResult(ThreadSet *a, int b, int c, char *d, ...){ - unused_parameter(a); - unused_parameter(b); - unused_parameter(c); - unused_parameter(d); -} -#endif -/* End of threads wrapper. -*************************************************************************/ - -/************************************************************************* -** Below this point is the third part of this file - the implementation -** of the mt1.* tests. -*/ -typedef struct Mt1Test Mt1Test; -struct Mt1Test { - DbParameters param; /* Description of database to read/write */ - int nReadwrite; /* Number of read/write threads */ - int nFastReader; /* Number of fast reader threads */ - int nSlowReader; /* Number of slow reader threads */ - int nMs; /* How long to run for */ - const char *zSystem; /* Database system to test */ -}; - -typedef struct Mt1DelayCtx Mt1DelayCtx; -struct Mt1DelayCtx { - ThreadSet *pSet; /* Threadset to sleep within */ - int nMs; /* Sleep in ms */ -}; - -static void xMt1Delay(void *pCtx){ - Mt1DelayCtx *p = (Mt1DelayCtx *)pCtx; - testThreadSleep(p->pSet, p->nMs); -} - -#define MT1_THREAD_RDWR 0 -#define MT1_THREAD_SLOW 1 -#define MT1_THREAD_FAST 2 - -static void xMt1Work(lsm_db *pDb, void *pCtx){ -#if 0 - char *z = 0; - lsm_info(pDb, LSM_INFO_DB_STRUCTURE, &z); - printf("%s\n", z); - fflush(stdout); -#endif -} - -/* -** This is the main() proc for all threads in test case "mt1". 
-*/ -static void mt1Main(ThreadSet *pThreadSet, int iThread, void *pCtx){ - Mt1Test *p = (Mt1Test *)pCtx; /* Test parameters */ - Mt1DelayCtx delay; - int nRead = 0; /* Number of calls to dbReadOperation() */ - int nWrite = 0; /* Number of completed database writes */ - int rc = 0; /* Error code */ - int iPrng; /* Prng argument variable */ - TestDb *pDb; /* Database handle */ - int eType; - - delay.pSet = pThreadSet; - delay.nMs = 0; - if( iThreadnReadwrite ){ - eType = MT1_THREAD_RDWR; - }else if( iThread<(p->nReadwrite+p->nFastReader) ){ - eType = MT1_THREAD_FAST; - }else{ - eType = MT1_THREAD_SLOW; - delay.nMs = (p->nMs / 20); - } - - /* Open a new database connection. Initialize the pseudo-random number - ** argument based on the thread number. */ - iPrng = testPrngValue(iThread); - pDb = testOpen(p->zSystem, 0, &rc); - - if( rc==0 ){ - tdb_lsm_config_work_hook(pDb, xMt1Work, 0); - } - - /* Loop until either an error occurs or some other thread sets the - ** halt flag. */ - while( rc==0 && testThreadGetHalt(pThreadSet)==0 ){ - int iKey; - - /* Perform a read operation on an arbitrarily selected key. */ - iKey = (testPrngValue(iPrng++) % p->param.nKey); - dbReadOperation(&p->param, pDb, xMt1Delay, (void *)&delay, iKey, &rc); - if( rc ) continue; - nRead++; - - /* Attempt to write an arbitrary key value pair (and update the associated - ** checksum entries). dbWriteOperation() returns 1 if the write is - ** successful, or 0 if it failed with an LSM_BUSY error. */ - if( eType==MT1_THREAD_RDWR ){ - char aValue[50]; - char aRnd[25]; - - iKey = (testPrngValue(iPrng++) % p->param.nKey); - testPrngString(iPrng, aRnd, sizeof(aRnd)); - iPrng += sizeof(aRnd); - snprintf(aValue, sizeof(aValue), "%d.%s", iThread, aRnd); - nWrite += dbWriteOperation(&p->param, pDb, iKey, aValue, &rc); - } - } - testClose(&pDb); - - /* If an error has occured, set the thread error code and the threadset - ** halt flag to tell the other test threads to halt. Otherwise, set the - ** thread error code to 0 and post a message with the number of read - ** and write operations completed. 
*/ - if( rc ){ - testThreadSetResult(pThreadSet, iThread, rc, 0); - testThreadSetHalt(pThreadSet); - }else{ - testThreadSetResult(pThreadSet, iThread, 0, "r/w: %d/%d", nRead, nWrite); - } -} - -static void do_test_mt1( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - Mt1Test aTest[] = { - /* param, nReadwrite, nFastReader, nSlowReader, nMs, zSystem */ - { {10, 1000}, 4, 0, 0, 10000, 0 }, - { {10, 1000}, 4, 4, 2, 100000, 0 }, - { {10, 100000}, 4, 0, 0, 10000, 0 }, - { {10, 100000}, 4, 4, 2, 100000, 0 }, - }; - int i; - - for(i=0; *pRc==0 && iparam.nFanout, p->param.nKey, - p->nMs, p->nReadwrite, p->nFastReader, p->nSlowReader - ); - if( bRun ){ - TestDb *pDb; - ThreadSet *pSet; - int iThread; - int nThread; - - p->zSystem = zSystem; - pDb = testOpen(zSystem, 1, pRc); - - nThread = p->nReadwrite + p->nFastReader + p->nSlowReader; - pSet = testThreadInit(nThread); - for(iThread=0; *pRc==0 && iThreadnMs); - for(iThread=0; *pRc==0 && iThreadiNext = 1; - p->bEnable = 1; - p->nFail = 1; - p->pEnv = tdb_lsm_env(); -} - -static void xOomHook(OomTest *p){ - p->nFail++; -} - -static int testOomContinue(OomTest *p){ - if( p->rc!=0 || (p->iNext>1 && p->nFail==0) ){ - return 0; - } - p->nFail = 0; - testMallocOom(p->pEnv, p->iNext, 0, (void (*)(void*))xOomHook, (void *)p); - return 1; -} - -static void testOomEnable(OomTest *p, int bEnable){ - p->bEnable = bEnable; - testMallocOomEnable(p->pEnv, bEnable); -} - -static void testOomNext(OomTest *p){ - p->iNext++; -} - -static int testOomHit(OomTest *p){ - return (p->nFail>0); -} - -static int testOomFinish(OomTest *p){ - return p->rc; -} - -static void testOomAssert(OomTest *p, int bVal){ - if( bVal==0 ){ - test_failed(); - p->rc = 1; - } -} - -/* -** Test that the error code matches the state of the OomTest object passed -** as the first argument. Specifically, check that rc is LSM_NOMEM if an -** OOM error has already been injected, or LSM_OK if not. 
-*/ -static void testOomAssertRc(OomTest *p, int rc){ - testOomAssert(p, rc==LSM_OK || rc==LSM_NOMEM); - testOomAssert(p, testOomHit(p)==(rc==LSM_NOMEM) || p->bEnable==0 ); -} - -static void testOomOpen( - OomTest *pOom, - const char *zName, - lsm_db **ppDb, - int *pRc -){ - if( *pRc==LSM_OK ){ - int rc; - rc = lsm_new(tdb_lsm_env(), ppDb); - if( rc==LSM_OK ) rc = lsm_open(*ppDb, zName); - testOomAssertRc(pOom, rc); - *pRc = rc; - } -} - -static void testOomFetch( - OomTest *pOom, - lsm_db *pDb, - void *pKey, int nKey, - void *pVal, int nVal, - int *pRc -){ - testOomAssertRc(pOom, *pRc); - if( *pRc==LSM_OK ){ - lsm_cursor *pCsr; - int rc; - - rc = lsm_csr_open(pDb, &pCsr); - if( rc==LSM_OK ) rc = lsm_csr_seek(pCsr, pKey, nKey, 0); - testOomAssertRc(pOom, rc); - - if( rc==LSM_OK ){ - const void *p; int n; - testOomAssert(pOom, lsm_csr_valid(pCsr)); - - rc = lsm_csr_key(pCsr, &p, &n); - testOomAssertRc(pOom, rc); - testOomAssert(pOom, rc!=LSM_OK || (n==nKey && memcmp(pKey, p, nKey)==0) ); - } - - if( rc==LSM_OK ){ - const void *p; int n; - testOomAssert(pOom, lsm_csr_valid(pCsr)); - - rc = lsm_csr_value(pCsr, &p, &n); - testOomAssertRc(pOom, rc); - testOomAssert(pOom, rc!=LSM_OK || (n==nVal && memcmp(pVal, p, nVal)==0) ); - } - - lsm_csr_close(pCsr); - *pRc = rc; - } -} - -static void testOomWrite( - OomTest *pOom, - lsm_db *pDb, - void *pKey, int nKey, - void *pVal, int nVal, - int *pRc -){ - testOomAssertRc(pOom, *pRc); - if( *pRc==LSM_OK ){ - int rc; - - rc = lsm_insert(pDb, pKey, nKey, pVal, nVal); - testOomAssertRc(pOom, rc); - - *pRc = rc; - } -} - - -static void testOomFetchStr( - OomTest *pOom, - lsm_db *pDb, - const char *zKey, - const char *zVal, - int *pRc -){ - int nKey = strlen(zKey); - int nVal = strlen(zVal); - testOomFetch(pOom, pDb, (void *)zKey, nKey, (void *)zVal, nVal, pRc); -} - -static void testOomFetchData( - OomTest *pOom, - lsm_db *pDb, - Datasource *pData, - int iKey, - int *pRc -){ - void *pKey; int nKey; - void *pVal; int nVal; - testDatasourceEntry(pData, iKey, &pKey, &nKey, &pVal, &nVal); - testOomFetch(pOom, pDb, pKey, nKey, pVal, nVal, pRc); -} - -static void testOomWriteStr( - OomTest *pOom, - lsm_db *pDb, - const char *zKey, - const char *zVal, - int *pRc -){ - int nKey = strlen(zKey); - int nVal = strlen(zVal); - testOomWrite(pOom, pDb, (void *)zKey, nKey, (void *)zVal, nVal, pRc); -} - -static void testOomWriteData( - OomTest *pOom, - lsm_db *pDb, - Datasource *pData, - int iKey, - int *pRc -){ - void *pKey; int nKey; - void *pVal; int nVal; - testDatasourceEntry(pData, iKey, &pKey, &nKey, &pVal, &nVal); - testOomWrite(pOom, pDb, pKey, nKey, pVal, nVal, pRc); -} - -static void testOomScan( - OomTest *pOom, - lsm_db *pDb, - int bReverse, - const void *pKey, int nKey, - int nScan, - int *pRc -){ - if( *pRc==0 ){ - int rc; - int iScan = 0; - lsm_cursor *pCsr; - int (*xAdvance)(lsm_cursor *) = 0; - - - rc = lsm_csr_open(pDb, &pCsr); - testOomAssertRc(pOom, rc); - - if( rc==LSM_OK ){ - if( bReverse ){ - rc = lsm_csr_seek(pCsr, pKey, nKey, LSM_SEEK_LE); - xAdvance = lsm_csr_prev; - }else{ - rc = lsm_csr_seek(pCsr, pKey, nKey, LSM_SEEK_GE); - xAdvance = lsm_csr_next; - } - } - testOomAssertRc(pOom, rc); - - while( rc==LSM_OK && lsm_csr_valid(pCsr) && iScan "one" -** "two" -> "four" -** "three" -> "nine" -** "four" -> "sixteen" -** "five" -> "twentyfive" -** "six" -> "thirtysix" -** "seven" -> "fourtynine" -** "eight" -> "sixtyfour" -*/ -static void setup_populate_db(void){ - const char *azStr[] = { - "one", "one", - "two", "four", - "three", "nine", - "four", 
"sixteen", - "five", "twentyfive", - "six", "thirtysix", - "seven", "fourtynine", - "eight", "sixtyfour", - }; - int rc; - int ii; - lsm_db *pDb; - - testDeleteLsmdb(LSMTEST6_TESTDB); - - rc = lsm_new(tdb_lsm_env(), &pDb); - if( rc==LSM_OK ) rc = lsm_open(pDb, LSMTEST6_TESTDB); - - for(ii=0; rc==LSM_OK && iiiInsStart, pStep->nIns, pRc); - testDeleteDatasourceRange(pDb, pData, pStep->iDelStart, pStep->nDel, pRc); - if( *pRc==0 ){ - int nSave = -1; - int nBuf = 64; - lsm_db *db = tdb_lsm(pDb); - - lsm_config(db, LSM_CONFIG_AUTOFLUSH, &nSave); - lsm_config(db, LSM_CONFIG_AUTOFLUSH, &nBuf); - lsm_begin(db, 1); - lsm_commit(db, 0); - lsm_config(db, LSM_CONFIG_AUTOFLUSH, &nSave); - - *pRc = lsm_work(db, 0, 0, 0); - if( *pRc==0 ){ - *pRc = lsm_checkpoint(db, 0); - } - } -} - -static void doSetupStepArray( - TestDb *pDb, - Datasource *pData, - const SetupStep *aStep, - int nStep -){ - int i; - for(i=0; i -void testReadFile(const char *zFile, int iOff, void *pOut, int nByte, int *pRc){ - if( *pRc==0 ){ - FILE *fd; - fd = fopen(zFile, "rb"); - if( fd==0 ){ - *pRc = 1; - }else{ - if( 0!=fseek(fd, iOff, SEEK_SET) ){ - *pRc = 1; - }else{ - assert( nByte>=0 ); - if( (size_t)nByte!=fread(pOut, 1, nByte, fd) ){ - *pRc = 1; - } - } - fclose(fd); - } - } -} - -void testWriteFile( - const char *zFile, - int iOff, - void *pOut, - int nByte, - int *pRc -){ - if( *pRc==0 ){ - FILE *fd; - fd = fopen(zFile, "r+b"); - if( fd==0 ){ - *pRc = 1; - }else{ - if( 0!=fseek(fd, iOff, SEEK_SET) ){ - *pRc = 1; - }else{ - assert( nByte>=0 ); - if( (size_t)nByte!=fwrite(pOut, 1, nByte, fd) ){ - *pRc = 1; - } - } - fclose(fd); - } - } -} - -static ShmHeader *getShmHeader(const char *zDb){ - int rc = 0; - char *zShm = testMallocPrintf("%s-shm", zDb); - ShmHeader *pHdr; - - pHdr = testMalloc(sizeof(ShmHeader)); - testReadFile(zShm, 0, (void *)pHdr, sizeof(ShmHeader), &rc); - assert( rc==0 ); - - return pHdr; -} - -/* -** This function makes a copy of the three files associated with LSM -** database zDb (i.e. if zDb is "test.db", it makes copies of "test.db", -** "test.db-log" and "test.db-shm"). -** -** It then opens a new database connection to the copy with the xLock() call -** instrumented so that it appears that some other process already connected -** to the db (holding a shared lock on DMS2). This prevents recovery from -** running. Then: -** -** 1) Check that the checksum of the database is zCksum. -** 2) Write a few keys to the database. Then delete the same keys. -** 3) Check that the checksum is zCksum. -** 4) Flush the db to disk and run a checkpoint. -** 5) Check once more that the checksum is still zCksum. -*/ -static void doLiveRecovery(const char *zDb, const char *zCksum, int *pRc){ - if( *pRc==LSM_OK ){ - const DatasourceDefn defn = {TEST_DATASOURCE_RANDOM, 20, 25, 100, 500}; - Datasource *pData; - const char *zCopy = "testcopy.lsm"; - char zCksum2[TEST_CKSUM_BYTES]; - TestDb *pDb = 0; - int rc; - - pData = testDatasourceNew(&defn); - - testCopyLsmdb(zDb, zCopy); - rc = tdb_lsm_open("test_no_recovery=1", zCopy, 0, &pDb); - if( rc==0 ){ - ShmHeader *pHdr; - lsm_db *db; - testCksumDatabase(pDb, zCksum2); - testCompareStr(zCksum, zCksum2, &rc); - - testWriteDatasourceRange(pDb, pData, 1, 10, &rc); - testDeleteDatasourceRange(pDb, pData, 1, 10, &rc); - - /* Test that the two tree-headers are now consistent. 
*/ - pHdr = getShmHeader(zCopy); - if( rc==0 && memcmp(&pHdr->hdr1, &pHdr->hdr2, sizeof(pHdr->hdr1)) ){ - rc = 1; - } - testFree(pHdr); - - if( rc==0 ){ - int nBuf = 64; - db = tdb_lsm(pDb); - lsm_config(db, LSM_CONFIG_AUTOFLUSH, &nBuf); - lsm_begin(db, 1); - lsm_commit(db, 0); - rc = lsm_work(db, 0, 0, 0); - } - - testCksumDatabase(pDb, zCksum2); - testCompareStr(zCksum, zCksum2, &rc); - } - - testDatasourceFree(pData); - testClose(&pDb); - testDeleteLsmdb(zCopy); - *pRc = rc; - } -} - -static void doWriterCrash1(int *pRc){ - const int nWrite = 2000; - const int nStep = 10; - const int iWriteStart = 20000; - int rc = 0; - TestDb *pDb = 0; - Datasource *pData = 0; - - rc = tdb_lsm_open("autowork=0", "testdb.lsm", 1, &pDb); - if( rc==0 ){ - int iDot = 0; - char zCksum[TEST_CKSUM_BYTES]; - int i; - setupDatabase1(pDb, &pData); - testCksumDatabase(pDb, zCksum); - testBegin(pDb, 2, &rc); - for(i=0; rc==0 && ihdr1, &pHdr1->hdr1, sizeof(pHdr1->hdr1)); - pHdr2->bWriter = 1; - testWriteFile("testdb.lsm-shm", 0, (void *)pHdr2, sizeof(ShmHeader), &rc); - doLiveRecovery("testdb.lsm", zCksum1, &rc); - - /* If both tree-headers are valid, tree-header-1 is used. */ - memcpy(&pHdr2->hdr1, &pHdr2->hdr2, sizeof(pHdr1->hdr1)); - memcpy(&pHdr2->hdr2, &pHdr1->hdr1, sizeof(pHdr1->hdr1)); - pHdr2->bWriter = 1; - testWriteFile("testdb.lsm-shm", 0, (void *)pHdr2, sizeof(ShmHeader), &rc); - doLiveRecovery("testdb.lsm", zCksum2, &rc); - - /* If tree-header 1 is invalid, tree-header-2 is used */ - memcpy(&pHdr2->hdr2, &pHdr2->hdr1, sizeof(pHdr1->hdr1)); - pHdr2->hdr1.aCksum[0] = 5; - pHdr2->hdr1.aCksum[0] = 6; - pHdr2->bWriter = 1; - testWriteFile("testdb.lsm-shm", 0, (void *)pHdr2, sizeof(ShmHeader), &rc); - doLiveRecovery("testdb.lsm", zCksum2, &rc); - - /* If tree-header 2 is invalid, tree-header-1 is used */ - memcpy(&pHdr2->hdr1, &pHdr2->hdr2, sizeof(pHdr1->hdr1)); - pHdr2->hdr2.aCksum[0] = 5; - pHdr2->hdr2.aCksum[0] = 6; - pHdr2->bWriter = 1; - testWriteFile("testdb.lsm-shm", 0, (void *)pHdr2, sizeof(ShmHeader), &rc); - doLiveRecovery("testdb.lsm", zCksum2, &rc); - - testFree(pHdr1); - testFree(pHdr2); - testClose(&pDb); - } - - *pRc = rc; -} - -void do_writer_crash_test(const char *zPattern, int *pRc){ - struct Test { - const char *zName; - void (*xFunc)(int *); - } aTest[] = { - { "writercrash1.lsm", doWriterCrash1 }, - { "writercrash2.lsm", doWriterCrash2 }, - }; - int i; - for(i=0; izName) ){ - p->xFunc(pRc); - testCaseFinish(*pRc); - } - } - -} diff --git a/ext/lsm1/lsm-test/lsmtest9.c b/ext/lsm1/lsm-test/lsmtest9.c deleted file mode 100644 index b01de0d4e5..0000000000 --- a/ext/lsm1/lsm-test/lsmtest9.c +++ /dev/null @@ -1,140 +0,0 @@ - -#include "lsmtest.h" - -#define DATA_SEQUENTIAL TEST_DATASOURCE_SEQUENCE -#define DATA_RANDOM TEST_DATASOURCE_RANDOM - -typedef struct Datatest4 Datatest4; - -/* -** Test overview: -** -** 1. Insert (Datatest4.nRec) records into a database. -** -** 2. Repeat (Datatest4.nRepeat) times: -** -** 2a. Delete 2/3 of the records in the database. -** -** 2b. Run lsm_work(nMerge=1). -** -** 2c. Insert as many records as were deleted in 2a. -** -** 2d. Check database content is as expected. -** -** 2e. If (Datatest4.bReopen) is true, close and reopen the database. 
-*/ -struct Datatest4 { - /* Datasource definition */ - DatasourceDefn defn; - - int nRec; - int nRepeat; - int bReopen; -}; - -static void doDataTest4( - const char *zSystem, /* Database system to test */ - Datatest4 *p, /* Structure containing test parameters */ - int *pRc /* OUT: Error code */ -){ - lsm_db *db = 0; - TestDb *pDb; - TestDb *pControl; - Datasource *pData; - int i; - int rc = 0; - int iDot = 0; - int bMultiThreaded = 0; /* True for MT LSM database */ - - int nRecOn3 = (p->nRec / 3); - int iData = 0; - - /* Start the test case, open a database and allocate the datasource. */ - rc = testControlDb(&pControl); - pDb = testOpen(zSystem, 1, &rc); - pData = testDatasourceNew(&p->defn); - if( rc==0 ){ - db = tdb_lsm(pDb); - bMultiThreaded = tdb_lsm_multithread(pDb); - } - - testWriteDatasourceRange(pControl, pData, iData, nRecOn3*3, &rc); - testWriteDatasourceRange(pDb, pData, iData, nRecOn3*3, &rc); - - for(i=0; rc==0 && inRepeat; i++){ - - testDeleteDatasourceRange(pControl, pData, iData, nRecOn3*2, &rc); - testDeleteDatasourceRange(pDb, pData, iData, nRecOn3*2, &rc); - - if( db ){ - int nDone; -#if 0 - fprintf(stderr, "lsm_work() start...\n"); fflush(stderr); -#endif - do { - nDone = 0; - rc = lsm_work(db, 1, (1<<30), &nDone); - }while( rc==0 && nDone>0 ); - if( bMultiThreaded && rc==LSM_BUSY ) rc = LSM_OK; -#if 0 - fprintf(stderr, "lsm_work() done...\n"); fflush(stderr); -#endif - } - -if( i+1nRepeat ){ - iData += (nRecOn3*2); - testWriteDatasourceRange(pControl, pData, iData+nRecOn3, nRecOn3*2, &rc); - testWriteDatasourceRange(pDb, pData, iData+nRecOn3, nRecOn3*2, &rc); - - testCompareDb(pData, nRecOn3*3, iData, pControl, pDb, &rc); - - /* If Datatest4.bReopen is true, close and reopen the database */ - if( p->bReopen ){ - testReopen(&pDb, &rc); - if( rc==0 ) db = tdb_lsm(pDb); - } -} - - /* Update the progress dots... */ - testCaseProgress(i, p->nRepeat, testCaseNDot(), &iDot); - } - - testClose(&pDb); - testClose(&pControl); - testDatasourceFree(pData); - testCaseFinish(rc); - *pRc = rc; -} - -static char *getName4(const char *zSystem, Datatest4 *pTest){ - char *zRet; - char *zData; - zData = testDatasourceName(&pTest->defn); - zRet = testMallocPrintf("data4.%s.%s.%d.%d.%d", - zSystem, zData, pTest->nRec, pTest->nRepeat, pTest->bReopen - ); - testFree(zData); - return zRet; -} - -void test_data_4( - const char *zSystem, /* Database system name */ - const char *zPattern, /* Run test cases that match this pattern */ - int *pRc /* IN/OUT: Error code */ -){ - Datatest4 aTest[] = { - /* defn, nRec, nRepeat, bReopen */ - { {DATA_RANDOM, 20,25, 500,600}, 10000, 10, 0 }, - { {DATA_RANDOM, 20,25, 500,600}, 10000, 10, 1 }, - }; - - int i; - - for(i=0; *pRc==LSM_OK && ieType ){ - case TEST_DATASOURCE_RANDOM: { - int nRange = (1 + p->nMaxKey - p->nMinKey); - nKey = (int)( testPrngValue((u32)iData) % nRange ) + p->nMinKey; - testPrngString((u32)iData, p->aKey, nKey); - break; - } - case TEST_DATASOURCE_SEQUENCE: - nKey = sprintf(p->aKey, "%012d", iData); - break; - } - *ppKey = p->aKey; - *pnKey = nKey; - } - if( ppVal ){ - u32 nVal = testPrngValue((u32)iData)%(1+p->nMaxVal-p->nMinVal)+p->nMinVal; - testPrngString((u32)~iData, p->aVal, (int)nVal); - *ppVal = p->aVal; - *pnVal = (int)nVal; - } -} - -void testDatasourceFree(Datasource *p){ - testFree(p); -} - -/* -** Return a pointer to a nul-terminated string that corresponds to the -** contents of the datasource-definition passed as the first argument. -** The caller should eventually free the returned pointer using testFree(). 
-*/ -char *testDatasourceName(const DatasourceDefn *p){ - char *zRet; - zRet = testMallocPrintf("%s.(%d-%d).(%d-%d)", - (p->eType==TEST_DATASOURCE_SEQUENCE ? "seq" : "rnd"), - p->nMinKey, p->nMaxKey, - p->nMinVal, p->nMaxVal - ); - return zRet; -} - -Datasource *testDatasourceNew(const DatasourceDefn *pDefn){ - Datasource *p; - int nMinKey; - int nMaxKey; - int nMinVal; - int nMaxVal; - - if( pDefn->eType==TEST_DATASOURCE_SEQUENCE ){ - nMinKey = 128; - nMaxKey = 128; - }else{ - nMinKey = MAX(0, pDefn->nMinKey); - nMaxKey = MAX(nMinKey, pDefn->nMaxKey); - } - nMinVal = MAX(0, pDefn->nMinVal); - nMaxVal = MAX(nMinVal, pDefn->nMaxVal); - - p = (Datasource *)testMalloc(sizeof(Datasource) + nMaxKey + nMaxVal + 1); - p->eType = pDefn->eType; - p->nMinKey = nMinKey; - p->nMinVal = nMinVal; - p->nMaxKey = nMaxKey; - p->nMaxVal = nMaxVal; - - p->aKey = (char *)&p[1]; - p->aVal = &p->aKey[nMaxKey]; - return p; -}; diff --git a/ext/lsm1/lsm-test/lsmtest_func.c b/ext/lsm1/lsm-test/lsmtest_func.c deleted file mode 100644 index eb8346aa83..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_func.c +++ /dev/null @@ -1,177 +0,0 @@ - -#include "lsmtest.h" - - -int do_work(int nArg, char **azArg){ - struct Option { - const char *zName; - } aOpt [] = { - { "-nmerge" }, - { "-nkb" }, - { 0 } - }; - - lsm_db *pDb; - int rc; - int i; - const char *zDb; - int nMerge = 1; - int nKB = (1<<30); - - if( nArg==0 ) goto usage; - zDb = azArg[nArg-1]; - for(i=0; i<(nArg-1); i++){ - int iSel; - rc = testArgSelect(aOpt, "option", azArg[i], &iSel); - if( rc ) return rc; - switch( iSel ){ - case 0: - i++; - if( i==(nArg-1) ) goto usage; - nMerge = atoi(azArg[i]); - break; - case 1: - i++; - if( i==(nArg-1) ) goto usage; - nKB = atoi(azArg[i]); - break; - } - } - - rc = lsm_new(0, &pDb); - if( rc!=LSM_OK ){ - testPrintError("lsm_open(): rc=%d\n", rc); - }else{ - rc = lsm_open(pDb, zDb); - if( rc!=LSM_OK ){ - testPrintError("lsm_open(): rc=%d\n", rc); - }else{ - int n = -1; - lsm_config(pDb, LSM_CONFIG_BLOCK_SIZE, &n); - n = n*2; - lsm_config(pDb, LSM_CONFIG_AUTOCHECKPOINT, &n); - - rc = lsm_work(pDb, nMerge, nKB, 0); - if( rc!=LSM_OK ){ - testPrintError("lsm_work(): rc=%d\n", rc); - } - } - } - if( rc==LSM_OK ){ - rc = lsm_checkpoint(pDb, 0); - } - - lsm_close(pDb); - return rc; - - usage: - testPrintUsage("?-optimize? ?-n N? DATABASE"); - return -1; -} - - -/* -** lsmtest show ?-config LSM-CONFIG? DATABASE ?COMMAND ?PGNO?? 
-*/ -int do_show(int nArg, char **azArg){ - lsm_db *pDb; - int rc; - const char *zDb; - - int eOpt = LSM_INFO_DB_STRUCTURE; - unsigned int iPg = 0; - int bConfig = 0; - const char *zConfig = ""; - - struct Option { - const char *zName; - int bConfig; - int eOpt; - } aOpt [] = { - { "array", 0, LSM_INFO_ARRAY_STRUCTURE }, - { "array-pages", 0, LSM_INFO_ARRAY_PAGES }, - { "blocksize", 1, LSM_CONFIG_BLOCK_SIZE }, - { "pagesize", 1, LSM_CONFIG_PAGE_SIZE }, - { "freelist", 0, LSM_INFO_FREELIST }, - { "page-ascii", 0, LSM_INFO_PAGE_ASCII_DUMP }, - { "page-hex", 0, LSM_INFO_PAGE_HEX_DUMP }, - { 0, 0 } - }; - - char *z = 0; - int iDb = 0; /* Index of DATABASE in azArg[] */ - - /* Check if there is a "-config" option: */ - if( nArg>2 && strlen(azArg[0])>1 - && memcmp(azArg[0], "-config", strlen(azArg[0]))==0 - ){ - zConfig = azArg[1]; - iDb = 2; - } - if( nArg<(iDb+1) ) goto usage; - - if( nArg>(iDb+1) ){ - rc = testArgSelect(aOpt, "option", azArg[iDb+1], &eOpt); - if( rc!=0 ) return rc; - bConfig = aOpt[eOpt].bConfig; - eOpt = aOpt[eOpt].eOpt; - if( (bConfig==0 && eOpt==LSM_INFO_FREELIST) - || (bConfig==1 && eOpt==LSM_CONFIG_BLOCK_SIZE) - || (bConfig==1 && eOpt==LSM_CONFIG_PAGE_SIZE) - ){ - if( nArg!=(iDb+2) ) goto usage; - }else{ - if( nArg!=(iDb+3) ) goto usage; - iPg = atoi(azArg[iDb+2]); - } - } - zDb = azArg[iDb]; - - rc = lsm_new(0, &pDb); - tdb_lsm_configure(pDb, zConfig); - if( rc!=LSM_OK ){ - testPrintError("lsm_new(): rc=%d\n", rc); - }else{ - rc = lsm_open(pDb, zDb); - if( rc!=LSM_OK ){ - testPrintError("lsm_open(): rc=%d\n", rc); - } - } - - if( rc==LSM_OK ){ - if( bConfig==0 ){ - switch( eOpt ){ - case LSM_INFO_DB_STRUCTURE: - case LSM_INFO_FREELIST: - rc = lsm_info(pDb, eOpt, &z); - break; - case LSM_INFO_ARRAY_STRUCTURE: - case LSM_INFO_ARRAY_PAGES: - case LSM_INFO_PAGE_ASCII_DUMP: - case LSM_INFO_PAGE_HEX_DUMP: - rc = lsm_info(pDb, eOpt, iPg, &z); - break; - default: - assert( !"no chance" ); - } - - if( rc==LSM_OK ){ - printf("%s\n", z ? z : ""); - fflush(stdout); - } - lsm_free(lsm_get_env(pDb), z); - }else{ - int iRes = -1; - lsm_config(pDb, eOpt, &iRes); - printf("%d\n", iRes); - fflush(stdout); - } - } - - lsm_close(pDb); - return rc; - - usage: - testPrintUsage("DATABASE ?array|page-ascii|page-hex PGNO?"); - return -1; -} diff --git a/ext/lsm1/lsm-test/lsmtest_io.c b/ext/lsm1/lsm-test/lsmtest_io.c deleted file mode 100644 index 7aa5d10948..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_io.c +++ /dev/null @@ -1,248 +0,0 @@ - -/* -** SUMMARY -** -** This file implements the 'io' subcommand of the test program. It is used -** for testing the performance of various combinations of write() and fsync() -** system calls. All operations occur on a single file, which may or may not -** exist when a test is started. -** -** A test consists of a series of commands. Each command is either a write -** or an fsync. A write is specified as "@", where -** is the amount of data written, and is the offset of the file -** to write to. An or an is specified as an integer number -** of bytes. Or, if postfixed with a "K", "M" or "G", an integer number of -** KB, MB or GB, respectively. An fsync is simply "S". All commands are -** case-insensitive. -** -** Example test program: -** -** 2M@6M 1492K@4M S 4096@4K S -** -** This program writes 2 MB of data starting at the offset 6MB offset of -** the file, followed by 1492 KB of data written at the 4MB offset of the -** file, followed by a call to fsync(), a write of 4KB of data at byte -** offset 4096, and finally another call to fsync(). 
-** -** Commands may either be specified on the command line (one command per -** command line argument) or read from stdin. Commands read from stdin -** must be separated by white-space. -** -** COMMAND LINE INVOCATION -** -** The sub-command implemented in this file must be invoked with at least -** two arguments - the path to the file to write to and the page-size to -** use for writing. If there are more than two arguments, then each -** subsequent argument is assumed to be a test command. If there are exactly -** two arguments, the test commands are read from stdin. -** -** A write command does not result in a single call to system call write(). -** Instead, the specified region is written sequentially using one or -** more calls to write(), each of which writes not more than one page of -** data. For example, if the page-size is 4KB, the command "2M@6M" results -** in 512 calls to write(), each of which writes 4KB of data. -** -** EXAMPLES -** -** Two equivalent examples: -** -** $ lsmtest io testfile.db 4KB 2M@6M 1492K@4M S 4096@4K S -** 3544K written in 129 ms -** $ echo "2M@6M 1492K@4M S 4096@4K S" | lsmtest io testfile.db 4096 -** 3544K written in 127 ms -** -*/ - -#include "lsmtest.h" - -typedef struct IoContext IoContext; - -struct IoContext { - int fd; - int nWrite; -}; - -/* -** As isspace(3) -*/ -static int safe_isspace(char c){ - if( c&0x80) return 0; - return isspace(c); -} - -/* -** As isdigit(3) -*/ -static int safe_isdigit(char c){ - if( c&0x80) return 0; - return isdigit(c); -} - -static i64 getNextSize(char *zIn, char **pzOut, int *pRc){ - i64 iRet = 0; - if( *pRc==0 ){ - char *z = zIn; - - if( !safe_isdigit(*z) ){ - *pRc = 1; - return 0; - } - - /* Process digits */ - while( safe_isdigit(*z) ){ - iRet = iRet*10 + (*z - '0'); - z++; - } - - /* Process suffix */ - switch( *z ){ - case 'k': case 'K': - iRet = iRet * 1024; - z++; - break; - - case 'm': case 'M': - iRet = iRet * 1024 * 1024; - z++; - break; - - case 'g': case 'G': - iRet = iRet * 1024 * 1024 * 1024; - z++; - break; - } - - if( pzOut ) *pzOut = z; - } - return iRet; -} - -static int doOneCmd( - IoContext *pCtx, - u8 *aData, - int pgsz, - char *zCmd, - char **pzOut -){ - char c; - char *z = zCmd; - - while( safe_isspace(*z) ) z++; - c = *z; - - if( c==0 ){ - if( pzOut ) *pzOut = z; - return 0; - } - - if( c=='s' || c=='S' ){ - if( pzOut ) *pzOut = &z[1]; - return fdatasync(pCtx->fd); - } - - if( safe_isdigit(c) ){ - i64 iOff = 0; - int nByte = 0; - int rc = 0; - int nPg; - int iPg; - - nByte = (int)getNextSize(z, &z, &rc); - if( rc || *z!='@' ) goto bad_command; - z++; - iOff = getNextSize(z, &z, &rc); - if( rc || (safe_isspace(*z)==0 && *z!='\0') ) goto bad_command; - if( pzOut ) *pzOut = z; - - nPg = (nByte+pgsz-1) / pgsz; - lseek(pCtx->fd, (off_t)iOff, SEEK_SET); - for(iPg=0; iPgfd, aData, pgsz); - } - pCtx->nWrite += nByte/1024; - - return 0; - } - - bad_command: - testPrintError("unrecognized command: %s", zCmd); - return 1; -} - -static int readStdin(char **pzOut){ - int nAlloc = 128; - char *zOut = 0; - int nOut = 0; - - while( !feof(stdin) ){ - int nRead; - - nAlloc = nAlloc*2; - zOut = realloc(zOut, nAlloc); - nRead = fread(&zOut[nOut], 1, nAlloc-nOut-1, stdin); - - if( nRead==0 ) break; - nOut += nRead; - zOut[nOut] = '\0'; - } - - *pzOut = zOut; - return 0; -} - -int do_io(int nArg, char **azArg){ - IoContext ctx; - int pgsz; - char *zFile; - char *zPgsz; - int i; - int rc = 0; - - char *zStdin = 0; - char *z; - - u8 *aData; - - memset(&ctx, 0, sizeof(IoContext)); - if( nArg<2 ){ - 
testPrintUsage("FILE PGSZ ?CMD-1 ...?"); - return -1; - } - zFile = azArg[0]; - zPgsz = azArg[1]; - - pgsz = (int)getNextSize(zPgsz, 0, &rc); - if( pgsz<=0 ){ - testPrintError("Ridiculous page size: %d", pgsz); - return -1; - } - aData = malloc(pgsz); - memset(aData, 0x77, pgsz); - - ctx.fd = open(zFile, O_RDWR|O_CREAT|_O_BINARY, 0644); - if( ctx.fd<0 ){ - perror("open: "); - return -1; - } - - if( nArg==2 ){ - readStdin(&zStdin); - testTimeInit(); - z = zStdin; - while( *z && rc==0 ){ - rc = doOneCmd(&ctx, aData, pgsz, z, &z); - } - }else{ - testTimeInit(); - for(i=2; i - -void test_failed(){ - assert( 0 ); - return; -} - -#define testSetError(rc) testSetErrorFunc(rc, pRc, __FILE__, __LINE__) -static void testSetErrorFunc(int rc, int *pRc, const char *zFile, int iLine){ - if( rc ){ - *pRc = rc; - fprintf(stderr, "FAILED (%s:%d) rc=%d ", zFile, iLine, rc); - test_failed(); - } -} - -static int lsm_memcmp(u8 *a, u8 *b, int c){ - int i; - for(i=0; i0 && lsm_memcmp(pVal, pDbVal, nVal))) ){ - testSetError(1); - } - } -} - -void testWrite( - TestDb *pDb, /* Database handle */ - void *pKey, int nKey, /* Key to query database for */ - void *pVal, int nVal, /* Value to write */ - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==0 ){ - int rc; -static int nCall = 0; -nCall++; - rc = tdb_write(pDb, pKey, nKey, pVal, nVal); - testSetError(rc); - } -} -void testDelete( - TestDb *pDb, /* Database handle */ - void *pKey, int nKey, /* Key to query database for */ - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==0 ){ - int rc; - *pRc = rc = tdb_delete(pDb, pKey, nKey); - testSetError(rc); - } -} -void testDeleteRange( - TestDb *pDb, /* Database handle */ - void *pKey1, int nKey1, - void *pKey2, int nKey2, - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==0 ){ - int rc; - *pRc = rc = tdb_delete_range(pDb, pKey1, nKey1, pKey2, nKey2); - testSetError(rc); - } -} - -void testBegin(TestDb *pDb, int iTrans, int *pRc){ - if( *pRc==0 ){ - int rc; - rc = tdb_begin(pDb, iTrans); - testSetError(rc); - } -} -void testCommit(TestDb *pDb, int iTrans, int *pRc){ - if( *pRc==0 ){ - int rc; - rc = tdb_commit(pDb, iTrans); - testSetError(rc); - } -} -#if 0 /* unused */ -static void testRollback(TestDb *pDb, int iTrans, int *pRc){ - if( *pRc==0 ){ - int rc; - rc = tdb_rollback(pDb, iTrans); - testSetError(rc); - } -} -#endif - -void testWriteStr( - TestDb *pDb, /* Database handle */ - const char *zKey, /* Key to query database for */ - const char *zVal, /* Value to write */ - int *pRc /* IN/OUT: Error code */ -){ - int nVal = (zVal ? strlen(zVal) : 0); - testWrite(pDb, (void *)zKey, strlen(zKey), (void *)zVal, nVal, pRc); -} - -#if 0 /* unused */ -static void testDeleteStr(TestDb *pDb, const char *zKey, int *pRc){ - testDelete(pDb, (void *)zKey, strlen(zKey), pRc); -} -#endif -void testFetchStr( - TestDb *pDb, /* Database handle */ - const char *zKey, /* Key to query database for */ - const char *zVal, /* Value to write */ - int *pRc /* IN/OUT: Error code */ -){ - int nVal = (zVal ? 
strlen(zVal) : 0); - testFetch(pDb, (void *)zKey, strlen(zKey), (void *)zVal, nVal, pRc); -} - -void testFetchCompare( - TestDb *pControl, - TestDb *pDb, - void *pKey, int nKey, - int *pRc -){ - int rc; - void *pDbVal1; - void *pDbVal2; - int nDbVal1; - int nDbVal2; - - static int nCall = 0; - nCall++; - - rc = tdb_fetch(pControl, pKey, nKey, &pDbVal1, &nDbVal1); - testSetError(rc); - - rc = tdb_fetch(pDb, pKey, nKey, &pDbVal2, &nDbVal2); - testSetError(rc); - - if( *pRc==0 - && (nDbVal1!=nDbVal2 || (nDbVal1>0 && memcmp(pDbVal1, pDbVal2, nDbVal1))) - ){ - testSetError(1); - } -} - -typedef struct ScanResult ScanResult; -struct ScanResult { - TestDb *pDb; - - int nRow; - u32 cksum1; - u32 cksum2; - void *pKey1; int nKey1; - void *pKey2; int nKey2; - - int bReverse; - int nPrevKey; - u8 aPrevKey[256]; -}; - -static int keyCompare(void *pKey1, int nKey1, void *pKey2, int nKey2){ - int res; - res = memcmp(pKey1, pKey2, MIN(nKey1, nKey2)); - if( res==0 ){ - res = nKey1 - nKey2; - } - return res; -} - -int test_scan_debug = 0; - -static void scanCompareCb( - void *pCtx, - void *pKey, int nKey, - void *pVal, int nVal -){ - ScanResult *p = (ScanResult *)pCtx; - u8 *aKey = (u8 *)pKey; - u8 *aVal = (u8 *)pVal; - int i; - - if( test_scan_debug ){ - printf("%d: %.*s\n", p->nRow, nKey, (char *)pKey); - fflush(stdout); - } -#if 0 - if( test_scan_debug ) printf("%.20s\n", (char *)pVal); -#endif - -#if 0 - /* Check tdb_fetch() matches */ - int rc = 0; - testFetch(p->pDb, pKey, nKey, pVal, nVal, &rc); - assert( rc==0 ); -#endif - - /* Update the checksum data */ - p->nRow++; - for(i=0; icksum1 += ((int)aKey[i] << (i&0x0F)); - p->cksum2 += p->cksum1; - } - for(i=0; icksum1 += ((int)aVal[i] << (i&0x0F)); - p->cksum2 += p->cksum1; - } - - /* Check that the delivered row is not out of order. */ - if( nKey<(int)sizeof(p->aPrevKey) ){ - if( p->nPrevKey ){ - int res = keyCompare(p->aPrevKey, p->nPrevKey, pKey, nKey); - if( (res<0 && p->bReverse) || (res>0 && p->bReverse==0) ){ - testPrintError("Returned key out of order at %s:%d\n", - __FILE__, __LINE__ - ); - } - } - - p->nPrevKey = nKey; - memcpy(p->aPrevKey, pKey, MIN(p->nPrevKey, nKey)); - } - - /* Check that the delivered row is within range. */ - if( p->pKey1 && ( - (memcmp(p->pKey1, pKey, MIN(p->nKey1, nKey))>0) - || (memcmp(p->pKey1, pKey, MIN(p->nKey1, nKey))==0 && p->nKey1>nKey) - )){ - testPrintError("Returned key too small at %s:%d\n", __FILE__, __LINE__); - } - if( p->pKey2 && ( - (memcmp(p->pKey2, pKey, MIN(p->nKey2, nKey))<0) - || (memcmp(p->pKey2, pKey, MIN(p->nKey2, nKey))==0 && p->nKey2=0 ); - zRet = (char *)testMalloc(nByte+1); - vsnprintf(zRet, nByte+1, zFormat, ap); - return zRet; -} - -char *testMallocPrintf(const char *zFormat, ...){ - va_list ap; - char *zRet; - - va_start(ap, zFormat); - zRet = testMallocVPrintf(zFormat, ap); - va_end(ap); - - return zRet; -} - - -/* -** A wrapper around malloc(3). -** -** This function should be used for all allocations made by test procedures. -** It has the following properties: -** -** * Test code may assume that allocations may not fail. -** * Returned memory is always zeroed. -** -** Allocations made using testMalloc() should be freed using testFree(). 
-*/ -void *testMalloc(int n){ - u8 *p = (u8*)malloc(n + 8); - memset(p, 0, n+8); - *(int*)p = n; - return (void*)&p[8]; -} - -void *testMallocCopy(void *pCopy, int nByte){ - void *pRet = testMalloc(nByte); - memcpy(pRet, pCopy, nByte); - return pRet; -} - -void *testRealloc(void *ptr, int n){ - if( ptr ){ - u8 *p = (u8*)ptr - 8; - int nOrig = *(int*)p; - p = (u8*)realloc(p, n+8); - if( nOrig1 ){ - testPrintError("Usage: test ?PATTERN?\n"); - return 1; - } - if( nArg==1 ){ - zPattern = azArg[0]; - } - - for(j=0; tdb_system_name(j); j++){ - rc = 0; - - test_data_1(tdb_system_name(j), zPattern, &rc); - test_data_2(tdb_system_name(j), zPattern, &rc); - test_data_3(tdb_system_name(j), zPattern, &rc); - test_data_4(tdb_system_name(j), zPattern, &rc); - test_rollback(tdb_system_name(j), zPattern, &rc); - test_mc(tdb_system_name(j), zPattern, &rc); - test_mt(tdb_system_name(j), zPattern, &rc); - - if( rc ) nFail++; - } - - rc = 0; - test_oom(zPattern, &rc); - if( rc ) nFail++; - - rc = 0; - test_api(zPattern, &rc); - if( rc ) nFail++; - - rc = 0; - do_crash_test(zPattern, &rc); - if( rc ) nFail++; - - rc = 0; - do_writer_crash_test(zPattern, &rc); - if( rc ) nFail++; - - return (nFail!=0); -} - -static lsm_db *configure_lsm_db(TestDb *pDb){ - lsm_db *pLsm; - pLsm = tdb_lsm(pDb); - if( pLsm ){ - tdb_lsm_config_str(pDb, "mmap=1 autowork=1 automerge=4 worker_automerge=4"); - } - return pLsm; -} - -typedef struct WriteHookEvent WriteHookEvent; -struct WriteHookEvent { - i64 iOff; - int nData; - int nUs; -}; -WriteHookEvent prev = {0, 0, 0}; - -static void flushPrev(FILE *pOut){ - if( prev.nData ){ - fprintf(pOut, "w %s %lld %d %d\n", "d", prev.iOff, prev.nData, prev.nUs); - prev.nData = 0; - } -} - -#if 0 /* unused */ -static void do_speed_write_hook2( - void *pCtx, - int bLog, - i64 iOff, - int nData, - int nUs -){ - FILE *pOut = (FILE *)pCtx; - if( bLog ) return; - - if( prev.nData && nData && iOff==prev.iOff+prev.nData ){ - prev.nData += nData; - prev.nUs += nUs; - }else{ - flushPrev(pOut); - if( nData==0 ){ - fprintf(pOut, "s %s 0 0 %d\n", (bLog ? "l" : "d"), nUs); - }else{ - prev.iOff = iOff; - prev.nData = nData; - prev.nUs = nUs; - } - } -} -#endif - -#define ST_REPEAT 0 -#define ST_WRITE 1 -#define ST_PAUSE 2 -#define ST_FETCH 3 -#define ST_SCAN 4 -#define ST_NSCAN 5 -#define ST_KEYSIZE 6 -#define ST_VALSIZE 7 -#define ST_TRANS 8 - - -static void print_speed_test_help(){ - printf( -"\n" -"Repeat the following $repeat times:\n" -" 1. Insert $write key-value pairs. One transaction for each write op.\n" -" 2. Pause for $pause ms.\n" -" 3. Perform $fetch queries on the database.\n" -"\n" -" Keys are $keysize bytes in size. 
Values are $valsize bytes in size\n" -" Both keys and values are pseudo-randomly generated\n" -"\n" -"Options are:\n" -" -repeat $repeat (default value 10)\n" -" -write $write (default value 10000)\n" -" -pause $pause (default value 0)\n" -" -fetch $fetch (default value 0)\n" -" -keysize $keysize (default value 12)\n" -" -valsize $valsize (default value 100)\n" -" -system $system (default value \"lsm\")\n" -" -trans $trans (default value 0)\n" -"\n" -); -} - -int do_speed_test2(int nArg, char **azArg){ - struct Option { - const char *zOpt; - int eVal; - int iDefault; - } aOpt[] = { - { "-repeat", ST_REPEAT, 10}, - { "-write", ST_WRITE, 10000}, - { "-pause", ST_PAUSE, 0}, - { "-fetch", ST_FETCH, 0}, - { "-scan", ST_SCAN, 0}, - { "-nscan", ST_NSCAN, 0}, - { "-keysize", ST_KEYSIZE, 12}, - { "-valsize", ST_VALSIZE, 100}, - { "-trans", ST_TRANS, 0}, - { "-system", -1, 0}, - { "help", -2, 0}, - {0, 0, 0} - }; - int i; - int aParam[9]; - int rc = 0; - int bReadonly = 0; - int nContent = 0; - - TestDb *pDb; - Datasource *pData; - DatasourceDefn defn = { TEST_DATASOURCE_RANDOM, 0, 0, 0, 0 }; - char *zSystem = ""; - int bLsm = 1; - FILE *pLog = 0; - -#ifdef NDEBUG - /* If NDEBUG is defined, disable the dynamic memory related checks in - ** lsmtest_mem.c. They slow things down. */ - testMallocUninstall(tdb_lsm_env()); -#endif - - /* Initialize aParam[] with default values. */ - for(i=0; i=0 ){ - aParam[aOpt[iSel].eVal] = atoi(azArg[i+1]); - }else{ - zSystem = azArg[i+1]; - bLsm = 0; -#if 0 - for(j=0; zSystem[j]; j++){ - if( zSystem[j]=='=' ) bLsm = 1; - } -#endif - } - } - - printf("#"); - for(i=0; i=0 ){ - printf(" %s=%d", &aOpt[i].zOpt[1], aParam[aOpt[i].eVal]); - }else if( aOpt[i].eVal==-1 ){ - printf(" %s=\"%s\"", &aOpt[i].zOpt[1], zSystem); - } - } - } - printf("\n"); - - defn.nMinKey = defn.nMaxKey = aParam[ST_KEYSIZE]; - defn.nMinVal = defn.nMaxVal = aParam[ST_VALSIZE]; - pData = testDatasourceNew(&defn); - - if( aParam[ST_WRITE]==0 ){ - bReadonly = 1; - } - - if( bLsm ){ - rc = tdb_lsm_open(zSystem, "testdb.lsm", !bReadonly, &pDb); - }else{ - pDb = testOpen(zSystem, !bReadonly, &rc); - } - if( rc!=0 ) return rc; - if( bReadonly ){ - nContent = testCountDatabase(pDb); - } - -#if 0 - pLog = fopen("/tmp/speed.log", "w"); - tdb_lsm_write_hook(pDb, do_speed_write_hook2, (void *)pLog); -#endif - - for(i=0; i=nArg ){ - testPrintError("option %s requires an argument\n", aOpt[iSel].zOpt); - return 1; - } - if( aOpt[iSel].isSwitch==1 ){ - nRow = atoi(azArg[i]); - } - if( aOpt[iSel].isSwitch==2 ){ - nSleep = atoi(azArg[i]); - } - if( aOpt[iSel].isSwitch==3 ){ - struct Mode { - const char *zMode; - int doReadTest; - int doWriteTest; - } aMode[] = {{"ro", 1, 0} , {"rw", 1, 1}, {"wo", 0, 1}, {0, 0, 0}}; - int iMode; - rc = testArgSelect(aMode, "option", azArg[i], &iMode); - if( rc ) return rc; - doReadTest = aMode[iMode].doReadTest; - doWriteTest = aMode[iMode].doWriteTest; - } - if( aOpt[iSel].isSwitch==4 ){ - /* The "-out FILE" switch. This option is used to specify a file to - ** write the gnuplot script to. */ - zOut = azArg[i]; - } - }else{ - /* A db name */ - rc = testArgSelect(aOpt, "system", azArg[i], &iSel); - if( rc ) return rc; - sys_mask |= (1< 100000) ? 100000 : nSelStep; - - aTime = malloc(sizeof(int) * ArraySize(aSys) * nRow/nStep); - aWrite = malloc(sizeof(int) * nRow/nStep); - aSelTime = malloc(sizeof(int) * ArraySize(aSys) * nRow/nSelStep); - - /* This loop collects the INSERT speed data. 
*/ - if( doWriteTest ){ - printf("Writing output to file \"%s\".\n", zOut); - - for(j=0; aSys[j].zLibrary; j++){ - FILE *pLog = 0; - TestDb *pDb; /* Database being tested */ - lsm_db *pLsm; - int iDot = 0; - - if( ((1<nData ){ - fprintf(pHook->pOut, "write %s %d %d\n", - (pHook->bLog ? "log" : "db"), (int)pHook->iOff, pHook->nData - ); - pHook->nData = 0; - fflush(pHook->pOut); - } -} - -static void do_insert_write_hook( - void *pCtx, - int bLog, - i64 iOff, - int nData, - int nUs -){ - InsertWriteHook *pHook = (InsertWriteHook *)pCtx; - if( bLog ) return; - - if( nData==0 ){ - flushHook(pHook); - fprintf(pHook->pOut, "sync %s\n", (bLog ? "log" : "db")); - }else if( pHook->nData - && bLog==pHook->bLog - && iOff==(pHook->iOff+pHook->nData) - ){ - pHook->nData += nData; - }else{ - flushHook(pHook); - pHook->bLog = bLog; - pHook->iOff = iOff; - pHook->nData = nData; - } -} - -static int do_replay(int nArg, char **azArg){ - char aBuf[4096]; - FILE *pInput; - FILE *pClose = 0; - const char *zDb; - - lsm_env *pEnv; - lsm_file *pOut; - int rc; - - if( nArg!=2 ){ - testPrintError("Usage: replay WRITELOG FILE\n"); - return 1; - } - - if( strcmp(azArg[0], "-")==0 ){ - pInput = stdin; - }else{ - pClose = pInput = fopen(azArg[0], "r"); - } - zDb = azArg[1]; - pEnv = tdb_lsm_env(); - rc = pEnv->xOpen(pEnv, zDb, 0, &pOut); - if( rc!=LSM_OK ) return rc; - - while( feof(pInput)==0 ){ - char zLine[80]; - fgets(zLine, sizeof(zLine)-1, pInput); - zLine[sizeof(zLine)-1] = '\0'; - - if( 0==memcmp("sync db", zLine, 7) ){ - rc = pEnv->xSync(pOut); - if( rc!=0 ) break; - }else{ - int iOff; - int nData; - int nMatch; - nMatch = sscanf(zLine, "write db %d %d", &iOff, &nData); - if( nMatch==2 ){ - int i; - for(i=0; ixWrite(pOut, iOff+i, aBuf, sizeof(aBuf)); - if( rc!=0 ) break; - } - } - } - } - if( pClose ) fclose(pClose); - pEnv->xClose(pOut); - - return rc; -} - -static int do_insert(int nArg, char **azArg){ - const char *zDb = "lsm"; - TestDb *pDb = 0; - int i; - int rc; - const int nRow = 1 * 1000 * 1000; - - DatasourceDefn defn = { TEST_DATASOURCE_RANDOM, 8, 15, 80, 150 }; - Datasource *pData = 0; - - if( nArg>1 ){ - testPrintError("Usage: insert ?DATABASE?\n"); - return 1; - } - if( nArg==1 ){ zDb = azArg[0]; } - - testMallocUninstall(tdb_lsm_env()); - for(i=0; zDb[i] && zDb[i]!='='; i++); - if( zDb[i] ){ - rc = tdb_lsm_open(zDb, "testdb.lsm", 1, &pDb); - }else{ - rc = tdb_open(zDb, 0, 1, &pDb); - } - - if( rc!=0 ){ - testPrintError("Error opening db \"%s\": %d\n", zDb, rc); - }else{ - InsertWriteHook hook; - memset(&hook, 0, sizeof(hook)); - hook.pOut = fopen("writelog.txt", "w"); - - pData = testDatasourceNew(&defn); - tdb_lsm_config_work_hook(pDb, do_insert_work_hook, 0); - tdb_lsm_write_hook(pDb, do_insert_write_hook, (void *)&hook); - - if( rc==0 ){ - for(i=0; i -#include - -static void lsmtest_rusage_report(void){ - struct rusage r; - memset(&r, 0, sizeof(r)); - - getrusage(RUSAGE_SELF, &r); - printf("# getrusage: { ru_maxrss %d ru_oublock %d ru_inblock %d }\n", - (int)r.ru_maxrss, (int)r.ru_oublock, (int)r.ru_inblock - ); -} -#else -static void lsmtest_rusage_report(void){ - /* no-op */ -} -#endif - -int main(int argc, char **argv){ - struct TestFunc { - const char *zName; - int bRusageReport; - int (*xFunc)(int, char **); - } aTest[] = { - {"random", 1, do_random_tests}, - {"writespeed", 1, do_writer_test}, - {"io", 1, st_do_io}, - - {"insert", 1, do_insert}, - {"replay", 1, do_replay}, - - {"speed", 1, do_speed_tests}, - {"speed2", 1, do_speed_test2}, - {"show", 0, st_do_show}, - {"work", 1, 
st_do_work}, - {"test", 1, do_test}, - - {0, 0} - }; - int rc; /* Return Code */ - int iFunc; /* Index into aTest[] */ - - int nLeakAlloc = 0; /* Allocations leaked by lsm */ - int nLeakByte = 0; /* Bytes leaked by lsm */ - -#ifdef LSM_DEBUG_MEM - FILE *pReport = 0; /* lsm malloc() report file */ - const char *zReport = "malloc.txt generated"; -#else - const char *zReport = "malloc.txt NOT generated"; -#endif - - testMallocInstall(tdb_lsm_env()); - - if( argc<2 ){ - testPrintError("Usage: %s sub-command ?args...?\n", argv[0]); - return -1; - } - - /* Initialize error reporting */ - testErrorInit(argc, argv); - - /* Initialize PRNG system */ - testPrngInit(); - - rc = testArgSelect(aTest, "sub-command", argv[1], &iFunc); - if( rc==0 ){ - rc = aTest[iFunc].xFunc(argc-2, &argv[2]); - } - -#ifdef LSM_DEBUG_MEM - pReport = fopen("malloc.txt", "w"); - testMallocCheck(tdb_lsm_env(), &nLeakAlloc, &nLeakByte, pReport); - fclose(pReport); -#else - testMallocCheck(tdb_lsm_env(), &nLeakAlloc, &nLeakByte, 0); -#endif - - if( nLeakAlloc ){ - testPrintError("Leaked %d bytes in %d allocations (%s)\n", - nLeakByte, nLeakAlloc, zReport - ); - if( rc==0 ) rc = -1; - } - testMallocUninstall(tdb_lsm_env()); - - if( aTest[iFunc].bRusageReport ){ - lsmtest_rusage_report(); - } - return rc; -} diff --git a/ext/lsm1/lsm-test/lsmtest_mem.c b/ext/lsm1/lsm-test/lsmtest_mem.c deleted file mode 100644 index 4c35e849f2..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_mem.c +++ /dev/null @@ -1,409 +0,0 @@ - -#include -#include -#include - -#define ArraySize(x) ((int)(sizeof(x) / sizeof((x)[0]))) - -#define MIN(x,y) ((x)<(y) ? (x) : (y)) - -typedef unsigned int u32; -typedef unsigned char u8; -typedef long long int i64; -typedef unsigned long long int u64; - -#if defined(__GLIBC__) && defined(LSM_DEBUG_MEM) - extern int backtrace(void**,int); - extern void backtrace_symbols_fd(void*const*,int,int); -# define TM_BACKTRACE 12 -#else -# define backtrace(A,B) 1 -# define backtrace_symbols_fd(A,B,C) -#endif - - -typedef struct TmBlockHdr TmBlockHdr; -typedef struct TmAgg TmAgg; -typedef struct TmGlobal TmGlobal; - -struct TmGlobal { - /* Linked list of all currently outstanding allocations. And a table of - ** all allocations, past and present, indexed by backtrace() info. */ - TmBlockHdr *pFirst; -#ifdef TM_BACKTRACE - TmAgg *aHash[10000]; -#endif - - /* Underlying malloc/realloc/free functions */ - void *(*xMalloc)(int); /* underlying malloc(3) function */ - void *(*xRealloc)(void *, int); /* underlying realloc(3) function */ - void (*xFree)(void *); /* underlying free(3) function */ - - /* Mutex to protect pFirst and aHash */ - void (*xEnterMutex)(TmGlobal*); /* Call this to enter the mutex */ - void (*xLeaveMutex)(TmGlobal*); /* Call this to leave mutex */ - void (*xDelMutex)(TmGlobal*); /* Call this to delete mutex */ - void *pMutex; /* Mutex handle */ - - void *(*xSaveMalloc)(void *, size_t); - void *(*xSaveRealloc)(void *, void *, size_t); - void (*xSaveFree)(void *, void *); - - /* OOM injection scheduling. If nCountdown is greater than zero when a - ** malloc attempt is made, it is decremented. If this means nCountdown - ** transitions from 1 to 0, then the allocation fails. If bPersist is true - ** when this happens, nCountdown is then incremented back to 1 (so that the - ** next attempt fails too). 
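  **
  ** A minimal usage sketch: arming the countdown through the testMallocOom()
  ** wrapper defined near the end of this file, e.g.
  **
  **   testMallocOom(tdb_lsm_env(), 5, 1, 0, 0);
  **
  ** makes the fifth allocation attempt fail and, because bPersist==1, every
  ** attempt after it as well; passing 0 for bPersist fails only that single
  ** attempt.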
- */ - int nCountdown; - int bPersist; - int bEnable; - void (*xHook)(void *); - void *pHookCtx; -}; - -struct TmBlockHdr { - TmBlockHdr *pNext; - TmBlockHdr *pPrev; - int nByte; -#ifdef TM_BACKTRACE - TmAgg *pAgg; -#endif - u32 iForeGuard; -}; - -#ifdef TM_BACKTRACE -struct TmAgg { - int nAlloc; /* Number of allocations at this path */ - int nByte; /* Total number of bytes allocated */ - int nOutAlloc; /* Number of outstanding allocations */ - int nOutByte; /* Number of outstanding bytes */ - void *aFrame[TM_BACKTRACE]; /* backtrace() output */ - TmAgg *pNext; /* Next object in hash-table collision */ -}; -#endif - -#define FOREGUARD 0x80F5E153 -#define REARGUARD 0xE4676B53 -static const u32 rearguard = REARGUARD; - -#define ROUND8(x) (((x)+7)&~7) - -#define BLOCK_HDR_SIZE (ROUND8( sizeof(TmBlockHdr) )) - -static void lsmtest_oom_error(void){ - static int nErr = 0; - nErr++; -} - -static void tmEnterMutex(TmGlobal *pTm){ - pTm->xEnterMutex(pTm); -} -static void tmLeaveMutex(TmGlobal *pTm){ - pTm->xLeaveMutex(pTm); -} - -static void *tmMalloc(TmGlobal *pTm, int nByte){ - TmBlockHdr *pNew; /* New allocation header block */ - u8 *pUser; /* Return value */ - int nReq; /* Total number of bytes requested */ - - assert( sizeof(rearguard)==4 ); - nReq = BLOCK_HDR_SIZE + nByte + 4; - pNew = (TmBlockHdr *)pTm->xMalloc(nReq); - memset(pNew, 0, sizeof(TmBlockHdr)); - - tmEnterMutex(pTm); - assert( pTm->nCountdown>=0 ); - assert( pTm->bPersist==0 || pTm->bPersist==1 ); - - if( pTm->bEnable && pTm->nCountdown==1 ){ - /* Simulate an OOM error. */ - lsmtest_oom_error(); - pTm->xFree(pNew); - pTm->nCountdown = pTm->bPersist; - if( pTm->xHook ) pTm->xHook(pTm->pHookCtx); - pUser = 0; - }else{ - if( pTm->bEnable && pTm->nCountdown ) pTm->nCountdown--; - - pNew->iForeGuard = FOREGUARD; - pNew->nByte = nByte; - pNew->pNext = pTm->pFirst; - - if( pTm->pFirst ){ - pTm->pFirst->pPrev = pNew; - } - pTm->pFirst = pNew; - - pUser = &((u8 *)pNew)[BLOCK_HDR_SIZE]; - memset(pUser, 0x56, nByte); - memcpy(&pUser[nByte], &rearguard, 4); - -#ifdef TM_BACKTRACE - { - TmAgg *pAgg; - int i; - u32 iHash = 0; - void *aFrame[TM_BACKTRACE]; - memset(aFrame, 0, sizeof(aFrame)); - backtrace(aFrame, TM_BACKTRACE); - - for(i=0; iaHash); - - for(pAgg=pTm->aHash[iHash]; pAgg; pAgg=pAgg->pNext){ - if( memcmp(pAgg->aFrame, aFrame, sizeof(aFrame))==0 ) break; - } - if( !pAgg ){ - pAgg = (TmAgg *)pTm->xMalloc(sizeof(TmAgg)); - memset(pAgg, 0, sizeof(TmAgg)); - memcpy(pAgg->aFrame, aFrame, sizeof(aFrame)); - pAgg->pNext = pTm->aHash[iHash]; - pTm->aHash[iHash] = pAgg; - } - pAgg->nAlloc++; - pAgg->nByte += nByte; - pAgg->nOutAlloc++; - pAgg->nOutByte += nByte; - pNew->pAgg = pAgg; - } -#endif - } - - tmLeaveMutex(pTm); - return pUser; -} - -static void tmFree(TmGlobal *pTm, void *p){ - if( p ){ - TmBlockHdr *pHdr; - u8 *pUser = (u8 *)p; - - tmEnterMutex(pTm); - pHdr = (TmBlockHdr *)(pUser - BLOCK_HDR_SIZE); - assert( pHdr->iForeGuard==FOREGUARD ); - assert( 0==memcmp(&pUser[pHdr->nByte], &rearguard, 4) ); - - if( pHdr->pPrev ){ - assert( pHdr->pPrev->pNext==pHdr ); - pHdr->pPrev->pNext = pHdr->pNext; - }else{ - assert( pHdr==pTm->pFirst ); - pTm->pFirst = pHdr->pNext; - } - if( pHdr->pNext ){ - assert( pHdr->pNext->pPrev==pHdr ); - pHdr->pNext->pPrev = pHdr->pPrev; - } - -#ifdef TM_BACKTRACE - pHdr->pAgg->nOutAlloc--; - pHdr->pAgg->nOutByte -= pHdr->nByte; -#endif - - tmLeaveMutex(pTm); - memset(pUser, 0x58, pHdr->nByte); - memset(pHdr, 0x57, sizeof(TmBlockHdr)); - pTm->xFree(pHdr); - } -} - -static void *tmRealloc(TmGlobal *pTm, void *p, 
int nByte){ - void *pNew; - - pNew = tmMalloc(pTm, nByte); - if( pNew && p ){ - TmBlockHdr *pHdr; - u8 *pUser = (u8 *)p; - pHdr = (TmBlockHdr *)(pUser - BLOCK_HDR_SIZE); - memcpy(pNew, p, MIN(nByte, pHdr->nByte)); - tmFree(pTm, p); - } - return pNew; -} - -static void tmMallocOom( - TmGlobal *pTm, - int nCountdown, - int bPersist, - void (*xHook)(void *), - void *pHookCtx -){ - assert( nCountdown>=0 ); - assert( bPersist==0 || bPersist==1 ); - pTm->nCountdown = nCountdown; - pTm->bPersist = bPersist; - pTm->xHook = xHook; - pTm->pHookCtx = pHookCtx; - pTm->bEnable = 1; -} - -static void tmMallocOomEnable( - TmGlobal *pTm, - int bEnable -){ - pTm->bEnable = bEnable; -} - -static void tmMallocCheck( - TmGlobal *pTm, - int *pnLeakAlloc, - int *pnLeakByte, - FILE *pFile -){ - TmBlockHdr *pHdr; - int nLeak = 0; - int nByte = 0; - - if( pTm==0 ) return; - - for(pHdr=pTm->pFirst; pHdr; pHdr=pHdr->pNext){ - nLeak++; - nByte += pHdr->nByte; - } - if( pnLeakAlloc ) *pnLeakAlloc = nLeak; - if( pnLeakByte ) *pnLeakByte = nByte; - -#ifdef TM_BACKTRACE - if( pFile ){ - int i; - fprintf(pFile, "LEAKS\n"); - for(i=0; iaHash); i++){ - TmAgg *pAgg; - for(pAgg=pTm->aHash[i]; pAgg; pAgg=pAgg->pNext){ - if( pAgg->nOutAlloc ){ - int j; - fprintf(pFile, "%d %d ", pAgg->nOutByte, pAgg->nOutAlloc); - for(j=0; jaFrame[j]); - } - fprintf(pFile, "\n"); - } - } - } - fprintf(pFile, "\nALLOCATIONS\n"); - for(i=0; iaHash); i++){ - TmAgg *pAgg; - for(pAgg=pTm->aHash[i]; pAgg; pAgg=pAgg->pNext){ - int j; - fprintf(pFile, "%d %d ", pAgg->nByte, pAgg->nAlloc); - for(j=0; jaFrame[j]); - fprintf(pFile, "\n"); - } - } - } -#else - (void)pFile; -#endif -} - - -#include "lsm.h" -#include "stdlib.h" - -typedef struct LsmMutex LsmMutex; -struct LsmMutex { - lsm_env *pEnv; - lsm_mutex *pMutex; -}; - -static void tmLsmMutexEnter(TmGlobal *pTm){ - LsmMutex *p = (LsmMutex *)pTm->pMutex; - p->pEnv->xMutexEnter(p->pMutex); -} -static void tmLsmMutexLeave(TmGlobal *pTm){ - LsmMutex *p = (LsmMutex *)(pTm->pMutex); - p->pEnv->xMutexLeave(p->pMutex); -} -static void tmLsmMutexDel(TmGlobal *pTm){ - LsmMutex *p = (LsmMutex *)pTm->pMutex; - pTm->xFree(p); -} -static void *tmLsmMalloc(int n){ return malloc(n); } -static void tmLsmFree(void *ptr){ free(ptr); } -static void *tmLsmRealloc(void *ptr, int n){ return realloc(ptr, n); } - -static void *tmLsmEnvMalloc(lsm_env *p, size_t n){ - return tmMalloc((TmGlobal *)(p->pMemCtx), n); -} -static void tmLsmEnvFree(lsm_env *p, void *ptr){ - tmFree((TmGlobal *)(p->pMemCtx), ptr); -} -static void *tmLsmEnvRealloc(lsm_env *p, void *ptr, size_t n){ - return tmRealloc((TmGlobal *)(p->pMemCtx), ptr, n); -} - -void testMallocInstall(lsm_env *pEnv){ - TmGlobal *pGlobal; - LsmMutex *pMutex; - assert( pEnv->pMemCtx==0 ); - - /* Allocate and populate a TmGlobal structure. 
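  ** The env's original xMalloc/xRealloc/xFree pointers are saved in the
  ** xSaveMalloc/xSaveRealloc/xSaveFree fields so that testMallocUninstall()
  ** can restore them later. A typical sequence (sketch): testMallocInstall(pEnv);
  ** run the test; testMallocCheck(pEnv, &nLeakAlloc, &nLeakByte, 0);
  ** testMallocUninstall(pEnv).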
*/ - pGlobal = (TmGlobal *)tmLsmMalloc(sizeof(TmGlobal)); - memset(pGlobal, 0, sizeof(TmGlobal)); - pGlobal->xMalloc = tmLsmMalloc; - pGlobal->xRealloc = tmLsmRealloc; - pGlobal->xFree = tmLsmFree; - pMutex = (LsmMutex *)pGlobal->xMalloc(sizeof(LsmMutex)); - pMutex->pEnv = pEnv; - pEnv->xMutexStatic(pEnv, LSM_MUTEX_HEAP, &pMutex->pMutex); - pGlobal->xEnterMutex = tmLsmMutexEnter; - pGlobal->xLeaveMutex = tmLsmMutexLeave; - pGlobal->xDelMutex = tmLsmMutexDel; - pGlobal->pMutex = (void *)pMutex; - - pGlobal->xSaveMalloc = pEnv->xMalloc; - pGlobal->xSaveRealloc = pEnv->xRealloc; - pGlobal->xSaveFree = pEnv->xFree; - - /* Set up pEnv to the use the new TmGlobal */ - pEnv->pMemCtx = (void *)pGlobal; - pEnv->xMalloc = tmLsmEnvMalloc; - pEnv->xRealloc = tmLsmEnvRealloc; - pEnv->xFree = tmLsmEnvFree; -} - -void testMallocUninstall(lsm_env *pEnv){ - TmGlobal *p = (TmGlobal *)pEnv->pMemCtx; - pEnv->pMemCtx = 0; - if( p ){ - pEnv->xMalloc = p->xSaveMalloc; - pEnv->xRealloc = p->xSaveRealloc; - pEnv->xFree = p->xSaveFree; - p->xDelMutex(p); - tmLsmFree(p); - } -} - -void testMallocCheck( - lsm_env *pEnv, - int *pnLeakAlloc, - int *pnLeakByte, - FILE *pFile -){ - if( pEnv->pMemCtx==0 ){ - *pnLeakAlloc = 0; - *pnLeakByte = 0; - }else{ - tmMallocCheck((TmGlobal *)(pEnv->pMemCtx), pnLeakAlloc, pnLeakByte, pFile); - } -} - -void testMallocOom( - lsm_env *pEnv, - int nCountdown, - int bPersist, - void (*xHook)(void *), - void *pHookCtx -){ - TmGlobal *pTm = (TmGlobal *)(pEnv->pMemCtx); - tmMallocOom(pTm, nCountdown, bPersist, xHook, pHookCtx); -} - -void testMallocOomEnable(lsm_env *pEnv, int bEnable){ - TmGlobal *pTm = (TmGlobal *)(pEnv->pMemCtx); - tmMallocOomEnable(pTm, bEnable); -} diff --git a/ext/lsm1/lsm-test/lsmtest_tdb.c b/ext/lsm1/lsm-test/lsmtest_tdb.c deleted file mode 100644 index 8f63f64acb..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_tdb.c +++ /dev/null @@ -1,846 +0,0 @@ - -/* -** This program attempts to test the correctness of some facets of the -** LSM database library. Specifically, that the contents of the database -** are maintained correctly during a series of inserts and deletes. -*/ - - -#include "lsmtest_tdb.h" -#include "lsm.h" - -#include "lsmtest.h" - -#include -#include -#include -#ifndef _WIN32 -# include -#endif -#include - - -typedef struct SqlDb SqlDb; - -static int error_transaction_function(TestDb *p, int iLevel){ - unused_parameter(p); - unused_parameter(iLevel); - return -1; -} - - -/************************************************************************* -** Begin wrapper for LevelDB. 
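**
** The wrapper maps the generic TestDb methods onto the LevelDB C API:
** test_leveldb_write() and test_leveldb_delete() call leveldb_put() and
** leveldb_delete(), test_leveldb_fetch() uses leveldb_get(), and
** test_leveldb_scan() walks a leveldb_iterator_t in either direction.
** Transactions are not supported, so the begin/commit/rollback slots in
** LeveldbMethods all point at error_transaction_function().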
-*/ -#ifdef HAVE_LEVELDB - -#include - -typedef struct LevelDb LevelDb; -struct LevelDb { - TestDb base; - leveldb_t *db; - leveldb_options_t *pOpt; - leveldb_writeoptions_t *pWriteOpt; - leveldb_readoptions_t *pReadOpt; - - char *pVal; -}; - -static int test_leveldb_close(TestDb *pTestDb){ - LevelDb *pDb = (LevelDb *)pTestDb; - - leveldb_close(pDb->db); - leveldb_writeoptions_destroy(pDb->pWriteOpt); - leveldb_readoptions_destroy(pDb->pReadOpt); - leveldb_options_destroy(pDb->pOpt); - free(pDb->pVal); - free(pDb); - - return 0; -} - -static int test_leveldb_write( - TestDb *pTestDb, - void *pKey, - int nKey, - void *pVal, - int nVal -){ - LevelDb *pDb = (LevelDb *)pTestDb; - char *zErr = 0; - leveldb_put(pDb->db, pDb->pWriteOpt, pKey, nKey, pVal, nVal, &zErr); - return (zErr!=0); -} - -static int test_leveldb_delete(TestDb *pTestDb, void *pKey, int nKey){ - LevelDb *pDb = (LevelDb *)pTestDb; - char *zErr = 0; - leveldb_delete(pDb->db, pDb->pWriteOpt, pKey, nKey, &zErr); - return (zErr!=0); -} - -static int test_leveldb_fetch( - TestDb *pTestDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - LevelDb *pDb = (LevelDb *)pTestDb; - char *zErr = 0; - size_t nVal = 0; - - if( pKey==0 ) return 0; - free(pDb->pVal); - pDb->pVal = leveldb_get(pDb->db, pDb->pReadOpt, pKey, nKey, &nVal, &zErr); - *ppVal = (void *)(pDb->pVal); - if( pDb->pVal==0 ){ - *pnVal = -1; - }else{ - *pnVal = (int)nVal; - } - - return (zErr!=0); -} - -static int test_leveldb_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pKey1, int nKey1, /* Start of search */ - void *pKey2, int nKey2, /* End of search */ - void (*xCallback)(void *, void *, int , void *, int) -){ - LevelDb *pDb = (LevelDb *)pTestDb; - leveldb_iterator_t *iter; - - iter = leveldb_create_iterator(pDb->db, pDb->pReadOpt); - - if( bReverse==0 ){ - if( pKey1 ){ - leveldb_iter_seek(iter, pKey1, nKey1); - }else{ - leveldb_iter_seek_to_first(iter); - } - }else{ - if( pKey2 ){ - leveldb_iter_seek(iter, pKey2, nKey2); - - if( leveldb_iter_valid(iter)==0 ){ - leveldb_iter_seek_to_last(iter); - }else{ - const char *k; size_t n; - int res; - k = leveldb_iter_key(iter, &n); - res = memcmp(k, pKey2, MIN(n, nKey2)); - if( res==0 ) res = n - nKey2; - assert( res>=0 ); - if( res>0 ){ - leveldb_iter_prev(iter); - } - } - }else{ - leveldb_iter_seek_to_last(iter); - } - } - - - while( leveldb_iter_valid(iter) ){ - const char *k; size_t n; - const char *v; size_t n2; - int res; - - k = leveldb_iter_key(iter, &n); - if( bReverse==0 && pKey2 ){ - res = memcmp(k, pKey2, MIN(n, nKey2)); - if( res==0 ) res = n - nKey2; - if( res>0 ) break; - } - if( bReverse!=0 && pKey1 ){ - res = memcmp(k, pKey1, MIN(n, nKey1)); - if( res==0 ) res = n - nKey1; - if( res<0 ) break; - } - - v = leveldb_iter_value(iter, &n2); - - xCallback(pCtx, (void *)k, n, (void *)v, n2); - - if( bReverse==0 ){ - leveldb_iter_next(iter); - }else{ - leveldb_iter_prev(iter); - } - } - - leveldb_iter_destroy(iter); - return 0; -} - -static int test_leveldb_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - static const DatabaseMethods LeveldbMethods = { - test_leveldb_close, - test_leveldb_write, - test_leveldb_delete, - 0, - test_leveldb_fetch, - test_leveldb_scan, - error_transaction_function, - error_transaction_function, - error_transaction_function - }; - - LevelDb *pLevelDb; - char *zErr = 0; - - if( bClear ){ - char *zCmd = sqlite3_mprintf("rm -rf %s\n", zFilename); - system(zCmd); - sqlite3_free(zCmd); - } - - pLevelDb = (LevelDb 
*)malloc(sizeof(LevelDb)); - memset(pLevelDb, 0, sizeof(LevelDb)); - - pLevelDb->pOpt = leveldb_options_create(); - leveldb_options_set_create_if_missing(pLevelDb->pOpt, 1); - pLevelDb->pWriteOpt = leveldb_writeoptions_create(); - pLevelDb->pReadOpt = leveldb_readoptions_create(); - - pLevelDb->db = leveldb_open(pLevelDb->pOpt, zFilename, &zErr); - - if( zErr ){ - test_leveldb_close((TestDb *)pLevelDb); - *ppDb = 0; - return 1; - } - - *ppDb = (TestDb *)pLevelDb; - pLevelDb->base.pMethods = &LeveldbMethods; - return 0; -} -#endif /* HAVE_LEVELDB */ -/* -** End wrapper for LevelDB. -*************************************************************************/ - -#ifdef HAVE_KYOTOCABINET -static int kc_close(TestDb *pTestDb){ - return test_kc_close(pTestDb); -} - -static int kc_write( - TestDb *pTestDb, - void *pKey, - int nKey, - void *pVal, - int nVal -){ - return test_kc_write(pTestDb, pKey, nKey, pVal, nVal); -} - -static int kc_delete(TestDb *pTestDb, void *pKey, int nKey){ - return test_kc_delete(pTestDb, pKey, nKey); -} - -static int kc_delete_range( - TestDb *pTestDb, - void *pKey1, int nKey1, - void *pKey2, int nKey2 -){ - return test_kc_delete_range(pTestDb, pKey1, nKey1, pKey2, nKey2); -} - -static int kc_fetch( - TestDb *pTestDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - if( pKey==0 ) return LSM_OK; - return test_kc_fetch(pTestDb, pKey, nKey, ppVal, pnVal); -} - -static int kc_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pFirst, int nFirst, - void *pLast, int nLast, - void (*xCallback)(void *, void *, int , void *, int) -){ - return test_kc_scan( - pTestDb, pCtx, bReverse, pFirst, nFirst, pLast, nLast, xCallback - ); -} - -static int kc_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - static const DatabaseMethods KcdbMethods = { - kc_close, - kc_write, - kc_delete, - kc_delete_range, - kc_fetch, - kc_scan, - error_transaction_function, - error_transaction_function, - error_transaction_function - }; - - int rc; - TestDb *pTestDb = 0; - - rc = test_kc_open(zFilename, bClear, &pTestDb); - if( rc!=0 ){ - *ppDb = 0; - return rc; - } - pTestDb->pMethods = &KcdbMethods; - *ppDb = pTestDb; - return 0; -} -#endif /* HAVE_KYOTOCABINET */ -/* -** End wrapper for Kyoto cabinet. 
-*************************************************************************/ - -#ifdef HAVE_MDB -static int mdb_close(TestDb *pTestDb){ - return test_mdb_close(pTestDb); -} - -static int mdb_write( - TestDb *pTestDb, - void *pKey, - int nKey, - void *pVal, - int nVal -){ - return test_mdb_write(pTestDb, pKey, nKey, pVal, nVal); -} - -static int mdb_delete(TestDb *pTestDb, void *pKey, int nKey){ - return test_mdb_delete(pTestDb, pKey, nKey); -} - -static int mdb_fetch( - TestDb *pTestDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - if( pKey==0 ) return LSM_OK; - return test_mdb_fetch(pTestDb, pKey, nKey, ppVal, pnVal); -} - -static int mdb_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pFirst, int nFirst, - void *pLast, int nLast, - void (*xCallback)(void *, void *, int , void *, int) -){ - return test_mdb_scan( - pTestDb, pCtx, bReverse, pFirst, nFirst, pLast, nLast, xCallback - ); -} - -static int mdb_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - static const DatabaseMethods KcdbMethods = { - mdb_close, - mdb_write, - mdb_delete, - 0, - mdb_fetch, - mdb_scan, - error_transaction_function, - error_transaction_function, - error_transaction_function - }; - - int rc; - TestDb *pTestDb = 0; - - rc = test_mdb_open(zSpec, zFilename, bClear, &pTestDb); - if( rc!=0 ){ - *ppDb = 0; - return rc; - } - pTestDb->pMethods = &KcdbMethods; - *ppDb = pTestDb; - return 0; -} -#endif /* HAVE_MDB */ - -/************************************************************************* -** Begin wrapper for SQLite. -*/ - -/* -** nOpenTrans: -** The number of open nested transactions, in the same sense as used -** by the tdb_begin/commit/rollback and SQLite 4 KV interfaces. If this -** value is 0, there are no transactions open at all. If it is 1, then -** there is a read transaction. If it is 2 or greater, then there are -** (nOpenTrans-1) nested write transactions open. 
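**
** Concretely (see sql_begin(), sql_commit() and sql_rollback() below), the
** read transaction is opened with "BEGIN" plus a dummy SELECT, and each
** additional write level is implemented as a savepoint named x1, x2, ....
** A rough sketch of the mapping:
**
**   tdb_begin(pDb, 3);    BEGIN; SAVEPOINT x1; SAVEPOINT x2
**   tdb_commit(pDb, 1);   RELEASE x1   (read transaction stays open)
**   tdb_commit(pDb, 0);   COMMIT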
-*/ -struct SqlDb { - TestDb base; - sqlite3 *db; - sqlite3_stmt *pInsert; - sqlite3_stmt *pDelete; - sqlite3_stmt *pDeleteRange; - sqlite3_stmt *pFetch; - sqlite3_stmt *apScan[8]; - - int nOpenTrans; - - /* Used by sql_fetch() to allocate space for results */ - int nAlloc; - u8 *aAlloc; -}; - -static int sql_close(TestDb *pTestDb){ - SqlDb *pDb = (SqlDb *)pTestDb; - sqlite3_finalize(pDb->pInsert); - sqlite3_finalize(pDb->pDelete); - sqlite3_finalize(pDb->pDeleteRange); - sqlite3_finalize(pDb->pFetch); - sqlite3_finalize(pDb->apScan[0]); - sqlite3_finalize(pDb->apScan[1]); - sqlite3_finalize(pDb->apScan[2]); - sqlite3_finalize(pDb->apScan[3]); - sqlite3_finalize(pDb->apScan[4]); - sqlite3_finalize(pDb->apScan[5]); - sqlite3_finalize(pDb->apScan[6]); - sqlite3_finalize(pDb->apScan[7]); - sqlite3_close(pDb->db); - free((char *)pDb->aAlloc); - free((char *)pDb); - return SQLITE_OK; -} - -static int sql_write( - TestDb *pTestDb, - void *pKey, - int nKey, - void *pVal, - int nVal -){ - SqlDb *pDb = (SqlDb *)pTestDb; - sqlite3_bind_blob(pDb->pInsert, 1, pKey, nKey, SQLITE_STATIC); - sqlite3_bind_blob(pDb->pInsert, 2, pVal, nVal, SQLITE_STATIC); - sqlite3_step(pDb->pInsert); - return sqlite3_reset(pDb->pInsert); -} - -static int sql_delete(TestDb *pTestDb, void *pKey, int nKey){ - SqlDb *pDb = (SqlDb *)pTestDb; - sqlite3_bind_blob(pDb->pDelete, 1, pKey, nKey, SQLITE_STATIC); - sqlite3_step(pDb->pDelete); - return sqlite3_reset(pDb->pDelete); -} - -static int sql_delete_range( - TestDb *pTestDb, - void *pKey1, int nKey1, - void *pKey2, int nKey2 -){ - SqlDb *pDb = (SqlDb *)pTestDb; - sqlite3_bind_blob(pDb->pDeleteRange, 1, pKey1, nKey1, SQLITE_STATIC); - sqlite3_bind_blob(pDb->pDeleteRange, 2, pKey2, nKey2, SQLITE_STATIC); - sqlite3_step(pDb->pDeleteRange); - return sqlite3_reset(pDb->pDeleteRange); -} - -static int sql_fetch( - TestDb *pTestDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - SqlDb *pDb = (SqlDb *)pTestDb; - int rc; - - sqlite3_reset(pDb->pFetch); - if( pKey==0 ){ - assert( ppVal==0 ); - assert( pnVal==0 ); - return LSM_OK; - } - - sqlite3_bind_blob(pDb->pFetch, 1, pKey, nKey, SQLITE_STATIC); - rc = sqlite3_step(pDb->pFetch); - if( rc==SQLITE_ROW ){ - int nVal = sqlite3_column_bytes(pDb->pFetch, 0); - u8 *aVal = (void *)sqlite3_column_blob(pDb->pFetch, 0); - - if( nVal>pDb->nAlloc ){ - free(pDb->aAlloc); - pDb->aAlloc = (u8 *)malloc(nVal*2); - pDb->nAlloc = nVal*2; - } - memcpy(pDb->aAlloc, aVal, nVal); - *pnVal = nVal; - *ppVal = (void *)pDb->aAlloc; - }else{ - *pnVal = -1; - *ppVal = 0; - } - - rc = sqlite3_reset(pDb->pFetch); - return rc; -} - -static int sql_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pFirst, int nFirst, - void *pLast, int nLast, - void (*xCallback)(void *, void *, int , void *, int) -){ - SqlDb *pDb = (SqlDb *)pTestDb; - sqlite3_stmt *pScan; - - assert( bReverse==1 || bReverse==0 ); - pScan = pDb->apScan[(pFirst==0) + (pLast==0)*2 + bReverse*4]; - - if( pFirst ) sqlite3_bind_blob(pScan, 1, pFirst, nFirst, SQLITE_STATIC); - if( pLast ) sqlite3_bind_blob(pScan, 2, pLast, nLast, SQLITE_STATIC); - - while( SQLITE_ROW==sqlite3_step(pScan) ){ - void *pKey; int nKey; - void *pVal; int nVal; - - nKey = sqlite3_column_bytes(pScan, 0); - pKey = (void *)sqlite3_column_blob(pScan, 0); - nVal = sqlite3_column_bytes(pScan, 1); - pVal = (void *)sqlite3_column_blob(pScan, 1); - - xCallback(pCtx, pKey, nKey, pVal, nVal); - } - return sqlite3_reset(pScan); -} - -static int sql_begin(TestDb *pTestDb, int iLevel){ - int i; - SqlDb *pDb = (SqlDb 
*)pTestDb; - - /* iLevel==0 is a no-op */ - if( iLevel==0 ) return 0; - - /* If there are no transactions at all open, open a read transaction. */ - if( pDb->nOpenTrans==0 ){ - int rc = sqlite3_exec(pDb->db, - "BEGIN; SELECT * FROM sqlite_schema LIMIT 1;" , 0, 0, 0 - ); - if( rc!=0 ) return rc; - pDb->nOpenTrans = 1; - } - - /* Open any required write transactions */ - for(i=pDb->nOpenTrans; idb, zSql, 0, 0, 0); - sqlite3_free(zSql); - if( rc!=SQLITE_OK ) return rc; - } - - pDb->nOpenTrans = iLevel; - return 0; -} - -static int sql_commit(TestDb *pTestDb, int iLevel){ - SqlDb *pDb = (SqlDb *)pTestDb; - assert( iLevel>=0 ); - - /* Close the read transaction if requested. */ - if( pDb->nOpenTrans>=1 && iLevel==0 ){ - int rc = sqlite3_exec(pDb->db, "COMMIT", 0, 0, 0); - if( rc!=0 ) return rc; - pDb->nOpenTrans = 0; - } - - /* Close write transactions as required */ - if( pDb->nOpenTrans>iLevel ){ - char *zSql = sqlite3_mprintf("RELEASE x%d", iLevel); - int rc = sqlite3_exec(pDb->db, zSql, 0, 0, 0); - sqlite3_free(zSql); - if( rc!=0 ) return rc; - } - - pDb->nOpenTrans = iLevel; - return 0; -} - -static int sql_rollback(TestDb *pTestDb, int iLevel){ - SqlDb *pDb = (SqlDb *)pTestDb; - assert( iLevel>=0 ); - - if( pDb->nOpenTrans>=1 && iLevel==0 ){ - /* Close the read transaction if requested. */ - int rc = sqlite3_exec(pDb->db, "ROLLBACK", 0, 0, 0); - if( rc!=0 ) return rc; - }else if( pDb->nOpenTrans>1 && iLevel==1 ){ - /* Or, rollback and close the top-level write transaction */ - int rc = sqlite3_exec(pDb->db, "ROLLBACK TO x1; RELEASE x1;", 0, 0, 0); - if( rc!=0 ) return rc; - }else{ - /* Or, just roll back some nested transactions */ - char *zSql = sqlite3_mprintf("ROLLBACK TO x%d", iLevel-1); - int rc = sqlite3_exec(pDb->db, zSql, 0, 0, 0); - sqlite3_free(zSql); - if( rc!=0 ) return rc; - } - - pDb->nOpenTrans = iLevel; - return 0; -} - -static int sql_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - static const DatabaseMethods SqlMethods = { - sql_close, - sql_write, - sql_delete, - sql_delete_range, - sql_fetch, - sql_scan, - sql_begin, - sql_commit, - sql_rollback - }; - const char *zCreate = "CREATE TABLE IF NOT EXISTS t1(k PRIMARY KEY, v)"; - const char *zInsert = "REPLACE INTO t1 VALUES(?, ?)"; - const char *zDelete = "DELETE FROM t1 WHERE k = ?"; - const char *zRange = "DELETE FROM t1 WHERE k>? 
AND k= ?1 ORDER BY k"; - const char *zScan3 = "SELECT * FROM t1 ORDER BY k"; - - const char *zScan4 = - "SELECT * FROM t1 WHERE k BETWEEN ?1 AND ?2 ORDER BY k DESC"; - const char *zScan5 = "SELECT * FROM t1 WHERE k <= ?2 ORDER BY k DESC"; - const char *zScan6 = "SELECT * FROM t1 WHERE k >= ?1 ORDER BY k DESC"; - const char *zScan7 = "SELECT * FROM t1 ORDER BY k DESC"; - - int rc; - SqlDb *pDb; - char *zPragma; - - if( bClear && zFilename && zFilename[0] ){ - unlink(zFilename); - } - - pDb = (SqlDb *)malloc(sizeof(SqlDb)); - memset(pDb, 0, sizeof(SqlDb)); - pDb->base.pMethods = &SqlMethods; - - if( 0!=(rc = sqlite3_open(zFilename, &pDb->db)) - || 0!=(rc = sqlite3_exec(pDb->db, zCreate, 0, 0, 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zInsert, -1, &pDb->pInsert, 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zDelete, -1, &pDb->pDelete, 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zRange, -1, &pDb->pDeleteRange, 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zFetch, -1, &pDb->pFetch, 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan0, -1, &pDb->apScan[0], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan1, -1, &pDb->apScan[1], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan2, -1, &pDb->apScan[2], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan3, -1, &pDb->apScan[3], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan4, -1, &pDb->apScan[4], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan5, -1, &pDb->apScan[5], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan6, -1, &pDb->apScan[6], 0)) - || 0!=(rc = sqlite3_prepare_v2(pDb->db, zScan7, -1, &pDb->apScan[7], 0)) - ){ - *ppDb = 0; - sql_close((TestDb *)pDb); - return rc; - } - - zPragma = sqlite3_mprintf("PRAGMA page_size=%d", TESTDB_DEFAULT_PAGE_SIZE); - sqlite3_exec(pDb->db, zPragma, 0, 0, 0); - sqlite3_free(zPragma); - zPragma = sqlite3_mprintf("PRAGMA cache_size=%d", TESTDB_DEFAULT_CACHE_SIZE); - sqlite3_exec(pDb->db, zPragma, 0, 0, 0); - sqlite3_free(zPragma); - - /* sqlite3_exec(pDb->db, "PRAGMA locking_mode=EXCLUSIVE", 0, 0, 0); */ - sqlite3_exec(pDb->db, "PRAGMA synchronous=OFF", 0, 0, 0); - sqlite3_exec(pDb->db, "PRAGMA journal_mode=WAL", 0, 0, 0); - sqlite3_exec(pDb->db, "PRAGMA wal_autocheckpoint=4096", 0, 0, 0); - if( zSpec ){ - rc = sqlite3_exec(pDb->db, zSpec, 0, 0, 0); - if( rc!=SQLITE_OK ){ - sql_close((TestDb *)pDb); - return rc; - } - } - - *ppDb = (TestDb *)pDb; - return 0; -} -/* -** End wrapper for SQLite. -*************************************************************************/ - -/************************************************************************* -** Begin exported functions. 
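**
** The aLib[] table below maps each supported database system name to its
** default database file and open function.  tdb_open() selects an entry by
** name, tdb_default_db() returns the matching default filename, and
** tdb_system_name() enumerates the available names.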
-*/ -static struct Lib { - const char *zName; - const char *zDefaultDb; - int (*xOpen)(const char *, const char *zFilename, int bClear, TestDb **ppDb); -} aLib[] = { - { "sqlite3", "testdb.sqlite", sql_open }, - { "lsm_small", "testdb.lsm_small", test_lsm_small_open }, - { "lsm_lomem", "testdb.lsm_lomem", test_lsm_lomem_open }, - { "lsm_lomem2", "testdb.lsm_lomem2", test_lsm_lomem2_open }, -#ifdef HAVE_ZLIB - { "lsm_zip", "testdb.lsm_zip", test_lsm_zip_open }, -#endif - { "lsm", "testdb.lsm", test_lsm_open }, -#ifdef LSM_MUTEX_PTHREADS - { "lsm_mt2", "testdb.lsm_mt2", test_lsm_mt2 }, - { "lsm_mt3", "testdb.lsm_mt3", test_lsm_mt3 }, -#endif -#ifdef HAVE_LEVELDB - { "leveldb", "testdb.leveldb", test_leveldb_open }, -#endif -#ifdef HAVE_KYOTOCABINET - { "kyotocabinet", "testdb.kc", kc_open }, -#endif -#ifdef HAVE_MDB - { "mdb", "./testdb.mdb", mdb_open } -#endif -}; - -const char *tdb_system_name(int i){ - if( i<0 || i>=ArraySize(aLib) ) return 0; - return aLib[i].zName; -} - -const char *tdb_default_db(const char *zSys){ - int i; - for(i=0; izLibrary = aLib[i].zName; - } - break; - } - } - - if( rc ){ - /* Failed to find the requested database library. Return an error. */ - *ppDb = 0; - } - return rc; -} - -int tdb_close(TestDb *pDb){ - if( pDb ){ - return pDb->pMethods->xClose(pDb); - } - return 0; -} - -int tdb_write(TestDb *pDb, void *pKey, int nKey, void *pVal, int nVal){ - return pDb->pMethods->xWrite(pDb, pKey, nKey, pVal, nVal); -} - -int tdb_delete(TestDb *pDb, void *pKey, int nKey){ - return pDb->pMethods->xDelete(pDb, pKey, nKey); -} - -int tdb_delete_range( - TestDb *pDb, void *pKey1, int nKey1, void *pKey2, int nKey2 -){ - return pDb->pMethods->xDeleteRange(pDb, pKey1, nKey1, pKey2, nKey2); -} - -int tdb_fetch(TestDb *pDb, void *pKey, int nKey, void **ppVal, int *pnVal){ - return pDb->pMethods->xFetch(pDb, pKey, nKey, ppVal, pnVal); -} - -int tdb_scan( - TestDb *pDb, /* Database handle */ - void *pCtx, /* Context pointer to pass to xCallback */ - int bReverse, /* True to scan in reverse order */ - void *pKey1, int nKey1, /* Start of search */ - void *pKey2, int nKey2, /* End of search */ - void (*xCallback)(void *pCtx, void *pKey, int nKey, void *pVal, int nVal) -){ - return pDb->pMethods->xScan( - pDb, pCtx, bReverse, pKey1, nKey1, pKey2, nKey2, xCallback - ); -} - -int tdb_begin(TestDb *pDb, int iLevel){ - return pDb->pMethods->xBegin(pDb, iLevel); -} -int tdb_commit(TestDb *pDb, int iLevel){ - return pDb->pMethods->xCommit(pDb, iLevel); -} -int tdb_rollback(TestDb *pDb, int iLevel){ - return pDb->pMethods->xRollback(pDb, iLevel); -} - -int tdb_transaction_support(TestDb *pDb){ - return (pDb->pMethods->xBegin != error_transaction_function); -} - -const char *tdb_library_name(TestDb *pDb){ - return pDb->zLibrary; -} - -/* -** End exported functions. -*************************************************************************/ diff --git a/ext/lsm1/lsm-test/lsmtest_tdb.h b/ext/lsm1/lsm-test/lsmtest_tdb.h deleted file mode 100644 index c55b6e2f80..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_tdb.h +++ /dev/null @@ -1,174 +0,0 @@ - -/* -** This file is the interface to a very simple database library used for -** testing. The interface is similar to that of the LSM. The main virtue -** of this library is that the same API may be used to access a key-value -** store implemented by LSM, SQLite or another database system. Which -** makes it easy to use for correctness and performance tests. 
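**
** A minimal usage sketch (error checking omitted):
**
**   TestDb *pDb;
**   void *pVal; int nVal;
**   tdb_open("lsm", "testdb.lsm", 1, &pDb);
**   tdb_write(pDb, "k", 1, "v", 1);
**   tdb_fetch(pDb, "k", 1, &pVal, &nVal);
**   tdb_close(pDb);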
-*/ - -#ifndef __WRAPPER_H_ -#define __WRAPPER_H_ - -#ifdef __cplusplus -extern "C" { -#endif - -#include "lsm.h" - -typedef struct TestDb TestDb; - -/* -** Open a new database connection. The first argument is the name of the -** database library to use. e.g. something like: -** -** "sqlite3" -** "lsm" -** -** See function tdb_system_name() for a list of available database systems. -** -** The second argument is the name of the database to open (e.g. a filename). -** -** If the third parameter is non-zero, then any existing database by the -** name of zDb is removed before opening a new one. If it is zero, then an -** existing database may be opened. -*/ -int tdb_open(const char *zLibrary, const char *zDb, int bClear, TestDb **ppDb); - -/* -** Close a database handle. -*/ -int tdb_close(TestDb *pDb); - -/* -** Write a new key/value into the database. -*/ -int tdb_write(TestDb *pDb, void *pKey, int nKey, void *pVal, int nVal); - -/* -** Delete a key from the database. -*/ -int tdb_delete(TestDb *pDb, void *pKey, int nKey); - -/* -** Delete a range of keys from the database. -*/ -int tdb_delete_range(TestDb *, void *pKey1, int nKey1, void *pKey2, int nKey2); - -/* -** Query the database for key (pKey/nKey). If no entry is found, set *ppVal -** to 0 and *pnVal to -1 before returning. Otherwise, set *ppVal and *pnVal -** to a pointer to and size of the value associated with (pKey/nKey). -*/ -int tdb_fetch(TestDb *pDb, void *pKey, int nKey, void **ppVal, int *pnVal); - -/* -** Open and close nested transactions. Currently, these functions only -** work for SQLite3 and LSM systems. Use the tdb_transaction_support() -** function to determine if a given TestDb handle supports these methods. -** -** These functions and the iLevel parameter follow the same conventions as -** the SQLite 4 transaction interface. Note that this is slightly different -** from the way LSM does things. As follows: -** -** tdb_begin(): -** A successful call to tdb_begin() with (iLevel>1) guarantees that -** there are at least (iLevel-1) write transactions open. If iLevel==1, -** then it guarantees that at least a read-transaction is open. Calling -** tdb_begin() with iLevel==0 is a no-op. -** -** tdb_commit(): -** A successful call to tdb_commit() with (iLevel>1) guarantees that -** there are at most (iLevel-1) write transactions open. If iLevel==1, -** then it guarantees that there are no write transactions open (although -** a read-transaction may remain open). Calling tdb_commit() with -** iLevel==0 ensures that all transactions, read or write, have been -** closed and committed. -** -** tdb_rollback(): -** This call is similar to tdb_commit(), except that instead of committing -** transactions, it reverts them. For example, calling tdb_rollback() with -** iLevel==2 ensures that there is at most one write transaction open, and -** restores the database to the state that it was in when that transaction -** was opened. -** -** In other words, tdb_commit() just closes transactions - tdb_rollback() -** closes transactions and then restores the database to the state it -** was in before those transactions were even opened. -*/ -int tdb_begin(TestDb *pDb, int iLevel); -int tdb_commit(TestDb *pDb, int iLevel); -int tdb_rollback(TestDb *pDb, int iLevel); - -/* -** Return true if transactions are supported, or false otherwise. -*/ -int tdb_transaction_support(TestDb *pDb); - -/* -** Return the name of the database library (as passed to tdb_open()) used -** by the handled passed as the first argument. 
-*/ -const char *tdb_library_name(TestDb *pDb); - -/* -** Scan a range of database keys. Invoke the callback function for each -** key visited. -*/ -int tdb_scan( - TestDb *pDb, /* Database handle */ - void *pCtx, /* Context pointer to pass to xCallback */ - int bReverse, /* True to scan in reverse order */ - void *pKey1, int nKey1, /* Start of search */ - void *pKey2, int nKey2, /* End of search */ - void (*xCallback)(void *pCtx, void *pKey, int nKey, void *pVal, int nVal) -); - -const char *tdb_system_name(int i); -const char *tdb_default_db(const char *zSys); - -int tdb_lsm_open(const char *zCfg, const char *zDb, int bClear, TestDb **ppDb); - -/* -** If the TestDb handle passed as an argument is a wrapper around an LSM -** database, return the LSM handle. Otherwise, if the argument is some other -** database system, return NULL. -*/ -lsm_db *tdb_lsm(TestDb *pDb); - -/* -** Return true if the db passed as an argument is a multi-threaded LSM -** connection. -*/ -int tdb_lsm_multithread(TestDb *pDb); - -/* -** Return a pointer to the lsm_env object used by all lsm database -** connections initialized as a copy of the object returned by -** lsm_default_env(). It may be modified (e.g. to override functions) -** if the caller can guarantee that it is not already in use. -*/ -lsm_env *tdb_lsm_env(void); - -/* -** The following functions only work with LSM database handles. It is -** illegal to call them with any other type of database handle specified -** as an argument. -*/ -void tdb_lsm_enable_log(TestDb *pDb, int bEnable); -void tdb_lsm_application_crash(TestDb *pDb); -void tdb_lsm_prepare_system_crash(TestDb *pDb); -void tdb_lsm_system_crash(TestDb *pDb); -void tdb_lsm_prepare_sync_crash(TestDb *pDb, int iSync); - - -void tdb_lsm_safety(TestDb *pDb, int eMode); -void tdb_lsm_config_work_hook(TestDb *pDb, void (*)(lsm_db *, void *), void *); -void tdb_lsm_write_hook(TestDb *, void(*)(void*,int,lsm_i64,int,int), void*); -int tdb_lsm_config_str(TestDb *pDb, const char *zStr); - -#ifdef __cplusplus -} /* End of the 'extern "C"' block */ -#endif - -#endif diff --git a/ext/lsm1/lsm-test/lsmtest_tdb2.cc b/ext/lsm1/lsm-test/lsmtest_tdb2.cc deleted file mode 100644 index 86ebb49583..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_tdb2.cc +++ /dev/null @@ -1,369 +0,0 @@ - - -#include "lsmtest.h" -#include - -#ifdef HAVE_KYOTOCABINET -#include "kcpolydb.h" -extern "C" { - struct KcDb { - TestDb base; - kyotocabinet::TreeDB* db; - char *pVal; - }; -} - -int test_kc_open(const char *zFilename, int bClear, TestDb **ppDb){ - KcDb *pKcDb; - int ok; - int rc = 0; - - if( bClear ){ - char *zCmd = sqlite3_mprintf("rm -rf %s\n", zFilename); - system(zCmd); - sqlite3_free(zCmd); - } - - pKcDb = (KcDb *)malloc(sizeof(KcDb)); - memset(pKcDb, 0, sizeof(KcDb)); - - - pKcDb->db = new kyotocabinet::TreeDB(); - pKcDb->db->tune_page(TESTDB_DEFAULT_PAGE_SIZE); - pKcDb->db->tune_page_cache( - TESTDB_DEFAULT_PAGE_SIZE * TESTDB_DEFAULT_CACHE_SIZE - ); - ok = pKcDb->db->open(zFilename, - kyotocabinet::PolyDB::OWRITER | kyotocabinet::PolyDB::OCREATE - ); - if( ok==0 ){ - free(pKcDb); - pKcDb = 0; - rc = 1; - } - - *ppDb = (TestDb *)pKcDb; - return rc; -} - -int test_kc_close(TestDb *pDb){ - KcDb *pKcDb = (KcDb *)pDb; - if( pKcDb->pVal ){ - delete [] pKcDb->pVal; - } - pKcDb->db->close(); - delete pKcDb->db; - free(pKcDb); - return 0; -} - -int test_kc_write(TestDb *pDb, void *pKey, int nKey, void *pVal, int nVal){ - KcDb *pKcDb = (KcDb *)pDb; - int ok; - - ok = pKcDb->db->set((const char *)pKey, nKey, (const char *)pVal, nVal); 
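  /* TreeDB::set() returns true on success; invert it so that, like the
  ** other TestDb methods, 0 means success. */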
- return (ok ? 0 : 1); -} - -int test_kc_delete(TestDb *pDb, void *pKey, int nKey){ - KcDb *pKcDb = (KcDb *)pDb; - int ok; - - ok = pKcDb->db->remove((const char *)pKey, nKey); - return (ok ? 0 : 1); -} - -int test_kc_delete_range( - TestDb *pDb, - void *pKey1, int nKey1, - void *pKey2, int nKey2 -){ - int res; - KcDb *pKcDb = (KcDb *)pDb; - kyotocabinet::DB::Cursor* pCur = pKcDb->db->cursor(); - - if( pKey1 ){ - res = pCur->jump((const char *)pKey1, nKey1); - }else{ - res = pCur->jump(); - } - - while( 1 ){ - const char *pKey; size_t nKey; - const char *pVal; size_t nVal; - - pKey = pCur->get(&nKey, &pVal, &nVal); - if( pKey==0 ) break; - -#ifndef NDEBUG - if( pKey1 ){ - res = memcmp(pKey, pKey1, MIN((size_t)nKey1, nKey)); - assert( res>0 || (res==0 && nKey>nKey1) ); - } -#endif - - if( pKey2 ){ - res = memcmp(pKey, pKey2, MIN((size_t)nKey2, nKey)); - if( res>0 || (res==0 && (size_t)nKey2remove(); - delete [] pKey; - } - - delete pCur; - return 0; -} - -int test_kc_fetch( - TestDb *pDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - KcDb *pKcDb = (KcDb *)pDb; - size_t nVal; - - if( pKcDb->pVal ){ - delete [] pKcDb->pVal; - pKcDb->pVal = 0; - } - - pKcDb->pVal = pKcDb->db->get((const char *)pKey, nKey, &nVal); - if( pKcDb->pVal ){ - *ppVal = pKcDb->pVal; - *pnVal = nVal; - }else{ - *ppVal = 0; - *pnVal = -1; - } - - return 0; -} - -int test_kc_scan( - TestDb *pDb, /* Database handle */ - void *pCtx, /* Context pointer to pass to xCallback */ - int bReverse, /* True for a reverse order scan */ - void *pKey1, int nKey1, /* Start of search */ - void *pKey2, int nKey2, /* End of search */ - void (*xCallback)(void *pCtx, void *pKey, int nKey, void *pVal, int nVal) -){ - KcDb *pKcDb = (KcDb *)pDb; - kyotocabinet::DB::Cursor* pCur = pKcDb->db->cursor(); - int res; - - if( bReverse==0 ){ - if( pKey1 ){ - res = pCur->jump((const char *)pKey1, nKey1); - }else{ - res = pCur->jump(); - } - }else{ - if( pKey2 ){ - res = pCur->jump_back((const char *)pKey2, nKey2); - }else{ - res = pCur->jump_back(); - } - } - - while( res ){ - const char *pKey; size_t nKey; - const char *pVal; size_t nVal; - pKey = pCur->get(&nKey, &pVal, &nVal); - - if( bReverse==0 && pKey2 ){ - res = memcmp(pKey, pKey2, MIN((size_t)nKey2, nKey)); - if( res>0 || (res==0 && (size_t)nKey2nKey) ){ - delete [] pKey; - break; - } - } - - xCallback(pCtx, (void *)pKey, (int)nKey, (void *)pVal, (int)nVal); - delete [] pKey; - - if( bReverse ){ - res = pCur->step_back(); - }else{ - res = pCur->step(); - } - } - - delete pCur; - return 0; -} -#endif /* HAVE_KYOTOCABINET */ - -#ifdef HAVE_MDB -#include "lmdb.h" - -extern "C" { - struct MdbDb { - TestDb base; - MDB_env *env; - MDB_dbi dbi; - }; -} - -int test_mdb_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - MDB_txn *txn; - MdbDb *pMdb; - int rc; - - if( bClear ){ - char *zCmd = sqlite3_mprintf("rm -rf %s\n", zFilename); - system(zCmd); - sqlite3_free(zCmd); - } - - pMdb = (MdbDb *)malloc(sizeof(MdbDb)); - memset(pMdb, 0, sizeof(MdbDb)); - - rc = mdb_env_create(&pMdb->env); - if( rc==0 ) rc = mdb_env_set_mapsize(pMdb->env, 1*1024*1024*1024); - if( rc==0 ) rc = mdb_env_open(pMdb->env, zFilename, MDB_NOSYNC|MDB_NOSUBDIR, 0600); - if( rc==0 ) rc = mdb_txn_begin(pMdb->env, NULL, 0, &txn); - if( rc==0 ){ - rc = mdb_open(txn, NULL, 0, &pMdb->dbi); - mdb_txn_commit(txn); - } - - *ppDb = (TestDb *)pMdb; - return rc; -} - -int test_mdb_close(TestDb *pDb){ - MdbDb *pMdb = (MdbDb *)pDb; - - mdb_close(pMdb->env, pMdb->dbi); - mdb_env_close(pMdb->env); - 
free(pMdb); - return 0; -} - -int test_mdb_write(TestDb *pDb, void *pKey, int nKey, void *pVal, int nVal){ - int rc; - MdbDb *pMdb = (MdbDb *)pDb; - MDB_val val; - MDB_val key; - MDB_txn *txn; - - val.mv_size = nVal; - val.mv_data = pVal; - key.mv_size = nKey; - key.mv_data = pKey; - - rc = mdb_txn_begin(pMdb->env, NULL, 0, &txn); - if( rc==0 ){ - rc = mdb_put(txn, pMdb->dbi, &key, &val, 0); - if( rc==0 ){ - rc = mdb_txn_commit(txn); - }else{ - mdb_txn_abort(txn); - } - } - - return rc; -} - -int test_mdb_delete(TestDb *pDb, void *pKey, int nKey){ - int rc; - MdbDb *pMdb = (MdbDb *)pDb; - MDB_val key; - MDB_txn *txn; - - key.mv_size = nKey; - key.mv_data = pKey; - rc = mdb_txn_begin(pMdb->env, NULL, 0, &txn); - if( rc==0 ){ - rc = mdb_del(txn, pMdb->dbi, &key, 0); - if( rc==0 ){ - rc = mdb_txn_commit(txn); - }else{ - mdb_txn_abort(txn); - } - } - - return rc; -} - -int test_mdb_fetch( - TestDb *pDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - int rc; - MdbDb *pMdb = (MdbDb *)pDb; - MDB_val key; - MDB_txn *txn; - - key.mv_size = nKey; - key.mv_data = pKey; - - rc = mdb_txn_begin(pMdb->env, NULL, MDB_RDONLY, &txn); - if( rc==0 ){ - MDB_val val = {0, 0}; - rc = mdb_get(txn, pMdb->dbi, &key, &val); - if( rc==MDB_NOTFOUND ){ - rc = 0; - *ppVal = 0; - *pnVal = -1; - }else{ - *ppVal = val.mv_data; - *pnVal = val.mv_size; - } - mdb_txn_commit(txn); - } - - return rc; -} - -int test_mdb_scan( - TestDb *pDb, /* Database handle */ - void *pCtx, /* Context pointer to pass to xCallback */ - int bReverse, /* True for a reverse order scan */ - void *pKey1, int nKey1, /* Start of search */ - void *pKey2, int nKey2, /* End of search */ - void (*xCallback)(void *pCtx, void *pKey, int nKey, void *pVal, int nVal) -){ - MdbDb *pMdb = (MdbDb *)pDb; - int rc; - MDB_cursor_op op = bReverse ? MDB_PREV : MDB_NEXT; - MDB_txn *txn; - - rc = mdb_txn_begin(pMdb->env, NULL, MDB_RDONLY, &txn); - if( rc==0 ){ - MDB_cursor *csr; - MDB_val key = {0, 0}; - MDB_val val = {0, 0}; - - rc = mdb_cursor_open(txn, pMdb->dbi, &csr); - if( rc==0 ){ - while( mdb_cursor_get(csr, &key, &val, op)==0 ){ - xCallback(pCtx, key.mv_data, key.mv_size, val.mv_data, val.mv_size); - } - mdb_cursor_close(csr); - } - } - - return rc; -} - -#endif /* HAVE_MDB */ diff --git a/ext/lsm1/lsm-test/lsmtest_tdb3.c b/ext/lsm1/lsm-test/lsmtest_tdb3.c deleted file mode 100644 index e29497af20..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_tdb3.c +++ /dev/null @@ -1,1429 +0,0 @@ - -#include "lsmtest_tdb.h" -#include "lsm.h" -#include "lsmtest.h" - -#include -#include -#include -#ifndef _WIN32 -# include -#endif -#include - -#ifndef _WIN32 -# include -#endif - -typedef struct LsmDb LsmDb; -typedef struct LsmWorker LsmWorker; -typedef struct LsmFile LsmFile; - -#define LSMTEST_DFLT_MT_MAX_CKPT (8*1024) -#define LSMTEST_DFLT_MT_MIN_CKPT (2*1024) - -#ifdef LSM_MUTEX_PTHREADS -#include - -#define LSMTEST_THREAD_CKPT 1 -#define LSMTEST_THREAD_WORKER 2 -#define LSMTEST_THREAD_WORKER_AC 3 - -/* -** There are several different types of worker threads that run in different -** test configurations, depending on the value of LsmWorker.eType. -** -** 1. Checkpointer. -** 2. Worker with auto-checkpoint. -** 3. Worker without auto-checkpoint. 
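**
** LsmWorker.eType records which of these roles a thread plays (one of the
** LSMTEST_THREAD_* constants above).  Client threads wake the workers via
** mt_signal_worker() when waitOnCheckpointer() or waitOnWorker() detects
** that the checkpoint or in-memory tree limits have been exceeded.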
-*/ -struct LsmWorker { - LsmDb *pDb; /* Main database structure */ - lsm_db *pWorker; /* Worker database handle */ - pthread_t worker_thread; /* Worker thread */ - pthread_cond_t worker_cond; /* Condition var the worker waits on */ - pthread_mutex_t worker_mutex; /* Mutex used with worker_cond */ - int bDoWork; /* Set to true by client when there is work */ - int worker_rc; /* Store error code here */ - int eType; /* LSMTEST_THREAD_XXX constant */ - int bBlock; -}; -#else -struct LsmWorker { int worker_rc; int bBlock; }; -#endif - -static void mt_shutdown(LsmDb *); - -lsm_env *tdb_lsm_env(void){ - static int bInit = 0; - static lsm_env env; - if( bInit==0 ){ - memcpy(&env, lsm_default_env(), sizeof(env)); - bInit = 1; - } - return &env; -} - -typedef struct FileSector FileSector; -typedef struct FileData FileData; - -struct FileSector { - u8 *aOld; /* Old data for this sector */ -}; - -struct FileData { - int nSector; /* Allocated size of apSector[] array */ - FileSector *aSector; /* Array of file sectors */ -}; - -/* -** bPrepareCrash: -** If non-zero, the file wrappers maintain enough in-memory data to -** simulate the effect of a power-failure on the file-system (i.e. that -** unsynced sectors may be written, not written, or overwritten with -** arbitrary data when the crash occurs). -** -** bCrashed: -** Set to true after a crash is simulated. Once this variable is true, all -** VFS methods other than xClose() return LSM_IOERR as soon as they are -** called (without affecting the contents of the file-system). -** -** env: -** The environment object used by all lsm_db* handles opened by this -** object (i.e. LsmDb.db plus any worker connections). Variable env.pVfsCtx -** always points to the containing LsmDb structure. -*/ -struct LsmDb { - TestDb base; /* Base class - methods table */ - lsm_env env; /* Environment used by connection db */ - char *zName; /* Database file name */ - lsm_db *db; /* LSM database handle */ - - lsm_cursor *pCsr; /* Cursor held open during read transaction */ - void *pBuf; /* Buffer for tdb_fetch() output */ - int nBuf; /* Allocated (not used) size of pBuf */ - - /* Crash testing related state */ - int bCrashed; /* True once a crash has occurred */ - int nAutoCrash; /* Number of syncs until a crash */ - int bPrepareCrash; /* True to store writes in memory */ - - /* Unsynced data (while crash testing) */ - int szSector; /* Assumed size of disk sectors (512B) */ - FileData aFile[2]; /* Database and log file data */ - - /* Other test instrumentation */ - int bNoRecovery; /* If true, assume DMS2 is locked */ - - /* Work hook redirection */ - void (*xWork)(lsm_db *, void *); - void *pWorkCtx; - - /* IO logging hook */ - void (*xWriteHook)(void *, int, lsm_i64, int, int); - void *pWriteCtx; - - /* Worker threads (for lsm_mt) */ - int nMtMinCkpt; - int nMtMaxCkpt; - int eMode; - int nWorker; - LsmWorker *aWorker; -}; - -#define LSMTEST_MODE_SINGLETHREAD 1 -#define LSMTEST_MODE_BACKGROUND_CKPT 2 -#define LSMTEST_MODE_BACKGROUND_WORK 3 -#define LSMTEST_MODE_BACKGROUND_BOTH 4 - -/************************************************************************* -************************************************************************** -** Begin test VFS code. -*/ - -struct LsmFile { - lsm_file *pReal; /* Real underlying file */ - int bLog; /* True for log file. 
False for db file */ - LsmDb *pDb; /* Database handle that uses this file */ -}; - -static int testEnvFullpath( - lsm_env *pEnv, /* Environment for current LsmDb */ - const char *zFile, /* Relative path name */ - char *zOut, /* Output buffer */ - int *pnOut /* IN/OUT: Size of output buffer */ -){ - lsm_env *pRealEnv = tdb_lsm_env(); - return pRealEnv->xFullpath(pRealEnv, zFile, zOut, pnOut); -} - -static int testEnvOpen( - lsm_env *pEnv, /* Environment for current LsmDb */ - const char *zFile, /* Name of file to open */ - int flags, - lsm_file **ppFile /* OUT: New file handle object */ -){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmDb *pDb = (LsmDb *)pEnv->pVfsCtx; - int rc; /* Return Code */ - LsmFile *pRet; /* The new file handle */ - int nFile; /* Length of string zFile in bytes */ - - nFile = strlen(zFile); - pRet = (LsmFile *)testMalloc(sizeof(LsmFile)); - pRet->pDb = pDb; - pRet->bLog = (nFile > 4 && 0==memcmp("-log", &zFile[nFile-4], 4)); - - rc = pRealEnv->xOpen(pRealEnv, zFile, flags, &pRet->pReal); - if( rc!=LSM_OK ){ - testFree(pRet); - pRet = 0; - } - - *ppFile = (lsm_file *)pRet; - return rc; -} - -static int testEnvRead(lsm_file *pFile, lsm_i64 iOff, void *pData, int nData){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - if( p->pDb->bCrashed ) return LSM_IOERR; - return pRealEnv->xRead(p->pReal, iOff, pData, nData); -} - -static int testEnvWrite(lsm_file *pFile, lsm_i64 iOff, void *pData, int nData){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - LsmDb *pDb = p->pDb; - - if( pDb->bCrashed ) return LSM_IOERR; - - if( pDb->bPrepareCrash ){ - FileData *pData2 = &pDb->aFile[p->bLog]; - int iFirst; - int iLast; - int iSector; - - iFirst = (int)(iOff / pDb->szSector); - iLast = (int)((iOff + nData - 1) / pDb->szSector); - - if( pData2->nSector<(iLast+1) ){ - int nNew = ( ((iLast + 1) + 63) / 64 ) * 64; - assert( nNew>iLast ); - pData2->aSector = (FileSector *)testRealloc( - pData2->aSector, nNew*sizeof(FileSector) - ); - memset(&pData2->aSector[pData2->nSector], - 0, (nNew - pData2->nSector) * sizeof(FileSector) - ); - pData2->nSector = nNew; - } - - for(iSector=iFirst; iSector<=iLast; iSector++){ - if( pData2->aSector[iSector].aOld==0 ){ - u8 *aOld = (u8 *)testMalloc(pDb->szSector); - pRealEnv->xRead( - p->pReal, (lsm_i64)iSector*pDb->szSector, aOld, pDb->szSector - ); - pData2->aSector[iSector].aOld = aOld; - } - } - } - - if( pDb->xWriteHook ){ - int rc; - int nUs; - struct timeval t1; - struct timeval t2; - - gettimeofday(&t1, 0); - assert( nData>0 ); - rc = pRealEnv->xWrite(p->pReal, iOff, pData, nData); - gettimeofday(&t2, 0); - - nUs = (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec); - pDb->xWriteHook(pDb->pWriteCtx, p->bLog, iOff, nData, nUs); - return rc; - } - - return pRealEnv->xWrite(p->pReal, iOff, pData, nData); -} - -static void doSystemCrash(LsmDb *pDb); - -static int testEnvSync(lsm_file *pFile){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - LsmDb *pDb = p->pDb; - FileData *pData = &pDb->aFile[p->bLog]; - int i; - - if( pDb->bCrashed ) return LSM_IOERR; - - if( pDb->nAutoCrash ){ - pDb->nAutoCrash--; - if( pDb->nAutoCrash==0 ){ - doSystemCrash(pDb); - pDb->bCrashed = 1; - return LSM_IOERR; - } - } - - if( pDb->bPrepareCrash ){ - for(i=0; inSector; i++){ - testFree(pData->aSector[i].aOld); - pData->aSector[i].aOld = 0; - } - } - - if( pDb->xWriteHook ){ - int rc; - int nUs; - struct timeval t1; - struct timeval t2; - - gettimeofday(&t1, 0); - rc = pRealEnv->xSync(p->pReal); - 
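    /* t1 was sampled just before the xSync() call above; t2 below completes
    ** the timing that is reported through the write-hook (nData==0 marks a
    ** sync event rather than a write). */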
gettimeofday(&t2, 0); - - nUs = (t2.tv_sec - t1.tv_sec) * 1000000 + (t2.tv_usec - t1.tv_usec); - pDb->xWriteHook(pDb->pWriteCtx, p->bLog, 0, 0, nUs); - return rc; - } - - return pRealEnv->xSync(p->pReal); -} - -static int testEnvTruncate(lsm_file *pFile, lsm_i64 iOff){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - if( p->pDb->bCrashed ) return LSM_IOERR; - return pRealEnv->xTruncate(p->pReal, iOff); -} - -static int testEnvSectorSize(lsm_file *pFile){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - return pRealEnv->xSectorSize(p->pReal); -} - -static int testEnvRemap( - lsm_file *pFile, - lsm_i64 iMin, - void **ppOut, - lsm_i64 *pnOut -){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - return pRealEnv->xRemap(p->pReal, iMin, ppOut, pnOut); -} - -static int testEnvFileid( - lsm_file *pFile, - void *ppOut, - int *pnOut -){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - return pRealEnv->xFileid(p->pReal, ppOut, pnOut); -} - -static int testEnvClose(lsm_file *pFile){ - lsm_env *pRealEnv = tdb_lsm_env(); - LsmFile *p = (LsmFile *)pFile; - - pRealEnv->xClose(p->pReal); - testFree(p); - return LSM_OK; -} - -static int testEnvUnlink(lsm_env *pEnv, const char *zFile){ - lsm_env *pRealEnv = tdb_lsm_env(); - unused_parameter(pEnv); - return pRealEnv->xUnlink(pRealEnv, zFile); -} - -static int testEnvLock(lsm_file *pFile, int iLock, int eType){ - LsmFile *p = (LsmFile *)pFile; - lsm_env *pRealEnv = tdb_lsm_env(); - - if( iLock==2 && eType==LSM_LOCK_EXCL && p->pDb->bNoRecovery ){ - return LSM_BUSY; - } - return pRealEnv->xLock(p->pReal, iLock, eType); -} - -static int testEnvTestLock(lsm_file *pFile, int iLock, int nLock, int eType){ - LsmFile *p = (LsmFile *)pFile; - lsm_env *pRealEnv = tdb_lsm_env(); - - if( iLock==2 && eType==LSM_LOCK_EXCL && p->pDb->bNoRecovery ){ - return LSM_BUSY; - } - return pRealEnv->xTestLock(p->pReal, iLock, nLock, eType); -} - -static int testEnvShmMap(lsm_file *pFile, int iRegion, int sz, void **pp){ - LsmFile *p = (LsmFile *)pFile; - lsm_env *pRealEnv = tdb_lsm_env(); - return pRealEnv->xShmMap(p->pReal, iRegion, sz, pp); -} - -static void testEnvShmBarrier(void){ -} - -static int testEnvShmUnmap(lsm_file *pFile, int bDel){ - LsmFile *p = (LsmFile *)pFile; - lsm_env *pRealEnv = tdb_lsm_env(); - return pRealEnv->xShmUnmap(p->pReal, bDel); -} - -static int testEnvSleep(lsm_env *pEnv, int us){ - lsm_env *pRealEnv = tdb_lsm_env(); - return pRealEnv->xSleep(pRealEnv, us); -} - -static void doSystemCrash(LsmDb *pDb){ - lsm_env *pEnv = tdb_lsm_env(); - int iFile; - int iSeed = pDb->aFile[0].nSector + pDb->aFile[1].nSector; - - char *zFile = pDb->zName; - char *zFree = 0; - - for(iFile=0; iFile<2; iFile++){ - lsm_file *pFile = 0; - int i; - - pEnv->xOpen(pEnv, zFile, 0, &pFile); - for(i=0; iaFile[iFile].nSector; i++){ - u8 *aOld = pDb->aFile[iFile].aSector[i].aOld; - if( aOld ){ - int iOpt = testPrngValue(iSeed++) % 3; - switch( iOpt ){ - case 0: - break; - - case 1: - testPrngArray(iSeed++, (u32 *)aOld, pDb->szSector/4); - /* Fall-through */ - - case 2: - pEnv->xWrite( - pFile, (lsm_i64)i * pDb->szSector, aOld, pDb->szSector - ); - break; - } - testFree(aOld); - pDb->aFile[iFile].aSector[i].aOld = 0; - } - } - pEnv->xClose(pFile); - zFree = zFile = sqlite3_mprintf("%s-log", pDb->zName); - } - - sqlite3_free(zFree); -} -/* -** End test VFS code. 
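**
** (doSystemCrash() above emulates the power failure: for every sector with
** unsynced content it pseudo-randomly either keeps the newly written data,
** overwrites the sector with random bytes, or restores the saved pre-write
** image, then discards the saved copies.)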
-************************************************************************** -*************************************************************************/ - -/************************************************************************* -************************************************************************** -** Begin test compression hooks. -*/ - -#ifdef HAVE_ZLIB -#include - -static int testZipBound(void *pCtx, int nSrc){ - return compressBound(nSrc); -} - -static int testZipCompress( - void *pCtx, /* Context pointer */ - char *aOut, int *pnOut, /* OUT: Buffer containing compressed data */ - const char *aIn, int nIn /* Buffer containing input data */ -){ - uLongf n = *pnOut; /* In/out buffer size for compress() */ - int rc; /* compress() return code */ - - rc = compress((Bytef*)aOut, &n, (Bytef*)aIn, nIn); - *pnOut = n; - return (rc==Z_OK ? 0 : LSM_ERROR); -} - -static int testZipUncompress( - void *pCtx, /* Context pointer */ - char *aOut, int *pnOut, /* OUT: Buffer containing uncompressed data */ - const char *aIn, int nIn /* Buffer containing input data */ -){ - uLongf n = *pnOut; /* In/out buffer size for uncompress() */ - int rc; /* uncompress() return code */ - - rc = uncompress((Bytef*)aOut, &n, (Bytef*)aIn, nIn); - *pnOut = n; - return (rc==Z_OK ? 0 : LSM_ERROR); -} - -static int testConfigureCompression(lsm_db *pDb){ - static lsm_compress zip = { - 0, /* Context pointer (unused) */ - 1, /* Id value */ - testZipBound, /* xBound method */ - testZipCompress, /* xCompress method */ - testZipUncompress /* xUncompress method */ - }; - return lsm_config(pDb, LSM_CONFIG_SET_COMPRESSION, &zip); -} -#endif /* ifdef HAVE_ZLIB */ - -/* -** End test compression hooks. -************************************************************************** -*************************************************************************/ - -static int test_lsm_close(TestDb *pTestDb){ - int i; - int rc = LSM_OK; - LsmDb *pDb = (LsmDb *)pTestDb; - - lsm_csr_close(pDb->pCsr); - lsm_close(pDb->db); - - /* If this is a multi-threaded database, wait on the worker threads. */ - mt_shutdown(pDb); - for(i=0; inWorker && rc==LSM_OK; i++){ - rc = pDb->aWorker[i].worker_rc; - } - - for(i=0; iaFile[0].nSector; i++){ - testFree(pDb->aFile[0].aSector[i].aOld); - } - testFree(pDb->aFile[0].aSector); - for(i=0; iaFile[1].nSector; i++){ - testFree(pDb->aFile[1].aSector[i].aOld); - } - testFree(pDb->aFile[1].aSector); - - memset(pDb, sizeof(LsmDb), 0x11); - testFree((char *)pDb->pBuf); - testFree((char *)pDb); - return rc; -} - -static void mt_signal_worker(LsmDb*, int); - -static int waitOnCheckpointer(LsmDb *pDb, lsm_db *db){ - int nSleep = 0; - int nKB; - int rc; - - do { - nKB = 0; - rc = lsm_info(db, LSM_INFO_CHECKPOINT_SIZE, &nKB); - if( rc!=LSM_OK || nKBnMtMaxCkpt ) break; -#ifdef LSM_MUTEX_PTHREADS - mt_signal_worker(pDb, - (pDb->eMode==LSMTEST_MODE_BACKGROUND_CKPT ? 
0 : 1) - ); -#endif - usleep(5000); - nSleep += 5; - }while( 1 ); - -#if 0 - if( nSleep ) printf("# waitOnCheckpointer(): nSleep=%d\n", nSleep); -#endif - - return rc; -} - -static int waitOnWorker(LsmDb *pDb){ - int rc; - int nLimit = -1; - int nSleep = 0; - - rc = lsm_config(pDb->db, LSM_CONFIG_AUTOFLUSH, &nLimit); - do { - int nOld, nNew, rc2; - rc2 = lsm_info(pDb->db, LSM_INFO_TREE_SIZE, &nOld, &nNew); - if( rc2!=LSM_OK ) return rc2; - if( nOld==0 || nNew<(nLimit/2) ) break; -#ifdef LSM_MUTEX_PTHREADS - mt_signal_worker(pDb, 0); -#endif - usleep(5000); - nSleep += 5; - }while( 1 ); - -#if 0 - if( nSleep ) printf("# waitOnWorker(): nSleep=%d\n", nSleep); -#endif - - return rc; -} - -static int test_lsm_write( - TestDb *pTestDb, - void *pKey, - int nKey, - void *pVal, - int nVal -){ - LsmDb *pDb = (LsmDb *)pTestDb; - int rc = LSM_OK; - - if( pDb->eMode==LSMTEST_MODE_BACKGROUND_CKPT ){ - rc = waitOnCheckpointer(pDb, pDb->db); - }else if( - pDb->eMode==LSMTEST_MODE_BACKGROUND_WORK - || pDb->eMode==LSMTEST_MODE_BACKGROUND_BOTH - ){ - rc = waitOnWorker(pDb); - } - - if( rc==LSM_OK ){ - rc = lsm_insert(pDb->db, pKey, nKey, pVal, nVal); - } - return rc; -} - -static int test_lsm_delete(TestDb *pTestDb, void *pKey, int nKey){ - LsmDb *pDb = (LsmDb *)pTestDb; - return lsm_delete(pDb->db, pKey, nKey); -} - -static int test_lsm_delete_range( - TestDb *pTestDb, - void *pKey1, int nKey1, - void *pKey2, int nKey2 -){ - LsmDb *pDb = (LsmDb *)pTestDb; - return lsm_delete_range(pDb->db, pKey1, nKey1, pKey2, nKey2); -} - -static int test_lsm_fetch( - TestDb *pTestDb, - void *pKey, - int nKey, - void **ppVal, - int *pnVal -){ - int rc; - LsmDb *pDb = (LsmDb *)pTestDb; - lsm_cursor *csr; - - if( pKey==0 ) return LSM_OK; - - if( pDb->pCsr==0 ){ - rc = lsm_csr_open(pDb->db, &csr); - if( rc!=LSM_OK ) return rc; - }else{ - csr = pDb->pCsr; - } - - rc = lsm_csr_seek(csr, pKey, nKey, LSM_SEEK_EQ); - if( rc==LSM_OK ){ - if( lsm_csr_valid(csr) ){ - const void *pVal; int nVal; - rc = lsm_csr_value(csr, &pVal, &nVal); - if( nVal>pDb->nBuf ){ - testFree(pDb->pBuf); - pDb->pBuf = testMalloc(nVal*2); - pDb->nBuf = nVal*2; - } - memcpy(pDb->pBuf, pVal, nVal); - *ppVal = pDb->pBuf; - *pnVal = nVal; - }else{ - *ppVal = 0; - *pnVal = -1; - } - } - if( pDb->pCsr==0 ){ - lsm_csr_close(csr); - } - return rc; -} - -static int test_lsm_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pFirst, int nFirst, - void *pLast, int nLast, - void (*xCallback)(void *, void *, int , void *, int) -){ - LsmDb *pDb = (LsmDb *)pTestDb; - lsm_cursor *csr; - lsm_cursor *csr2 = 0; - int rc; - - if( pDb->pCsr==0 ){ - rc = lsm_csr_open(pDb->db, &csr); - if( rc!=LSM_OK ) return rc; - }else{ - rc = LSM_OK; - csr = pDb->pCsr; - } - - /* To enhance testing, if both pLast and pFirst are defined, seek the - ** cursor to the "end" boundary here. Then the next block seeks it to - ** the "start" ready for the scan. The point is to test that cursors - ** can be reused. 
-  */
-  if( pLast && pFirst ){
-    if( bReverse ){
-      rc = lsm_csr_seek(csr, pFirst, nFirst, LSM_SEEK_LE);
-    }else{
-      rc = lsm_csr_seek(csr, pLast, nLast, LSM_SEEK_GE);
-    }
-  }
-
-  if( bReverse ){
-    if( pLast ){
-      rc = lsm_csr_seek(csr, pLast, nLast, LSM_SEEK_LE);
-    }else{
-      rc = lsm_csr_last(csr);
-    }
-  }else{
-    if( pFirst ){
-      rc = lsm_csr_seek(csr, pFirst, nFirst, LSM_SEEK_GE);
-    }else{
-      rc = lsm_csr_first(csr);
-    }
-  }
-
-  while( rc==LSM_OK && lsm_csr_valid(csr) ){
-    const void *pKey; int nKey;
-    const void *pVal; int nVal;
-    int cmp;
-
-    lsm_csr_key(csr, &pKey, &nKey);
-    lsm_csr_value(csr, &pVal, &nVal);
-
-    if( bReverse && pFirst ){
-      cmp = memcmp(pFirst, pKey, MIN(nKey, nFirst));
-      if( cmp>0 || (cmp==0 && nFirst>nKey) ) break;
-    }else if( bReverse==0 && pLast ){
-      cmp = memcmp(pLast, pKey, MIN(nKey, nLast));
-      if( cmp<0 || (cmp==0 && nLast<nKey) ) break;
-    }
-
-    xCallback(pCtx, (void *)pKey, nKey, (void *)pVal, nVal);
-
-    if( bReverse ){
-      rc = lsm_csr_prev(csr);
-    }else{
-      rc = lsm_csr_next(csr);
-    }
-  }
-
-  if( pDb->pCsr==0 ){
-    lsm_csr_close(csr);
-  }
-  return rc;
-}
-
-static int test_lsm_begin(TestDb *pTestDb, int iLevel){
-  int rc = LSM_OK;
-  LsmDb *pDb = (LsmDb *)pTestDb;
-
-  /* iLevel==0 is a no-op. */
-  if( iLevel==0 ) return 0;
-
-  if( pDb->pCsr==0 ) rc = lsm_csr_open(pDb->db, &pDb->pCsr);
-  if( rc==LSM_OK && iLevel>1 ){
-    rc = lsm_begin(pDb->db, iLevel-1);
-  }
-
-  return rc;
-}
-static int test_lsm_commit(TestDb *pTestDb, int iLevel){
-  LsmDb *pDb = (LsmDb *)pTestDb;
-
-  /* If iLevel==0, close any open read transaction */
-  if( iLevel==0 && pDb->pCsr ){
-    lsm_csr_close(pDb->pCsr);
-    pDb->pCsr = 0;
-  }
-
-  /* If iLevel==0, close any open read transaction */
-  return lsm_commit(pDb->db, MAX(0, iLevel-1));
-}
-static int test_lsm_rollback(TestDb *pTestDb, int iLevel){
-  LsmDb *pDb = (LsmDb *)pTestDb;
-
-  /* If iLevel==0, close any open read transaction */
-  if( iLevel==0 && pDb->pCsr ){
-    lsm_csr_close(pDb->pCsr);
-    pDb->pCsr = 0;
-  }
-
-  return lsm_rollback(pDb->db, MAX(0, iLevel-1));
-}
-
-/*
-** A log message callback registered with lsm connections. Prints all
-** messages to stderr.
-*/ -static void xLog(void *pCtx, int rc, const char *z){ - unused_parameter(rc); - /* fprintf(stderr, "lsm: rc=%d \"%s\"\n", rc, z); */ - if( pCtx ) fprintf(stderr, "%s: ", (char *)pCtx); - fprintf(stderr, "%s\n", z); - fflush(stderr); -} - -static void xWorkHook(lsm_db *db, void *pArg){ - LsmDb *p = (LsmDb *)pArg; - if( p->xWork ) p->xWork(db, p->pWorkCtx); -} - -#define TEST_NO_RECOVERY -1 -#define TEST_COMPRESSION -3 - -#define TEST_MT_MODE -2 -#define TEST_MT_MIN_CKPT -4 -#define TEST_MT_MAX_CKPT -5 - - -int test_lsm_config_str( - LsmDb *pLsm, - lsm_db *db, - int bWorker, - const char *zStr, - int *pnThread -){ - struct CfgParam { - const char *zParam; - int bWorker; - int eParam; - } aParam[] = { - { "autoflush", 0, LSM_CONFIG_AUTOFLUSH }, - { "page_size", 0, LSM_CONFIG_PAGE_SIZE }, - { "block_size", 0, LSM_CONFIG_BLOCK_SIZE }, - { "safety", 0, LSM_CONFIG_SAFETY }, - { "autowork", 0, LSM_CONFIG_AUTOWORK }, - { "autocheckpoint", 0, LSM_CONFIG_AUTOCHECKPOINT }, - { "mmap", 0, LSM_CONFIG_MMAP }, - { "use_log", 0, LSM_CONFIG_USE_LOG }, - { "automerge", 0, LSM_CONFIG_AUTOMERGE }, - { "max_freelist", 0, LSM_CONFIG_MAX_FREELIST }, - { "multi_proc", 0, LSM_CONFIG_MULTIPLE_PROCESSES }, - { "worker_automerge", 1, LSM_CONFIG_AUTOMERGE }, - { "test_no_recovery", 0, TEST_NO_RECOVERY }, - { "bg_min_ckpt", 0, TEST_NO_RECOVERY }, - - { "mt_mode", 0, TEST_MT_MODE }, - { "mt_min_ckpt", 0, TEST_MT_MIN_CKPT }, - { "mt_max_ckpt", 0, TEST_MT_MAX_CKPT }, - -#ifdef HAVE_ZLIB - { "compression", 0, TEST_COMPRESSION }, -#endif - { 0, 0 } - }; - const char *z = zStr; - int nThread = 1; - - if( zStr==0 ) return 0; - - assert( db ); - while( z[0] ){ - const char *zStart; - - /* Skip whitespace */ - while( *z==' ' ) z++; - zStart = z; - - while( *z && *z!='=' ) z++; - if( *z ){ - int eParam; - int i; - int iVal; - int iMul = 1; - int rc; - char zParam[32]; - int nParam = z-zStart; - if( nParam==0 || nParam>sizeof(zParam)-1 ) goto syntax_error; - - memcpy(zParam, zStart, nParam); - zParam[nParam] = '\0'; - rc = testArgSelect(aParam, "param", zParam, &i); - if( rc!=0 ) return rc; - eParam = aParam[i].eParam; - - z++; - zStart = z; - while( *z>='0' && *z<='9' ) z++; - if( *z=='k' || *z=='K' ){ - iMul = 1; - z++; - }else if( *z=='M' || *z=='M' ){ - iMul = 1024; - z++; - } - nParam = z-zStart; - if( nParam==0 || nParam>sizeof(zParam)-1 ) goto syntax_error; - memcpy(zParam, zStart, nParam); - zParam[nParam] = '\0'; - iVal = atoi(zParam) * iMul; - - if( eParam>0 ){ - if( bWorker || aParam[i].bWorker==0 ){ - lsm_config(db, eParam, &iVal); - } - }else{ - switch( eParam ){ - case TEST_NO_RECOVERY: - if( pLsm ) pLsm->bNoRecovery = iVal; - break; - case TEST_MT_MODE: - if( pLsm ) nThread = iVal; - break; - case TEST_MT_MIN_CKPT: - if( pLsm && iVal>0 ) pLsm->nMtMinCkpt = iVal*1024; - break; - case TEST_MT_MAX_CKPT: - if( pLsm && iVal>0 ) pLsm->nMtMaxCkpt = iVal*1024; - break; -#ifdef HAVE_ZLIB - case TEST_COMPRESSION: - testConfigureCompression(db); - break; -#endif - } - } - }else if( z!=zStart ){ - goto syntax_error; - } - } - - if( pnThread ) *pnThread = nThread; - if( pLsm && pLsm->nMtMaxCkpt < pLsm->nMtMinCkpt ){ - pLsm->nMtMinCkpt = pLsm->nMtMaxCkpt; - } - - return 0; - syntax_error: - testPrintError("syntax error at: \"%s\"\n", z); - return 1; -} - -int tdb_lsm_config_str(TestDb *pDb, const char *zStr){ - int rc = 0; - if( tdb_lsm(pDb) ){ -#ifdef LSM_MUTEX_PTHREADS - int i; -#endif - LsmDb *pLsm = (LsmDb *)pDb; - - rc = test_lsm_config_str(pLsm, pLsm->db, 0, zStr, 0); -#ifdef LSM_MUTEX_PTHREADS - for(i=0; rc==0 && 
inWorker; i++){ - rc = test_lsm_config_str(0, pLsm->aWorker[i].pWorker, 1, zStr, 0); - } -#endif - } - return rc; -} - -int tdb_lsm_configure(lsm_db *db, const char *zConfig){ - return test_lsm_config_str(0, db, 0, zConfig, 0); -} - -static int testLsmStartWorkers(LsmDb *, int, const char *, const char *); - -static int testLsmOpen( - const char *zCfg, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - static const DatabaseMethods LsmMethods = { - test_lsm_close, - test_lsm_write, - test_lsm_delete, - test_lsm_delete_range, - test_lsm_fetch, - test_lsm_scan, - test_lsm_begin, - test_lsm_commit, - test_lsm_rollback - }; - - int rc; - int nFilename; - LsmDb *pDb; - - /* If the bClear flag is set, delete any existing database. */ - assert( zFilename); - if( bClear ) testDeleteLsmdb(zFilename); - nFilename = strlen(zFilename); - - pDb = (LsmDb *)testMalloc(sizeof(LsmDb) + nFilename + 1); - memset(pDb, 0, sizeof(LsmDb)); - pDb->base.pMethods = &LsmMethods; - pDb->zName = (char *)&pDb[1]; - memcpy(pDb->zName, zFilename, nFilename + 1); - - /* Default the sector size used for crash simulation to 512 bytes. - ** Todo: There should be an OS method to obtain this value - just as - ** there is in SQLite. For now, LSM assumes that it is smaller than - ** the page size (default 4KB). - */ - pDb->szSector = 256; - - /* Default values for the mt_min_ckpt and mt_max_ckpt parameters. */ - pDb->nMtMinCkpt = LSMTEST_DFLT_MT_MIN_CKPT; - pDb->nMtMaxCkpt = LSMTEST_DFLT_MT_MAX_CKPT; - - memcpy(&pDb->env, tdb_lsm_env(), sizeof(lsm_env)); - pDb->env.pVfsCtx = (void *)pDb; - pDb->env.xFullpath = testEnvFullpath; - pDb->env.xOpen = testEnvOpen; - pDb->env.xRead = testEnvRead; - pDb->env.xWrite = testEnvWrite; - pDb->env.xTruncate = testEnvTruncate; - pDb->env.xSync = testEnvSync; - pDb->env.xSectorSize = testEnvSectorSize; - pDb->env.xRemap = testEnvRemap; - pDb->env.xFileid = testEnvFileid; - pDb->env.xClose = testEnvClose; - pDb->env.xUnlink = testEnvUnlink; - pDb->env.xLock = testEnvLock; - pDb->env.xTestLock = testEnvTestLock; - pDb->env.xShmBarrier = testEnvShmBarrier; - pDb->env.xShmMap = testEnvShmMap; - pDb->env.xShmUnmap = testEnvShmUnmap; - pDb->env.xSleep = testEnvSleep; - - rc = lsm_new(&pDb->env, &pDb->db); - if( rc==LSM_OK ){ - int nThread = 1; - lsm_config_log(pDb->db, xLog, 0); - lsm_config_work_hook(pDb->db, xWorkHook, (void *)pDb); - - rc = test_lsm_config_str(pDb, pDb->db, 0, zCfg, &nThread); - if( rc==LSM_OK ) rc = lsm_open(pDb->db, zFilename); - - pDb->eMode = nThread; -#ifdef LSM_MUTEX_PTHREADS - if( rc==LSM_OK && nThread>1 ){ - testLsmStartWorkers(pDb, nThread, zFilename, zCfg); - } -#endif - - if( rc!=LSM_OK ){ - test_lsm_close((TestDb *)pDb); - pDb = 0; - } - } - - *ppDb = (TestDb *)pDb; - return rc; -} - -int test_lsm_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - return testLsmOpen(zSpec, zFilename, bClear, ppDb); -} - -int test_lsm_small_open( - const char *zSpec, - const char *zFile, - int bClear, - TestDb **ppDb -){ - const char *zCfg = "page_size=256 block_size=64 mmap=1024"; - return testLsmOpen(zCfg, zFile, bClear, ppDb); -} - -int test_lsm_lomem_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - /* "max_freelist=4 autocheckpoint=32" */ - const char *zCfg = - "page_size=256 block_size=64 autoflush=16 " - "autocheckpoint=32" - "mmap=0 " - ; - return testLsmOpen(zCfg, zFilename, bClear, ppDb); -} - -int test_lsm_lomem2_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb 
-){ - /* "max_freelist=4 autocheckpoint=32" */ - const char *zCfg = - "page_size=512 block_size=64 autoflush=0 mmap=0 " - ; - return testLsmOpen(zCfg, zFilename, bClear, ppDb); -} - -int test_lsm_zip_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - const char *zCfg = - "page_size=256 block_size=64 autoflush=16 " - "autocheckpoint=32 compression=1 mmap=0 " - ; - return testLsmOpen(zCfg, zFilename, bClear, ppDb); -} - -lsm_db *tdb_lsm(TestDb *pDb){ - if( pDb->pMethods->xClose==test_lsm_close ){ - return ((LsmDb *)pDb)->db; - } - return 0; -} - -int tdb_lsm_multithread(TestDb *pDb){ - int ret = 0; - if( tdb_lsm(pDb) ){ - ret = ((LsmDb*)pDb)->eMode!=LSMTEST_MODE_SINGLETHREAD; - } - return ret; -} - -void tdb_lsm_enable_log(TestDb *pDb, int bEnable){ - lsm_db *db = tdb_lsm(pDb); - if( db ){ - lsm_config_log(db, (bEnable ? xLog : 0), (void *)"client"); - } -} - -void tdb_lsm_application_crash(TestDb *pDb){ - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->bCrashed = 1; - } -} - -void tdb_lsm_prepare_system_crash(TestDb *pDb){ - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->bPrepareCrash = 1; - } -} - -void tdb_lsm_system_crash(TestDb *pDb){ - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->bCrashed = 1; - doSystemCrash(p); - } -} - -void tdb_lsm_safety(TestDb *pDb, int eMode){ - assert( eMode==LSM_SAFETY_OFF - || eMode==LSM_SAFETY_NORMAL - || eMode==LSM_SAFETY_FULL - ); - if( tdb_lsm(pDb) ){ - int iParam = eMode; - LsmDb *p = (LsmDb *)pDb; - lsm_config(p->db, LSM_CONFIG_SAFETY, &iParam); - } -} - -void tdb_lsm_prepare_sync_crash(TestDb *pDb, int iSync){ - assert( iSync>0 ); - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->nAutoCrash = iSync; - p->bPrepareCrash = 1; - } -} - -void tdb_lsm_config_work_hook( - TestDb *pDb, - void (*xWork)(lsm_db *, void *), - void *pWorkCtx -){ - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->xWork = xWork; - p->pWorkCtx = pWorkCtx; - } -} - -void tdb_lsm_write_hook( - TestDb *pDb, - void (*xWrite)(void *, int, lsm_i64, int, int), - void *pWriteCtx -){ - if( tdb_lsm(pDb) ){ - LsmDb *p = (LsmDb *)pDb; - p->xWriteHook = xWrite; - p->pWriteCtx = pWriteCtx; - } -} - -int tdb_lsm_open(const char *zCfg, const char *zDb, int bClear, TestDb **ppDb){ - return testLsmOpen(zCfg, zDb, bClear, ppDb); -} - -#ifdef LSM_MUTEX_PTHREADS - -/* -** Signal worker thread iWorker that there may be work to do. -*/ -static void mt_signal_worker(LsmDb *pDb, int iWorker){ - LsmWorker *p = &pDb->aWorker[iWorker]; - pthread_mutex_lock(&p->worker_mutex); - p->bDoWork = 1; - pthread_cond_signal(&p->worker_cond); - pthread_mutex_unlock(&p->worker_mutex); -} - -/* -** This routine is used as the main() for all worker threads. -*/ -static void *worker_main(void *pArg){ - LsmWorker *p = (LsmWorker *)pArg; - lsm_db *pWorker; /* Connection to access db through */ - - pthread_mutex_lock(&p->worker_mutex); - while( (pWorker = p->pWorker) ){ - int rc = LSM_OK; - - /* Do some work. If an error occurs, exit. 
*/ - - pthread_mutex_unlock(&p->worker_mutex); - if( p->eType==LSMTEST_THREAD_CKPT ){ - int nKB = 0; - rc = lsm_info(pWorker, LSM_INFO_CHECKPOINT_SIZE, &nKB); - if( rc==LSM_OK && nKB>=p->pDb->nMtMinCkpt ){ - rc = lsm_checkpoint(pWorker, 0); - } - }else{ - int nWrite; - do { - - if( p->eType==LSMTEST_THREAD_WORKER ){ - waitOnCheckpointer(p->pDb, pWorker); - } - - nWrite = 0; - rc = lsm_work(pWorker, 0, 256, &nWrite); - - if( p->eType==LSMTEST_THREAD_WORKER && nWrite ){ - mt_signal_worker(p->pDb, 1); - } - }while( nWrite && p->pWorker ); - } - pthread_mutex_lock(&p->worker_mutex); - - if( rc!=LSM_OK && rc!=LSM_BUSY ){ - p->worker_rc = rc; - break; - } - - /* The thread will wake up when it is signaled either because another - ** thread has created some work for this one or because the connection - ** is being closed. */ - if( p->pWorker && p->bDoWork==0 ){ - pthread_cond_wait(&p->worker_cond, &p->worker_mutex); - } - p->bDoWork = 0; - } - pthread_mutex_unlock(&p->worker_mutex); - - return 0; -} - - -static void mt_stop_worker(LsmDb *pDb, int iWorker){ - LsmWorker *p = &pDb->aWorker[iWorker]; - if( p->pWorker ){ - void *pDummy; - lsm_db *pWorker; - - /* Signal the worker to stop */ - pthread_mutex_lock(&p->worker_mutex); - pWorker = p->pWorker; - p->pWorker = 0; - pthread_cond_signal(&p->worker_cond); - pthread_mutex_unlock(&p->worker_mutex); - - /* Join the worker thread. */ - pthread_join(p->worker_thread, &pDummy); - - /* Free resources allocated in mt_start_worker() */ - pthread_cond_destroy(&p->worker_cond); - pthread_mutex_destroy(&p->worker_mutex); - lsm_close(pWorker); - } -} - -static void mt_shutdown(LsmDb *pDb){ - int i; - for(i=0; inWorker; i++){ - mt_stop_worker(pDb, i); - } -} - -/* -** This callback is invoked by LSM when the client database writes to -** the database file (i.e. to flush the contents of the in-memory tree). -** This implies there may be work to do on the database, so signal -** the worker threads. -*/ -static void mt_client_work_hook(lsm_db *db, void *pArg){ - LsmDb *pDb = (LsmDb *)pArg; /* LsmDb database handle */ - - /* Invoke the user level work-hook, if any. */ - if( pDb->xWork ) pDb->xWork(db, pDb->pWorkCtx); - - /* Wake up worker thread 0. */ - mt_signal_worker(pDb, 0); -} - -static void mt_worker_work_hook(lsm_db *db, void *pArg){ - LsmDb *pDb = (LsmDb *)pArg; /* LsmDb database handle */ - - /* Invoke the user level work-hook, if any. */ - if( pDb->xWork ) pDb->xWork(db, pDb->pWorkCtx); -} - -/* -** Launch worker thread iWorker for database connection pDb. 
-*/
-static int mt_start_worker(
-  LsmDb *pDb,                     /* Main database structure */
-  int iWorker,                    /* Worker number to start */
-  const char *zFilename,          /* File name of database to open */
-  const char *zCfg,               /* Connection configuration string */
-  int eType                       /* Type of worker thread */
-){
-  int rc = 0;                     /* Return code */
-  LsmWorker *p;                   /* Object to initialize */
-
-  assert( iWorker<pDb->nWorker );
-  assert( eType==LSMTEST_THREAD_CKPT
-       || eType==LSMTEST_THREAD_WORKER
-       || eType==LSMTEST_THREAD_WORKER_AC
-  );
-
-  p = &pDb->aWorker[iWorker];
-  p->eType = eType;
-  p->pDb = pDb;
-
-  /* Open the worker connection */
-  if( rc==0 ) rc = lsm_new(&pDb->env, &p->pWorker);
-  if( zCfg ){
-    test_lsm_config_str(pDb, p->pWorker, 1, zCfg, 0);
-  }
-  if( rc==0 ) rc = lsm_open(p->pWorker, zFilename);
-  lsm_config_log(p->pWorker, xLog, (void *)"worker");
-
-  /* Configure the work-hook */
-  if( rc==0 ){
-    lsm_config_work_hook(p->pWorker, mt_worker_work_hook, (void *)pDb);
-  }
-
-  if( eType==LSMTEST_THREAD_WORKER ){
-    test_lsm_config_str(0, p->pWorker, 1, "autocheckpoint=0", 0);
-  }
-
-  /* Kick off the worker thread. */
-  if( rc==0 ) rc = pthread_cond_init(&p->worker_cond, 0);
-  if( rc==0 ) rc = pthread_mutex_init(&p->worker_mutex, 0);
-  if( rc==0 ) rc = pthread_create(&p->worker_thread, 0, worker_main, (void *)p);
-
-  return rc;
-}
-
-
-static int testLsmStartWorkers(
-  LsmDb *pDb, int eModel, const char *zFilename, const char *zCfg
-){
-  int rc;
-
-  if( eModel<1 || eModel>4 ) return 1;
-  if( eModel==1 ) return 0;
-
-  /* Configure a work-hook for the client connection. Worker 0 is signalled
-  ** every time the users connection writes to the database. */
-  lsm_config_work_hook(pDb->db, mt_client_work_hook, (void *)pDb);
-
-  /* Allocate space for two worker connections. They may not both be
-  ** used, but both are allocated. 
*/ - pDb->aWorker = (LsmWorker *)testMalloc(sizeof(LsmWorker) * 2); - memset(pDb->aWorker, 0, sizeof(LsmWorker) * 2); - - switch( eModel ){ - case LSMTEST_MODE_BACKGROUND_CKPT: - pDb->nWorker = 1; - test_lsm_config_str(0, pDb->db, 0, "autocheckpoint=0", 0); - rc = mt_start_worker(pDb, 0, zFilename, zCfg, LSMTEST_THREAD_CKPT); - break; - - case LSMTEST_MODE_BACKGROUND_WORK: - pDb->nWorker = 1; - test_lsm_config_str(0, pDb->db, 0, "autowork=0", 0); - rc = mt_start_worker(pDb, 0, zFilename, zCfg, LSMTEST_THREAD_WORKER_AC); - break; - - case LSMTEST_MODE_BACKGROUND_BOTH: - pDb->nWorker = 2; - test_lsm_config_str(0, pDb->db, 0, "autowork=0", 0); - rc = mt_start_worker(pDb, 0, zFilename, zCfg, LSMTEST_THREAD_WORKER); - if( rc==0 ){ - rc = mt_start_worker(pDb, 1, zFilename, zCfg, LSMTEST_THREAD_CKPT); - } - break; - } - - return rc; -} - - -int test_lsm_mt2( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - const char *zCfg = "mt_mode=2"; - return testLsmOpen(zCfg, zFilename, bClear, ppDb); -} - -int test_lsm_mt3( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - const char *zCfg = "mt_mode=4"; - return testLsmOpen(zCfg, zFilename, bClear, ppDb); -} - -#else -static void mt_shutdown(LsmDb *pDb) { - unused_parameter(pDb); -} -int test_lsm_mt(const char *zFilename, int bClear, TestDb **ppDb){ - unused_parameter(zFilename); - unused_parameter(bClear); - unused_parameter(ppDb); - testPrintError("threads unavailable - recompile with LSM_MUTEX_PTHREADS\n"); - return 1; -} -#endif diff --git a/ext/lsm1/lsm-test/lsmtest_tdb4.c b/ext/lsm1/lsm-test/lsmtest_tdb4.c deleted file mode 100644 index 1f92928522..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_tdb4.c +++ /dev/null @@ -1,980 +0,0 @@ - -/* -** This file contains the TestDb bt wrapper. -*/ - -#include "lsmtest_tdb.h" -#include "lsmtest.h" -#include -#include "bt.h" - -#include - -typedef struct BtDb BtDb; -typedef struct BtFile BtFile; - -/* Background checkpointer interface (see implementations below). */ -typedef struct bt_ckpter bt_ckpter; -static int bgc_attach(BtDb *pDb, const char*); -static int bgc_detach(BtDb *pDb); - -/* -** Each database or log file opened by a database handle is wrapped by -** an object of the following type. -*/ -struct BtFile { - BtDb *pBt; /* Database handle that opened this file */ - bt_env *pVfs; /* Underlying VFS */ - bt_file *pFile; /* File handle belonging to underlying VFS */ - int nSectorSize; /* Size of sectors in bytes */ - int nSector; /* Allocated size of nSector array */ - u8 **apSector; /* Original sector data */ -}; - -/* -** nCrashSync: -** If this value is non-zero, then a "crash-test" is running. If -** nCrashSync==1, then the crash is simulated during the very next -** call to the xSync() VFS method (on either the db or log file). -** If nCrashSync==2, the following call to xSync(), and so on. -** -** bCrash: -** After a crash is simulated, this variable is set. Any subsequent -** attempts to write to a file or modify the file system in any way -** fail once this is set. All the caller can do is close the connection. -** -** bFastInsert: -** If this variable is set to true, then a BT_CONTROL_FAST_INSERT_OP -** control is issued before each callto BtReplace() or BtCsrOpen(). 
-*/ -struct BtDb { - TestDb base; /* Base class */ - bt_db *pBt; /* bt database handle */ - sqlite4_env *pEnv; /* SQLite environment (for malloc/free) */ - bt_env *pVfs; /* Underlying VFS */ - int bFastInsert; /* True to use fast-insert */ - - /* Space for bt_fetch() results */ - u8 *aBuffer; /* Space to store results */ - int nBuffer; /* Allocated size of aBuffer[] in bytes */ - int nRef; - - /* Background checkpointer used by mt connections */ - bt_ckpter *pCkpter; - - /* Stuff used for crash test simulation */ - BtFile *apFile[2]; /* Database and log files used by pBt */ - bt_env env; /* Private VFS for this object */ - int nCrashSync; /* Number of syncs until crash (see above) */ - int bCrash; /* True once a crash has been simulated */ -}; - -static int btVfsFullpath( - sqlite4_env *pEnv, - bt_env *pVfs, - const char *z, - char **pzOut -){ - BtDb *pBt = (BtDb*)pVfs->pVfsCtx; - if( pBt->bCrash ) return SQLITE4_IOERR; - return pBt->pVfs->xFullpath(pEnv, pBt->pVfs, z, pzOut); -} - -static int btVfsOpen( - sqlite4_env *pEnv, - bt_env *pVfs, - const char *zFile, - int flags, bt_file **ppFile -){ - BtFile *p; - BtDb *pBt = (BtDb*)pVfs->pVfsCtx; - int rc; - - if( pBt->bCrash ) return SQLITE4_IOERR; - - p = (BtFile*)testMalloc(sizeof(BtFile)); - if( !p ) return SQLITE4_NOMEM; - if( flags & BT_OPEN_DATABASE ){ - pBt->apFile[0] = p; - }else if( flags & BT_OPEN_LOG ){ - pBt->apFile[1] = p; - } - if( (flags & BT_OPEN_SHARED)==0 ){ - p->pBt = pBt; - } - p->pVfs = pBt->pVfs; - - rc = pBt->pVfs->xOpen(pEnv, pVfs, zFile, flags, &p->pFile); - if( rc!=SQLITE4_OK ){ - testFree(p); - p = 0; - }else{ - pBt->nRef++; - } - - *ppFile = (bt_file*)p; - return rc; -} - -static int btVfsSize(bt_file *pFile, sqlite4_int64 *piRes){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xSize(p->pFile, piRes); -} - -static int btVfsRead(bt_file *pFile, sqlite4_int64 iOff, void *pBuf, int nBuf){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xRead(p->pFile, iOff, pBuf, nBuf); -} - -static int btFlushSectors(BtFile *p, int iFile){ - sqlite4_int64 iSz; - int rc; - int i; - u8 *aTmp = 0; - - rc = p->pBt->pVfs->xSize(p->pFile, &iSz); - for(i=0; rc==SQLITE4_OK && inSector; i++){ - if( p->pBt->bCrash && p->apSector[i] ){ - - /* The system is simulating a crash. There are three choices for - ** this sector: - ** - ** 1) Leave it as it is (simulating a successful write), - ** 2) Restore the original data (simulating a lost write), - ** 3) Populate the disk sector with garbage data. - */ - sqlite4_int64 iSOff = p->nSectorSize*i; - int nWrite = MIN(p->nSectorSize, iSz - iSOff); - - if( nWrite ){ - u8 *aWrite = 0; - int iOpt = (testPrngValue(i) % 3) + 1; - if( iOpt==1 ){ - aWrite = p->apSector[i]; - }else if( iOpt==3 ){ - if( aTmp==0 ) aTmp = testMalloc(p->nSectorSize); - aWrite = aTmp; - testPrngArray(i*13, (u32*)aWrite, nWrite/sizeof(u32)); - } - -#if 0 -fprintf(stderr, "handle sector %d of %s with %s\n", i, - iFile==0 ? "db" : "log", - iOpt==1 ? "rollback" : iOpt==2 ? 
"write" : "omit" -); -fflush(stderr); -#endif - - if( aWrite ){ - rc = p->pBt->pVfs->xWrite(p->pFile, iSOff, aWrite, nWrite); - } - } - } - testFree(p->apSector[i]); - p->apSector[i] = 0; - } - - testFree(aTmp); - return rc; -} - -static int btSaveSectors(BtFile *p, sqlite4_int64 iOff, int nBuf){ - int rc; - sqlite4_int64 iSz; /* Size of file on disk */ - int iFirst; /* First sector affected */ - int iSector; /* Current sector */ - int iLast; /* Last sector affected */ - - if( p->nSectorSize==0 ){ - p->nSectorSize = p->pBt->pVfs->xSectorSize(p->pFile); - if( p->nSectorSize<512 ) p->nSectorSize = 512; - } - iLast = (iOff+nBuf-1) / p->nSectorSize; - iFirst = iOff / p->nSectorSize; - - rc = p->pBt->pVfs->xSize(p->pFile, &iSz); - for(iSector=iFirst; rc==SQLITE4_OK && iSector<=iLast; iSector++){ - int nRead; - sqlite4_int64 iSOff = iSector * p->nSectorSize; - u8 *aBuf = testMalloc(p->nSectorSize); - nRead = MIN(p->nSectorSize, (iSz - iSOff)); - if( nRead>0 ){ - rc = p->pBt->pVfs->xRead(p->pFile, iSOff, aBuf, nRead); - } - - while( rc==SQLITE4_OK && iSector>=p->nSector ){ - int nNew = p->nSector + 32; - u8 **apNew = (u8**)testMalloc(nNew * sizeof(u8*)); - memcpy(apNew, p->apSector, p->nSector*sizeof(u8*)); - testFree(p->apSector); - p->apSector = apNew; - p->nSector = nNew; - } - - p->apSector[iSector] = aBuf; - } - - return rc; -} - -static int btVfsWrite(bt_file *pFile, sqlite4_int64 iOff, void *pBuf, int nBuf){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - if( p->pBt && p->pBt->nCrashSync ){ - btSaveSectors(p, iOff, nBuf); - } - return p->pVfs->xWrite(p->pFile, iOff, pBuf, nBuf); -} - -static int btVfsTruncate(bt_file *pFile, sqlite4_int64 iOff){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xTruncate(p->pFile, iOff); -} - -static int btVfsSync(bt_file *pFile){ - int rc = SQLITE4_OK; - BtFile *p = (BtFile*)pFile; - BtDb *pBt = p->pBt; - - if( pBt ){ - if( pBt->bCrash ) return SQLITE4_IOERR; - if( pBt->nCrashSync ){ - pBt->nCrashSync--; - pBt->bCrash = (pBt->nCrashSync==0); - if( pBt->bCrash ){ - btFlushSectors(pBt->apFile[0], 0); - btFlushSectors(pBt->apFile[1], 1); - rc = SQLITE4_IOERR; - }else{ - btFlushSectors(p, 0); - } - } - } - - if( rc==SQLITE4_OK ){ - rc = p->pVfs->xSync(p->pFile); - } - return rc; -} - -static int btVfsSectorSize(bt_file *pFile){ - BtFile *p = (BtFile*)pFile; - return p->pVfs->xSectorSize(p->pFile); -} - -static void btDeref(BtDb *p){ - p->nRef--; - assert( p->nRef>=0 ); - if( p->nRef<=0 ) testFree(p); -} - -static int btVfsClose(bt_file *pFile){ - BtFile *p = (BtFile*)pFile; - BtDb *pBt = p->pBt; - int rc; - if( pBt ){ - btFlushSectors(p, 0); - if( p==pBt->apFile[0] ) pBt->apFile[0] = 0; - if( p==pBt->apFile[1] ) pBt->apFile[1] = 0; - } - testFree(p->apSector); - rc = p->pVfs->xClose(p->pFile); -#if 0 - btDeref(p->pBt); -#endif - testFree(p); - return rc; -} - -static int btVfsUnlink(sqlite4_env *pEnv, bt_env *pVfs, const char *zFile){ - BtDb *pBt = (BtDb*)pVfs->pVfsCtx; - if( pBt->bCrash ) return SQLITE4_IOERR; - return pBt->pVfs->xUnlink(pEnv, pBt->pVfs, zFile); -} - -static int btVfsLock(bt_file *pFile, int iLock, int eType){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xLock(p->pFile, iLock, eType); -} - -static int btVfsTestLock(bt_file *pFile, int iLock, int nLock, int eType){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xTestLock(p->pFile, iLock, 
nLock, eType); -} - -static int btVfsShmMap(bt_file *pFile, int iChunk, int sz, void **ppOut){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xShmMap(p->pFile, iChunk, sz, ppOut); -} - -static void btVfsShmBarrier(bt_file *pFile){ - BtFile *p = (BtFile*)pFile; - return p->pVfs->xShmBarrier(p->pFile); -} - -static int btVfsShmUnmap(bt_file *pFile, int bDelete){ - BtFile *p = (BtFile*)pFile; - if( p->pBt && p->pBt->bCrash ) return SQLITE4_IOERR; - return p->pVfs->xShmUnmap(p->pFile, bDelete); -} - -static int bt_close(TestDb *pTestDb){ - BtDb *p = (BtDb*)pTestDb; - int rc = sqlite4BtClose(p->pBt); - free(p->aBuffer); - if( p->apFile[0] ) p->apFile[0]->pBt = 0; - if( p->apFile[1] ) p->apFile[1]->pBt = 0; - bgc_detach(p); - testFree(p); - return rc; -} - -static int btMinTransaction(BtDb *p, int iMin, int *piLevel){ - int iLevel; - int rc = SQLITE4_OK; - - iLevel = sqlite4BtTransactionLevel(p->pBt); - if( iLevelpBt, iMin); - *piLevel = iLevel; - }else{ - *piLevel = -1; - } - - return rc; -} -static int btRestoreTransaction(BtDb *p, int iLevel, int rcin){ - int rc = rcin; - if( iLevel>=0 ){ - if( rc==SQLITE4_OK ){ - rc = sqlite4BtCommit(p->pBt, iLevel); - }else{ - sqlite4BtRollback(p->pBt, iLevel); - } - assert( iLevel==sqlite4BtTransactionLevel(p->pBt) ); - } - return rc; -} - -static int bt_write(TestDb *pTestDb, void *pK, int nK, void *pV, int nV){ - BtDb *p = (BtDb*)pTestDb; - int iLevel; - int rc; - - rc = btMinTransaction(p, 2, &iLevel); - if( rc==SQLITE4_OK ){ - if( p->bFastInsert ) sqlite4BtControl(p->pBt, BT_CONTROL_FAST_INSERT_OP, 0); - rc = sqlite4BtReplace(p->pBt, pK, nK, pV, nV); - rc = btRestoreTransaction(p, iLevel, rc); - } - return rc; -} - -static int bt_delete(TestDb *pTestDb, void *pK, int nK){ - return bt_write(pTestDb, pK, nK, 0, -1); -} - -static int bt_delete_range( - TestDb *pTestDb, - void *pKey1, int nKey1, - void *pKey2, int nKey2 -){ - BtDb *p = (BtDb*)pTestDb; - bt_cursor *pCsr = 0; - int rc = SQLITE4_OK; - int iLevel; - - rc = btMinTransaction(p, 2, &iLevel); - if( rc==SQLITE4_OK ){ - if( p->bFastInsert ) sqlite4BtControl(p->pBt, BT_CONTROL_FAST_INSERT_OP, 0); - rc = sqlite4BtCsrOpen(p->pBt, 0, &pCsr); - } - while( rc==SQLITE4_OK ){ - const void *pK; - int n; - int nCmp; - int res; - - rc = sqlite4BtCsrSeek(pCsr, pKey1, nKey1, BT_SEEK_GE); - if( rc==SQLITE4_INEXACT ) rc = SQLITE4_OK; - if( rc!=SQLITE4_OK ) break; - - rc = sqlite4BtCsrKey(pCsr, &pK, &n); - if( rc!=SQLITE4_OK ) break; - - nCmp = MIN(n, nKey1); - res = memcmp(pKey1, pK, nCmp); - assert( res<0 || (res==0 && nKey1<=n) ); - if( res==0 && nKey1==n ){ - rc = sqlite4BtCsrNext(pCsr); - if( rc!=SQLITE4_OK ) break; - rc = sqlite4BtCsrKey(pCsr, &pK, &n); - if( rc!=SQLITE4_OK ) break; - } - - nCmp = MIN(n, nKey2); - res = memcmp(pKey2, pK, nCmp); - if( res<0 || (res==0 && nKey2<=n) ) break; - - rc = sqlite4BtDelete(pCsr); - } - if( rc==SQLITE4_NOTFOUND ) rc = SQLITE4_OK; - - sqlite4BtCsrClose(pCsr); - - rc = btRestoreTransaction(p, iLevel, rc); - return rc; -} - -static int bt_fetch( - TestDb *pTestDb, - void *pK, int nK, - void **ppVal, int *pnVal -){ - BtDb *p = (BtDb*)pTestDb; - bt_cursor *pCsr = 0; - int iLevel; - int rc = SQLITE4_OK; - - iLevel = sqlite4BtTransactionLevel(p->pBt); - if( iLevel==0 ){ - rc = sqlite4BtBegin(p->pBt, 1); - if( rc!=SQLITE4_OK ) return rc; - } - - if( p->bFastInsert ) sqlite4BtControl(p->pBt, BT_CONTROL_FAST_INSERT_OP, 0); - rc = sqlite4BtCsrOpen(p->pBt, 0, &pCsr); - if( rc==SQLITE4_OK ){ - rc = sqlite4BtCsrSeek(pCsr, pK, 
nK, BT_SEEK_EQ); - if( rc==SQLITE4_OK ){ - const void *pV = 0; - int nV = 0; - rc = sqlite4BtCsrData(pCsr, 0, -1, &pV, &nV); - if( rc==SQLITE4_OK ){ - if( nV>p->nBuffer ){ - free(p->aBuffer); - p->aBuffer = (u8*)malloc(nV*2); - p->nBuffer = nV*2; - } - memcpy(p->aBuffer, pV, nV); - *pnVal = nV; - *ppVal = (void*)(p->aBuffer); - } - - }else if( rc==SQLITE4_INEXACT || rc==SQLITE4_NOTFOUND ){ - *ppVal = 0; - *pnVal = -1; - rc = SQLITE4_OK; - } - sqlite4BtCsrClose(pCsr); - } - - if( iLevel==0 ) sqlite4BtCommit(p->pBt, 0); - return rc; -} - -static int bt_scan( - TestDb *pTestDb, - void *pCtx, - int bReverse, - void *pFirst, int nFirst, - void *pLast, int nLast, - void (*xCallback)(void *, void *, int , void *, int) -){ - BtDb *p = (BtDb*)pTestDb; - bt_cursor *pCsr = 0; - int rc; - int iLevel; - - rc = btMinTransaction(p, 1, &iLevel); - - if( rc==SQLITE4_OK ){ - if( p->bFastInsert ) sqlite4BtControl(p->pBt, BT_CONTROL_FAST_INSERT_OP, 0); - rc = sqlite4BtCsrOpen(p->pBt, 0, &pCsr); - } - if( rc==SQLITE4_OK ){ - if( bReverse ){ - if( pLast ){ - rc = sqlite4BtCsrSeek(pCsr, pLast, nLast, BT_SEEK_LE); - }else{ - rc = sqlite4BtCsrLast(pCsr); - } - }else{ - rc = sqlite4BtCsrSeek(pCsr, pFirst, nFirst, BT_SEEK_GE); - } - if( rc==SQLITE4_INEXACT ) rc = SQLITE4_OK; - - while( rc==SQLITE4_OK ){ - const void *pK = 0; int nK = 0; - const void *pV = 0; int nV = 0; - - rc = sqlite4BtCsrKey(pCsr, &pK, &nK); - if( rc==SQLITE4_OK ){ - rc = sqlite4BtCsrData(pCsr, 0, -1, &pV, &nV); - } - - if( rc!=SQLITE4_OK ) break; - if( bReverse ){ - if( pFirst ){ - int res; - int nCmp = MIN(nK, nFirst); - res = memcmp(pFirst, pK, nCmp); - if( res>0 || (res==0 && nKnLast) ) break; - } - } - - xCallback(pCtx, (void*)pK, nK, (void*)pV, nV); - if( bReverse ){ - rc = sqlite4BtCsrPrev(pCsr); - }else{ - rc = sqlite4BtCsrNext(pCsr); - } - } - if( rc==SQLITE4_NOTFOUND ) rc = SQLITE4_OK; - - sqlite4BtCsrClose(pCsr); - } - - rc = btRestoreTransaction(p, iLevel, rc); - return rc; -} - -static int bt_begin(TestDb *pTestDb, int iLvl){ - BtDb *p = (BtDb*)pTestDb; - int rc = sqlite4BtBegin(p->pBt, iLvl); - return rc; -} - -static int bt_commit(TestDb *pTestDb, int iLvl){ - BtDb *p = (BtDb*)pTestDb; - int rc = sqlite4BtCommit(p->pBt, iLvl); - return rc; -} - -static int bt_rollback(TestDb *pTestDb, int iLvl){ - BtDb *p = (BtDb*)pTestDb; - int rc = sqlite4BtRollback(p->pBt, iLvl); - return rc; -} - -static int testParseOption( - const char **pzIn, /* IN/OUT: pointer to next option */ - const char **pzOpt, /* OUT: nul-terminated option name */ - const char **pzArg, /* OUT: nul-terminated option argument */ - char *pSpace /* Temporary space for output params */ -){ - const char *p = *pzIn; - const char *pStart; - int n; - - char *pOut = pSpace; - - while( *p==' ' ) p++; - pStart = p; - while( *p && *p!='=' ) p++; - if( *p==0 ) return 1; - - n = (p - pStart); - memcpy(pOut, pStart, n); - *pzOpt = pOut; - pOut += n; - *pOut++ = '\0'; - - p++; - pStart = p; - while( *p && *p!=' ' ) p++; - n = (p - pStart); - - memcpy(pOut, pStart, n); - *pzArg = pOut; - pOut += n; - *pOut++ = '\0'; - - *pzIn = p; - return 0; -} - -static int testParseInt(const char *z, int *piVal){ - int i = 0; - const char *p = z; - - while( *p>='0' && *p<='9' ){ - i = i*10 + (*p - '0'); - p++; - } - if( *p=='K' || *p=='k' ){ - i = i * 1024; - p++; - }else if( *p=='M' || *p=='m' ){ - i = i * 1024 * 1024; - p++; - } - - if( *p ) return SQLITE4_ERROR; - *piVal = i; - return SQLITE4_OK; -} - -static int testBtConfigure(BtDb *pDb, const char *zCfg, int *pbMt){ - int rc = SQLITE4_OK; - - 
if( zCfg ){ - struct CfgParam { - const char *zParam; - int eParam; - } aParam[] = { - { "safety", BT_CONTROL_SAFETY }, - { "autockpt", BT_CONTROL_AUTOCKPT }, - { "multiproc", BT_CONTROL_MULTIPROC }, - { "blksz", BT_CONTROL_BLKSZ }, - { "pagesz", BT_CONTROL_PAGESZ }, - { "mt", -1 }, - { "fastinsert", -2 }, - { 0, 0 } - }; - const char *z = zCfg; - int n = strlen(z); - char *aSpace; - const char *zOpt; - const char *zArg; - - aSpace = (char*)testMalloc(n+2); - while( rc==SQLITE4_OK && 0==testParseOption(&z, &zOpt, &zArg, aSpace) ){ - int i; - int iVal; - rc = testArgSelect(aParam, "param", zOpt, &i); - if( rc!=SQLITE4_OK ) break; - - rc = testParseInt(zArg, &iVal); - if( rc!=SQLITE4_OK ) break; - - switch( aParam[i].eParam ){ - case -1: - *pbMt = iVal; - break; - case -2: - pDb->bFastInsert = 1; - break; - default: - rc = sqlite4BtControl(pDb->pBt, aParam[i].eParam, (void*)&iVal); - break; - } - } - testFree(aSpace); - } - - return rc; -} - - -int test_bt_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - - static const DatabaseMethods SqlMethods = { - bt_close, - bt_write, - bt_delete, - bt_delete_range, - bt_fetch, - bt_scan, - bt_begin, - bt_commit, - bt_rollback - }; - BtDb *p = 0; - bt_db *pBt = 0; - int rc; - sqlite4_env *pEnv = sqlite4_env_default(); - - if( bClear && zFilename && zFilename[0] ){ - char *zLog = sqlite3_mprintf("%s-wal", zFilename); - unlink(zFilename); - unlink(zLog); - sqlite3_free(zLog); - } - - rc = sqlite4BtNew(pEnv, 0, &pBt); - if( rc==SQLITE4_OK ){ - int mt = 0; /* True for multi-threaded connection */ - - p = (BtDb*)testMalloc(sizeof(BtDb)); - p->base.pMethods = &SqlMethods; - p->pBt = pBt; - p->pEnv = pEnv; - p->nRef = 1; - - p->env.pVfsCtx = (void*)p; - p->env.xFullpath = btVfsFullpath; - p->env.xOpen = btVfsOpen; - p->env.xSize = btVfsSize; - p->env.xRead = btVfsRead; - p->env.xWrite = btVfsWrite; - p->env.xTruncate = btVfsTruncate; - p->env.xSync = btVfsSync; - p->env.xSectorSize = btVfsSectorSize; - p->env.xClose = btVfsClose; - p->env.xUnlink = btVfsUnlink; - p->env.xLock = btVfsLock; - p->env.xTestLock = btVfsTestLock; - p->env.xShmMap = btVfsShmMap; - p->env.xShmBarrier = btVfsShmBarrier; - p->env.xShmUnmap = btVfsShmUnmap; - - sqlite4BtControl(pBt, BT_CONTROL_GETVFS, (void*)&p->pVfs); - sqlite4BtControl(pBt, BT_CONTROL_SETVFS, (void*)&p->env); - - rc = testBtConfigure(p, zSpec, &mt); - if( rc==SQLITE4_OK ){ - rc = sqlite4BtOpen(pBt, zFilename); - } - - if( rc==SQLITE4_OK && mt ){ - int nAuto = 0; - rc = bgc_attach(p, zSpec); - sqlite4BtControl(pBt, BT_CONTROL_AUTOCKPT, (void*)&nAuto); - } - } - - if( rc!=SQLITE4_OK && p ){ - bt_close(&p->base); - } - - *ppDb = &p->base; - return rc; -} - -int test_fbt_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - return test_bt_open("fast=1", zFilename, bClear, ppDb); -} - -int test_fbts_open( - const char *zSpec, - const char *zFilename, - int bClear, - TestDb **ppDb -){ - return test_bt_open("fast=1 blksz=32K pagesz=512", zFilename, bClear, ppDb); -} - - -void tdb_bt_prepare_sync_crash(TestDb *pTestDb, int iSync){ - BtDb *p = (BtDb*)pTestDb; - assert( pTestDb->pMethods->xClose==bt_close ); - assert( p->bCrash==0 ); - p->nCrashSync = iSync; -} - -bt_db *tdb_bt(TestDb *pDb){ - if( pDb->pMethods->xClose==bt_close ){ - return ((BtDb *)pDb)->pBt; - } - return 0; -} - -/************************************************************************* -** Beginning of code for background checkpointer. 
-*/ - -struct bt_ckpter { - sqlite4_buffer file; /* File name */ - sqlite4_buffer spec; /* Options */ - int nLogsize; /* Minimum log size to checkpoint */ - int nRef; /* Number of clients */ - - int bDoWork; /* Set by client threads */ - pthread_t ckpter_thread; /* Checkpointer thread */ - pthread_cond_t ckpter_cond; /* Condition var the ckpter waits on */ - pthread_mutex_t ckpter_mutex; /* Mutex used with ckpter_cond */ - - bt_ckpter *pNext; /* Next object in list at gBgc.pCkpter */ -}; - -static struct GlobalBackgroundCheckpointer { - bt_ckpter *pCkpter; /* Linked list of checkpointers */ -} gBgc; - -static void *bgc_main(void *pArg){ - BtDb *pDb = 0; - int rc; - int mt; - bt_ckpter *pCkpter = (bt_ckpter*)pArg; - - rc = test_bt_open("", (char*)pCkpter->file.p, 0, (TestDb**)&pDb); - assert( rc==SQLITE4_OK ); - rc = testBtConfigure(pDb, (char*)pCkpter->spec.p, &mt); - - while( pCkpter->nRef>0 ){ - bt_db *db = pDb->pBt; - int nLog = 0; - - sqlite4BtBegin(db, 1); - sqlite4BtCommit(db, 0); - sqlite4BtControl(db, BT_CONTROL_LOGSIZE, (void*)&nLog); - - if( nLog>=pCkpter->nLogsize ){ - int rc; - bt_checkpoint ckpt; - memset(&ckpt, 0, sizeof(bt_checkpoint)); - ckpt.nFrameBuffer = nLog/2; - rc = sqlite4BtControl(db, BT_CONTROL_CHECKPOINT, (void*)&ckpt); - assert( rc==SQLITE4_OK ); - sqlite4BtControl(db, BT_CONTROL_LOGSIZE, (void*)&nLog); - } - - /* The thread will wake up when it is signaled either because another - ** thread has created some work for this one or because the connection - ** is being closed. */ - pthread_mutex_lock(&pCkpter->ckpter_mutex); - if( pCkpter->bDoWork==0 ){ - pthread_cond_wait(&pCkpter->ckpter_cond, &pCkpter->ckpter_mutex); - } - pCkpter->bDoWork = 0; - pthread_mutex_unlock(&pCkpter->ckpter_mutex); - } - - if( pDb ) bt_close((TestDb*)pDb); - return 0; -} - -static void bgc_logsize_cb(void *pCtx, int nLogsize){ - bt_ckpter *p = (bt_ckpter*)pCtx; - if( nLogsize>=p->nLogsize ){ - pthread_mutex_lock(&p->ckpter_mutex); - p->bDoWork = 1; - pthread_cond_signal(&p->ckpter_cond); - pthread_mutex_unlock(&p->ckpter_mutex); - } -} - -static int bgc_attach(BtDb *pDb, const char *zSpec){ - int rc; - int n; - bt_info info; - bt_ckpter *pCkpter; - - /* Figure out the full path to the database opened by handle pDb. */ - info.eType = BT_INFO_FILENAME; - info.pgno = 0; - sqlite4_buffer_init(&info.output, 0); - rc = sqlite4BtControl(pDb->pBt, BT_CONTROL_INFO, (void*)&info); - if( rc!=SQLITE4_OK ) return rc; - - sqlite4_mutex_enter(sqlite4_mutex_alloc(pDb->pEnv, SQLITE4_MUTEX_STATIC_KV)); - - /* Search for an existing bt_ckpter object. */ - n = info.output.n; - for(pCkpter=gBgc.pCkpter; pCkpter; pCkpter=pCkpter->pNext){ - if( n==pCkpter->file.n && 0==memcmp(info.output.p, pCkpter->file.p, n) ){ - break; - } - } - - /* Failed to find a suitable checkpointer. Create a new one. */ - if( pCkpter==0 ){ - bt_logsizecb cb; - - pCkpter = testMalloc(sizeof(bt_ckpter)); - memcpy(&pCkpter->file, &info.output, sizeof(sqlite4_buffer)); - info.output.p = 0; - pCkpter->pNext = gBgc.pCkpter; - pCkpter->nLogsize = 1000; - gBgc.pCkpter = pCkpter; - pCkpter->nRef = 1; - - sqlite4_buffer_init(&pCkpter->spec, 0); - rc = sqlite4_buffer_set(&pCkpter->spec, zSpec, strlen(zSpec)+1); - assert( rc==SQLITE4_OK ); - - /* Kick off the checkpointer thread. 
*/ - if( rc==0 ) rc = pthread_cond_init(&pCkpter->ckpter_cond, 0); - if( rc==0 ) rc = pthread_mutex_init(&pCkpter->ckpter_mutex, 0); - if( rc==0 ){ - rc = pthread_create(&pCkpter->ckpter_thread, 0, bgc_main, (void*)pCkpter); - } - assert( rc==0 ); /* todo: Fix this */ - - /* Set up the logsize callback for the client thread */ - cb.pCtx = (void*)pCkpter; - cb.xLogsize = bgc_logsize_cb; - sqlite4BtControl(pDb->pBt, BT_CONTROL_LOGSIZECB, (void*)&cb); - }else{ - pCkpter->nRef++; - } - - /* Assuming a checkpointer was encountered or effected, attach the - ** connection to it. */ - if( pCkpter ){ - pDb->pCkpter = pCkpter; - } - - sqlite4_mutex_leave(sqlite4_mutex_alloc(pDb->pEnv, SQLITE4_MUTEX_STATIC_KV)); - sqlite4_buffer_clear(&info.output); - return rc; -} - -static int bgc_detach(BtDb *pDb){ - int rc = SQLITE4_OK; - bt_ckpter *pCkpter = pDb->pCkpter; - if( pCkpter ){ - int bShutdown = 0; /* True if this is the last reference */ - - sqlite4_mutex_enter(sqlite4_mutex_alloc(pDb->pEnv,SQLITE4_MUTEX_STATIC_KV)); - pCkpter->nRef--; - if( pCkpter->nRef==0 ){ - bt_ckpter **pp; - - *pp = pCkpter->pNext; - for(pp=&gBgc.pCkpter; *pp!=pCkpter; pp=&((*pp)->pNext)); - bShutdown = 1; - } - sqlite4_mutex_leave(sqlite4_mutex_alloc(pDb->pEnv,SQLITE4_MUTEX_STATIC_KV)); - - if( bShutdown ){ - void *pDummy; - - /* Signal the checkpointer thread. */ - pthread_mutex_lock(&pCkpter->ckpter_mutex); - pCkpter->bDoWork = 1; - pthread_cond_signal(&pCkpter->ckpter_cond); - pthread_mutex_unlock(&pCkpter->ckpter_mutex); - - /* Join the checkpointer thread. */ - pthread_join(pCkpter->ckpter_thread, &pDummy); - pthread_cond_destroy(&pCkpter->ckpter_cond); - pthread_mutex_destroy(&pCkpter->ckpter_mutex); - - sqlite4_buffer_clear(&pCkpter->file); - sqlite4_buffer_clear(&pCkpter->spec); - testFree(pCkpter); - } - - pDb->pCkpter = 0; - } - return rc; -} - -/* -** End of background checkpointer. -*************************************************************************/ diff --git a/ext/lsm1/lsm-test/lsmtest_util.c b/ext/lsm1/lsm-test/lsmtest_util.c deleted file mode 100644 index adab8a53e8..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_util.c +++ /dev/null @@ -1,223 +0,0 @@ - -#include "lsmtest.h" -#include -#include -#include -#ifndef _WIN32 -# include -#endif - -/* -** Global variables used within this module. -*/ -static struct TestutilGlobal { - char **argv; - int argc; -} g = {0, 0}; - -static struct TestutilRnd { - unsigned int aRand1[2048]; /* Bits 0..10 */ - unsigned int aRand2[2048]; /* Bits 11..21 */ - unsigned int aRand3[1024]; /* Bits 22..31 */ -} r; - -/************************************************************************* -** The following block is a copy of the implementation of SQLite function -** sqlite3_randomness. This version has two important differences: -** -** 1. It always uses the same seed. So the sequence of random data output -** is the same for every run of the program. -** -** 2. It is not threadsafe. 
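
Because the copied generator is always seeded with the same constants, the helpers built on it (`testPrngValue()`, `testPrngArray()`) form a pure, repeatable stream: the same inputs give the same outputs on every run, which is what makes the randomized sector corruption in the crash tests above reproducible. A small sketch, assuming the prototypes are made available by `lsmtest.h`:

```c
#include <assert.h>
#include <string.h>
#include "lsmtest.h"          /* assumed to declare testPrngArray() */

static void example_deterministic_prng(void){
  unsigned int aFirst[8];
  unsigned int aSecond[8];

  /* Two calls starting at the same value produce identical data,
  ** in this run and in every other run of the test program. */
  testPrngArray(42, aFirst, 8);
  testPrngArray(42, aSecond, 8);
  assert( 0==memcmp(aFirst, aSecond, sizeof(aFirst)) );
}
```
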
-*/ -static struct sqlite3PrngType { - unsigned char i, j; /* State variables */ - unsigned char s[256]; /* State variables */ -} sqlite3Prng = { - 0xAF, 0x28, - { - 0x71, 0xF5, 0xB4, 0x6E, 0x80, 0xAB, 0x1D, 0xB8, - 0xFB, 0xB7, 0x49, 0xBF, 0xFF, 0x72, 0x2D, 0x14, - 0x79, 0x09, 0xE3, 0x78, 0x76, 0xB0, 0x2C, 0x0A, - 0x8E, 0x23, 0xEE, 0xDF, 0xE0, 0x9A, 0x2F, 0x67, - 0xE1, 0xBE, 0x0E, 0xA7, 0x08, 0x97, 0xEB, 0x77, - 0x78, 0xBA, 0x9D, 0xCA, 0x49, 0x4C, 0x60, 0x9A, - 0xF6, 0xBD, 0xDA, 0x7F, 0xBC, 0x48, 0x58, 0x52, - 0xE5, 0xCD, 0x83, 0x72, 0x23, 0x52, 0xFF, 0x6D, - 0xEF, 0x0F, 0x82, 0x29, 0xA0, 0x83, 0x3F, 0x7D, - 0xA4, 0x88, 0x31, 0xE7, 0x88, 0x92, 0x3B, 0x9B, - 0x3B, 0x2C, 0xC2, 0x4C, 0x71, 0xA2, 0xB0, 0xEA, - 0x36, 0xD0, 0x00, 0xF1, 0xD3, 0x39, 0x17, 0x5D, - 0x2A, 0x7A, 0xE4, 0xAD, 0xE1, 0x64, 0xCE, 0x0F, - 0x9C, 0xD9, 0xF5, 0xED, 0xB0, 0x22, 0x5E, 0x62, - 0x97, 0x02, 0xA3, 0x8C, 0x67, 0x80, 0xFC, 0x88, - 0x14, 0x0B, 0x15, 0x10, 0x0F, 0xC7, 0x40, 0xD4, - 0xF1, 0xF9, 0x0E, 0x1A, 0xCE, 0xB9, 0x1E, 0xA1, - 0x72, 0x8E, 0xD7, 0x78, 0x39, 0xCD, 0xF4, 0x5D, - 0x2A, 0x59, 0x26, 0x34, 0xF2, 0x73, 0x0B, 0xA0, - 0x02, 0x51, 0x2C, 0x03, 0xA3, 0xA7, 0x43, 0x13, - 0xE8, 0x98, 0x2B, 0xD2, 0x53, 0xF8, 0xEE, 0x91, - 0x7D, 0xE7, 0xE3, 0xDA, 0xD5, 0xBB, 0xC0, 0x92, - 0x9D, 0x98, 0x01, 0x2C, 0xF9, 0xB9, 0xA0, 0xEB, - 0xCF, 0x32, 0xFA, 0x01, 0x49, 0xA5, 0x1D, 0x9A, - 0x76, 0x86, 0x3F, 0x40, 0xD4, 0x89, 0x8F, 0x9C, - 0xE2, 0xE3, 0x11, 0x31, 0x37, 0xB2, 0x49, 0x28, - 0x35, 0xC0, 0x99, 0xB6, 0xD0, 0xBC, 0x66, 0x35, - 0xF7, 0x83, 0x5B, 0xD7, 0x37, 0x1A, 0x2B, 0x18, - 0xA6, 0xFF, 0x8D, 0x7C, 0x81, 0xA8, 0xFC, 0x9E, - 0xC4, 0xEC, 0x80, 0xD0, 0x98, 0xA7, 0x76, 0xCC, - 0x9C, 0x2F, 0x7B, 0xFF, 0x8E, 0x0E, 0xBB, 0x90, - 0xAE, 0x13, 0x06, 0xF5, 0x1C, 0x4E, 0x52, 0xF7 - } -}; - -/* Generate and return single random byte */ -static unsigned char randomByte(void){ - unsigned char t; - sqlite3Prng.i++; - t = sqlite3Prng.s[sqlite3Prng.i]; - sqlite3Prng.j += t; - sqlite3Prng.s[sqlite3Prng.i] = sqlite3Prng.s[sqlite3Prng.j]; - sqlite3Prng.s[sqlite3Prng.j] = t; - t += sqlite3Prng.s[sqlite3Prng.i]; - return sqlite3Prng.s[t]; -} - -/* -** Return N random bytes. 
-*/ -static void randomBlob(int nBuf, unsigned char *zBuf){ - int i; - for(i=0; i>11) & 0x000007FF] ^ - r.aRand3[(iVal>>22) & 0x000003FF] - ; -} - -void testPrngArray(unsigned int iVal, unsigned int *aOut, int nOut){ - int i; - for(i=0; izName; - pEntry=(struct Entry *)&((unsigned char *)pEntry)[sz] - ){ - if( zPrev ){ testPrintError("%s, ", zPrev); } - zPrev = pEntry->zName; - } - testPrintError("or %s\n", zPrev); -} - -int testArgSelectX( - void *aData, - const char *zType, - int sz, - const char *zArg, - int *piOut -){ - struct Entry { const char *zName; }; - struct Entry *pEntry; - int nArg = strlen(zArg); - - int i = 0; - int iOut = -1; - int nOut = 0; - - for(pEntry=(struct Entry *)aData; - pEntry->zName; - pEntry=(struct Entry *)&((unsigned char *)pEntry)[sz] - ){ - int nName = strlen(pEntry->zName); - if( nArg<=nName && memcmp(pEntry->zName, zArg, nArg)==0 ){ - iOut = i; - if( nName==nArg ){ - nOut = 1; - break; - } - nOut++; - } - i++; - } - - if( nOut!=1 ){ - argError(aData, zType, sz, zArg); - }else{ - *piOut = iOut; - } - return (nOut!=1); -} - -struct timeval zero_time; - -void testTimeInit(void){ - gettimeofday(&zero_time, 0); -} - -int testTimeGet(void){ - struct timeval now; - gettimeofday(&now, 0); - return - (((int)now.tv_sec - (int)zero_time.tv_sec)*1000) + - (((int)now.tv_usec - (int)zero_time.tv_usec)/1000); -} diff --git a/ext/lsm1/lsm-test/lsmtest_win32.c b/ext/lsm1/lsm-test/lsmtest_win32.c deleted file mode 100644 index 9472723368..0000000000 --- a/ext/lsm1/lsm-test/lsmtest_win32.c +++ /dev/null @@ -1,30 +0,0 @@ - -#include "lsmtest.h" - -#ifdef _WIN32 - -#define TICKS_PER_SECOND (10000000) -#define TICKS_PER_MICROSECOND (10) -#define TICKS_UNIX_EPOCH (116444736000000000LL) - -int win32GetTimeOfDay( - struct timeval *tp, - void *tzp -){ - FILETIME fileTime; - ULONGLONG ticks; - ULONGLONG unixTicks; - - unused_parameter(tzp); - memset(&fileTime, 0, sizeof(FILETIME)); - GetSystemTimeAsFileTime(&fileTime); - ticks = (ULONGLONG)fileTime.dwHighDateTime << 32; - ticks |= (ULONGLONG)fileTime.dwLowDateTime; - unixTicks = ticks - TICKS_UNIX_EPOCH; - tp->tv_sec = (long)(unixTicks / TICKS_PER_SECOND); - unixTicks -= ((ULONGLONG)tp->tv_sec * TICKS_PER_SECOND); - tp->tv_usec = (long)(unixTicks / TICKS_PER_MICROSECOND); - - return 0; -} -#endif diff --git a/ext/lsm1/lsm.h b/ext/lsm1/lsm.h deleted file mode 100644 index 48701c4c5e..0000000000 --- a/ext/lsm1/lsm.h +++ /dev/null @@ -1,684 +0,0 @@ -/* -** 2011-08-10 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file defines the LSM API. -*/ -#ifndef _LSM_H -#define _LSM_H -#include -#ifdef __cplusplus -extern "C" { -#endif - -/* -** Opaque handle types. -*/ -typedef struct lsm_compress lsm_compress; /* Compression library functions */ -typedef struct lsm_compress_factory lsm_compress_factory; -typedef struct lsm_cursor lsm_cursor; /* Database cursor handle */ -typedef struct lsm_db lsm_db; /* Database connection handle */ -typedef struct lsm_env lsm_env; /* Runtime environment */ -typedef struct lsm_file lsm_file; /* OS file handle */ -typedef struct lsm_mutex lsm_mutex; /* Mutex handle */ - -/* 64-bit integer type used for file offsets. 
*/ -typedef long long int lsm_i64; /* 64-bit signed integer type */ - -/* Candidate values for the 3rd argument to lsm_env.xLock() */ -#define LSM_LOCK_UNLOCK 0 -#define LSM_LOCK_SHARED 1 -#define LSM_LOCK_EXCL 2 - -/* Flags for lsm_env.xOpen() */ -#define LSM_OPEN_READONLY 0x0001 - -/* -** CAPI: Database Runtime Environment -** -** Run-time environment used by LSM -*/ -struct lsm_env { - int nByte; /* Size of this structure in bytes */ - int iVersion; /* Version number of this structure (1) */ - /****** file i/o ***********************************************/ - void *pVfsCtx; - int (*xFullpath)(lsm_env*, const char *, char *, int *); - int (*xOpen)(lsm_env*, const char *, int flags, lsm_file **); - int (*xRead)(lsm_file *, lsm_i64, void *, int); - int (*xWrite)(lsm_file *, lsm_i64, void *, int); - int (*xTruncate)(lsm_file *, lsm_i64); - int (*xSync)(lsm_file *); - int (*xSectorSize)(lsm_file *); - int (*xRemap)(lsm_file *, lsm_i64, void **, lsm_i64*); - int (*xFileid)(lsm_file *, void *pBuf, int *pnBuf); - int (*xClose)(lsm_file *); - int (*xUnlink)(lsm_env*, const char *); - int (*xLock)(lsm_file*, int, int); - int (*xTestLock)(lsm_file*, int, int, int); - int (*xShmMap)(lsm_file*, int, int, void **); - void (*xShmBarrier)(void); - int (*xShmUnmap)(lsm_file*, int); - /****** memory allocation ****************************************/ - void *pMemCtx; - void *(*xMalloc)(lsm_env*, size_t); /* malloc(3) function */ - void *(*xRealloc)(lsm_env*, void *, size_t); /* realloc(3) function */ - void (*xFree)(lsm_env*, void *); /* free(3) function */ - size_t (*xSize)(lsm_env*, void *); /* xSize function */ - /****** mutexes ****************************************************/ - void *pMutexCtx; - int (*xMutexStatic)(lsm_env*,int,lsm_mutex**); /* Obtain a static mutex */ - int (*xMutexNew)(lsm_env*, lsm_mutex**); /* Get a new dynamic mutex */ - void (*xMutexDel)(lsm_mutex *); /* Delete an allocated mutex */ - void (*xMutexEnter)(lsm_mutex *); /* Grab a mutex */ - int (*xMutexTry)(lsm_mutex *); /* Attempt to obtain a mutex */ - void (*xMutexLeave)(lsm_mutex *); /* Leave a mutex */ - int (*xMutexHeld)(lsm_mutex *); /* Return true if mutex is held */ - int (*xMutexNotHeld)(lsm_mutex *); /* Return true if mutex not held */ - /****** other ****************************************************/ - int (*xSleep)(lsm_env*, int microseconds); - - /* New fields may be added in future releases, in which case the - ** iVersion value will increase. */ -}; - -/* -** Values that may be passed as the second argument to xMutexStatic. -*/ -#define LSM_MUTEX_GLOBAL 1 -#define LSM_MUTEX_HEAP 2 - -/* -** CAPI: LSM Error Codes -*/ -#define LSM_OK 0 -#define LSM_ERROR 1 -#define LSM_BUSY 5 -#define LSM_NOMEM 7 -#define LSM_READONLY 8 -#define LSM_IOERR 10 -#define LSM_CORRUPT 11 -#define LSM_FULL 13 -#define LSM_CANTOPEN 14 -#define LSM_PROTOCOL 15 -#define LSM_MISUSE 21 - -#define LSM_MISMATCH 50 - - -#define LSM_IOERR_NOENT (LSM_IOERR | (1<<8)) - -/* -** CAPI: Creating and Destroying Database Connection Handles -** -** Open and close a database connection handle. -*/ -int lsm_new(lsm_env*, lsm_db **ppDb); -int lsm_close(lsm_db *pDb); - -/* -** CAPI: Connecting to a Database -*/ -int lsm_open(lsm_db *pDb, const char *zFilename); - -/* -** CAPI: Obtaining pointers to database environments -** -** Return a pointer to the environment used by the database connection -** passed as the first argument. Assuming the argument is valid, this -** function always returns a valid environment pointer - it cannot fail. 
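
For orientation, here is a minimal sketch of the connection lifecycle using the handles and entry points declared above. `lsm_default_env()` is declared just below, and `lsm_insert()` (used throughout the test code earlier in this diff) is declared further down in this header; error handling is abbreviated.

```c
#include "lsm.h"

int example_open_write_close(void){
  lsm_db *db = 0;
  int rc;

  rc = lsm_new(lsm_default_env(), &db);           /* allocate a handle */
  if( rc==LSM_OK ) rc = lsm_open(db, "test.db");  /* attach it to a file */
  if( rc==LSM_OK ){
    rc = lsm_insert(db, "key", 3, "value", 5);    /* write one key/value pair */
  }

  if( db ) lsm_close(db);                         /* release the handle */
  return rc;
}
```
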
-*/ -lsm_env *lsm_get_env(lsm_db *pDb); - -/* -** The lsm_default_env() function returns a pointer to the default LSM -** environment for the current platform. -*/ -lsm_env *lsm_default_env(void); - - -/* -** CAPI: Configuring a database connection. -** -** The lsm_config() function is used to configure a database connection. -*/ -int lsm_config(lsm_db *, int, ...); - -/* -** The following values may be passed as the second argument to lsm_config(). -** -** LSM_CONFIG_AUTOFLUSH: -** A read/write integer parameter. -** -** This value determines the amount of data allowed to accumulate in a -** live in-memory tree before it is marked as old. After committing a -** transaction, a connection checks if the size of the live in-memory tree, -** including data structure overhead, is greater than the value of this -** option in KB. If it is, and there is not already an old in-memory tree, -** the live in-memory tree is marked as old. -** -** The maximum allowable value is 1048576 (1GB). There is no minimum -** value. If this parameter is set to zero, then an attempt is made to -** mark the live in-memory tree as old after each transaction is committed. -** -** The default value is 1024 (1MB). -** -** LSM_CONFIG_PAGE_SIZE: -** A read/write integer parameter. This parameter may only be set before -** lsm_open() has been called. -** -** LSM_CONFIG_BLOCK_SIZE: -** A read/write integer parameter. -** -** This parameter may only be set before lsm_open() has been called. It -** must be set to a power of two between 64 and 65536, inclusive (block -** sizes between 64KB and 64MB). -** -** If the connection creates a new database, the block size of the new -** database is set to the value of this option in KB. After lsm_open() -** has been called, querying this parameter returns the actual block -** size of the opened database. -** -** The default value is 1024 (1MB blocks). -** -** LSM_CONFIG_SAFETY: -** A read/write integer parameter. Valid values are 0, 1 (the default) -** and 2. This parameter determines how robust the database is in the -** face of a system crash (e.g. a power failure or operating system -** crash). As follows: -** -** 0 (off): No robustness. A system crash may corrupt the database. -** -** 1 (normal): Some robustness. A system crash may not corrupt the -** database file, but recently committed transactions may -** be lost following recovery. -** -** 2 (full): Full robustness. A system crash may not corrupt the -** database file. Following recovery the database file -** contains all successfully committed transactions. -** -** LSM_CONFIG_AUTOWORK: -** A read/write integer parameter. -** -** LSM_CONFIG_AUTOCHECKPOINT: -** A read/write integer parameter. -** -** If this option is set to non-zero value N, then a checkpoint is -** automatically attempted after each N KB of data have been written to -** the database file. -** -** The amount of uncheckpointed data already written to the database file -** is a global parameter. After performing database work (writing to the -** database file), the process checks if the total amount of uncheckpointed -** data exceeds the value of this paramter. If so, a checkpoint is performed. -** This means that this option may cause the connection to perform a -** checkpoint even if the current connection has itself written very little -** data into the database file. -** -** The default value is 2048 (checkpoint every 2MB). -** -** LSM_CONFIG_MMAP: -** A read/write integer parameter. 
If this value is set to 0, then the -** database file is accessed using ordinary read/write IO functions. Or, -** if it is set to 1, then the database file is memory mapped and accessed -** that way. If this parameter is set to any value N greater than 1, then -** up to the first N KB of the file are memory mapped, and any remainder -** accessed using read/write IO. -** -** The default value is 1 on 64-bit platforms and 32768 on 32-bit platforms. -** -** -** LSM_CONFIG_USE_LOG: -** A read/write boolean parameter. True (the default) to use the log -** file normally. False otherwise. -** -** LSM_CONFIG_AUTOMERGE: -** A read/write integer parameter. The minimum number of segments to -** merge together at a time. Default value 4. -** -** LSM_CONFIG_MAX_FREELIST: -** A read/write integer parameter. The maximum number of free-list -** entries that are stored in a database checkpoint (the others are -** stored elsewhere in the database). -** -** There is no reason for an application to configure or query this -** parameter. It is only present because configuring a small value -** makes certain parts of the lsm code easier to test. -** -** LSM_CONFIG_MULTIPLE_PROCESSES: -** A read/write boolean parameter. This parameter may only be set before -** lsm_open() has been called. If true, the library uses shared-memory -** and posix advisory locks to co-ordinate access by clients from within -** multiple processes. Otherwise, if false, all database clients must be -** located in the same process. The default value is true. -** -** LSM_CONFIG_SET_COMPRESSION: -** Set the compression methods used to compress and decompress database -** content. The argument to this option should be a pointer to a structure -** of type lsm_compress. The lsm_config() method takes a copy of the -** structures contents. -** -** This option may only be used before lsm_open() is called. Invoking it -** after lsm_open() has been called results in an LSM_MISUSE error. -** -** LSM_CONFIG_GET_COMPRESSION: -** Query the compression methods used to compress and decompress database -** content. -** -** LSM_CONFIG_SET_COMPRESSION_FACTORY: -** Configure a factory method to be invoked in case of an LSM_MISMATCH -** error. -** -** LSM_CONFIG_READONLY: -** A read/write boolean parameter. This parameter may only be set before -** lsm_open() is called. 
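[Aside, illustrative only: a connection might be tuned before lsm_open() roughly as below. The helper name and option values are hypothetical, and the by-pointer calling convention for integer options is an assumption based on the read/write parameter descriptions above.]

#include "lsm.h"

int configure_handle(lsm_db *pDb){
  int nPgsz = 8192;                  /* LSM_CONFIG_PAGE_SIZE (pre-open only) */
  int eSafety = LSM_SAFETY_FULL;     /* LSM_CONFIG_SAFETY */
  int rc;

  rc = lsm_config(pDb, LSM_CONFIG_PAGE_SIZE, &nPgsz);
  if( rc==LSM_OK ) rc = lsm_config(pDb, LSM_CONFIG_SAFETY, &eSafety);
  return rc;                         /* nPgsz/eSafety now hold the values in use */
}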
-*/ -#define LSM_CONFIG_AUTOFLUSH 1 -#define LSM_CONFIG_PAGE_SIZE 2 -#define LSM_CONFIG_SAFETY 3 -#define LSM_CONFIG_BLOCK_SIZE 4 -#define LSM_CONFIG_AUTOWORK 5 -#define LSM_CONFIG_MMAP 7 -#define LSM_CONFIG_USE_LOG 8 -#define LSM_CONFIG_AUTOMERGE 9 -#define LSM_CONFIG_MAX_FREELIST 10 -#define LSM_CONFIG_MULTIPLE_PROCESSES 11 -#define LSM_CONFIG_AUTOCHECKPOINT 12 -#define LSM_CONFIG_SET_COMPRESSION 13 -#define LSM_CONFIG_GET_COMPRESSION 14 -#define LSM_CONFIG_SET_COMPRESSION_FACTORY 15 -#define LSM_CONFIG_READONLY 16 - -#define LSM_SAFETY_OFF 0 -#define LSM_SAFETY_NORMAL 1 -#define LSM_SAFETY_FULL 2 - -/* -** CAPI: Compression and/or Encryption Hooks -*/ -struct lsm_compress { - void *pCtx; - unsigned int iId; - int (*xBound)(void *, int nSrc); - int (*xCompress)(void *, char *, int *, const char *, int); - int (*xUncompress)(void *, char *, int *, const char *, int); - void (*xFree)(void *pCtx); -}; - -struct lsm_compress_factory { - void *pCtx; - int (*xFactory)(void *, lsm_db *, unsigned int); - void (*xFree)(void *pCtx); -}; - -#define LSM_COMPRESSION_EMPTY 0 -#define LSM_COMPRESSION_NONE 1 - -/* -** CAPI: Allocating and Freeing Memory -** -** Invoke the memory allocation functions that belong to environment -** pEnv. Or the system defaults if no memory allocation functions have -** been registered. -*/ -void *lsm_malloc(lsm_env*, size_t); -void *lsm_realloc(lsm_env*, void *, size_t); -void lsm_free(lsm_env*, void *); - -/* -** CAPI: Querying a Connection For Operational Data -** -** Query a database connection for operational statistics or data. -*/ -int lsm_info(lsm_db *, int, ...); - -int lsm_get_user_version(lsm_db *, unsigned int *); -int lsm_set_user_version(lsm_db *, unsigned int); - -/* -** The following values may be passed as the second argument to lsm_info(). -** -** LSM_INFO_NWRITE: -** The third parameter should be of type (int *). The location pointed -** to by the third parameter is set to the number of 4KB pages written to -** the database file during the lifetime of this connection. -** -** LSM_INFO_NREAD: -** The third parameter should be of type (int *). The location pointed -** to by the third parameter is set to the number of 4KB pages read from -** the database file during the lifetime of this connection. -** -** LSM_INFO_DB_STRUCTURE: -** The third argument should be of type (char **). The location pointed -** to is populated with a pointer to a nul-terminated string containing -** the string representation of a Tcl data-structure reflecting the -** current structure of the database file. Specifically, the current state -** of the worker snapshot. The returned string should be eventually freed -** by the caller using lsm_free(). -** -** The returned list contains one element for each level in the database, -** in order from most to least recent. Each element contains a -** single element for each segment comprising the corresponding level, -** starting with the lhs segment, then each of the rhs segments (if any) -** in order from most to least recent. -** -** Each segment element is itself a list of 4 integer values, as follows: -** -**

-**     1. First page of segment
-**     2. Last page of segment
-**     3. Root page of segment (if applicable)
-**     4. Total number of pages in segment
    -** -** LSM_INFO_ARRAY_STRUCTURE: -** There should be two arguments passed following this option (i.e. a -** total of four arguments passed to lsm_info()). The first argument -** should be the page number of the first page in a database array -** (perhaps obtained from an earlier INFO_DB_STRUCTURE call). The second -** trailing argument should be of type (char **). The location pointed -** to is populated with a pointer to a nul-terminated string that must -** be eventually freed using lsm_free() by the caller. -** -** The output string contains the text representation of a Tcl list of -** integers. Each pair of integers represent a range of pages used by -** the identified array. For example, if the array occupies database -** pages 993 to 1024, then pages 2048 to 2777, then the returned string -** will be "993 1024 2048 2777". -** -** If the specified integer argument does not correspond to the first -** page of any database array, LSM_ERROR is returned and the output -** pointer is set to a NULL value. -** -** LSM_INFO_LOG_STRUCTURE: -** The third argument should be of type (char **). The location pointed -** to is populated with a pointer to a nul-terminated string containing -** the string representation of a Tcl data-structure. The returned -** string should be eventually freed by the caller using lsm_free(). -** -** The Tcl structure returned is a list of six integers that describe -** the current structure of the log file. -** -** LSM_INFO_ARRAY_PAGES: -** -** LSM_INFO_PAGE_ASCII_DUMP: -** As with LSM_INFO_ARRAY_STRUCTURE, there should be two arguments passed -** with calls that specify this option - an integer page number and a -** (char **) used to return a nul-terminated string that must be later -** freed using lsm_free(). In this case the output string is populated -** with a human-readable description of the page content. -** -** If the page cannot be decoded, it is not an error. In this case the -** human-readable output message will report the systems failure to -** interpret the page data. -** -** LSM_INFO_PAGE_HEX_DUMP: -** This argument is similar to PAGE_ASCII_DUMP, except that keys and -** values are represented using hexadecimal notation instead of ascii. -** -** LSM_INFO_FREELIST: -** The third argument should be of type (char **). The location pointed -** to is populated with a pointer to a nul-terminated string containing -** the string representation of a Tcl data-structure. The returned -** string should be eventually freed by the caller using lsm_free(). -** -** The Tcl structure returned is a list containing one element for each -** free block in the database. The element itself consists of two -** integers - the block number and the id of the snapshot that freed it. -** -** LSM_INFO_CHECKPOINT_SIZE: -** The third argument should be of type (int *). The location pointed to -** by this argument is populated with the number of KB written to the -** database file since the most recent checkpoint. -** -** LSM_INFO_TREE_SIZE: -** If this value is passed as the second argument to an lsm_info() call, it -** should be followed by two arguments of type (int *) (for a total of four -** arguments). -** -** At any time, there are either one or two tree structures held in shared -** memory that new database clients will access (there may also be additional -** tree structures being used by older clients - this API does not provide -** information on them). One tree structure - the current tree - is used to -** accumulate new data written to the database. 
The other tree structure - -** the old tree - is a read-only tree holding older data and may be flushed -** to disk at any time. -** -** Assuming no error occurs, the location pointed to by the first of the two -** (int *) arguments is set to the size of the old in-memory tree in KB. -** The second is set to the size of the current, or live in-memory tree. -** -** LSM_INFO_COMPRESSION_ID: -** This value should be followed by a single argument of type -** (unsigned int *). If successful, the location pointed to is populated -** with the database compression id before returning. -*/ -#define LSM_INFO_NWRITE 1 -#define LSM_INFO_NREAD 2 -#define LSM_INFO_DB_STRUCTURE 3 -#define LSM_INFO_LOG_STRUCTURE 4 -#define LSM_INFO_ARRAY_STRUCTURE 5 -#define LSM_INFO_PAGE_ASCII_DUMP 6 -#define LSM_INFO_PAGE_HEX_DUMP 7 -#define LSM_INFO_FREELIST 8 -#define LSM_INFO_ARRAY_PAGES 9 -#define LSM_INFO_CHECKPOINT_SIZE 10 -#define LSM_INFO_TREE_SIZE 11 -#define LSM_INFO_FREELIST_SIZE 12 -#define LSM_INFO_COMPRESSION_ID 13 - - -/* -** CAPI: Opening and Closing Write Transactions -** -** These functions are used to open and close transactions and nested -** sub-transactions. -** -** The lsm_begin() function is used to open transactions and sub-transactions. -** A successful call to lsm_begin() ensures that there are at least iLevel -** nested transactions open. To open a top-level transaction, pass iLevel=1. -** To open a sub-transaction within the top-level transaction, iLevel=2. -** Passing iLevel=0 is a no-op. -** -** lsm_commit() is used to commit transactions and sub-transactions. A -** successful call to lsm_commit() ensures that there are at most iLevel -** nested transactions open. To commit a top-level transaction, pass iLevel=0. -** To commit all sub-transactions inside the main transaction, pass iLevel=1. -** -** Function lsm_rollback() is used to roll back transactions and -** sub-transactions. A successful call to lsm_rollback() restores the database -** to the state it was in when the iLevel'th nested sub-transaction (if any) -** was first opened. And then closes transactions to ensure that there are -** at most iLevel nested transactions open. Passing iLevel=0 rolls back and -** closes the top-level transaction. iLevel=1 also rolls back the top-level -** transaction, but leaves it open. iLevel=2 rolls back the sub-transaction -** nested directly inside the top-level transaction (and leaves it open). -*/ -int lsm_begin(lsm_db *pDb, int iLevel); -int lsm_commit(lsm_db *pDb, int iLevel); -int lsm_rollback(lsm_db *pDb, int iLevel); - -/* -** CAPI: Writing to a Database -** -** Write a new value into the database. If a value with a duplicate key -** already exists it is replaced. -*/ -int lsm_insert(lsm_db*, const void *pKey, int nKey, const void *pVal, int nVal); - -/* -** Delete a value from the database. No error is returned if the specified -** key value does not exist in the database. -*/ -int lsm_delete(lsm_db *, const void *pKey, int nKey); - -/* -** Delete all database entries with keys that are greater than (pKey1/nKey1) -** and smaller than (pKey2/nKey2). Note that keys (pKey1/nKey1) and -** (pKey2/nKey2) themselves, if they exist in the database, are not deleted. -** -** Return LSM_OK if successful, or an LSM error code otherwise. -*/ -int lsm_delete_range(lsm_db *, - const void *pKey1, int nKey1, const void *pKey2, int nKey2 -); - -/* -** CAPI: Explicit Database Work and Checkpointing -** -** This function is called by a thread to work on the database structure. 
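[Aside, illustrative only: a minimal sketch of a single write wrapped in a top-level transaction, following the iLevel conventions for lsm_begin()/lsm_commit()/lsm_rollback() described above. The helper name is hypothetical.]

#include <string.h>
#include "lsm.h"

int write_one(lsm_db *pDb, const char *zKey, const char *zVal){
  int rc = lsm_begin(pDb, 1);        /* ensure a top-level transaction is open */
  if( rc==LSM_OK ){
    rc = lsm_insert(pDb, zKey, (int)strlen(zKey), zVal, (int)strlen(zVal));
    if( rc==LSM_OK ){
      rc = lsm_commit(pDb, 0);       /* iLevel=0: commit the top-level txn */
    }else{
      lsm_rollback(pDb, 0);          /* iLevel=0: roll back and close it */
    }
  }
  return rc;
}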
-*/ -int lsm_work(lsm_db *pDb, int nMerge, int nKB, int *pnWrite); - -int lsm_flush(lsm_db *pDb); - -/* -** Attempt to checkpoint the current database snapshot. Return an LSM -** error code if an error occurs or LSM_OK otherwise. -** -** If the current snapshot has already been checkpointed, calling this -** function is a no-op. In this case if pnKB is not NULL, *pnKB is -** set to 0. Or, if the current snapshot is successfully checkpointed -** by this function and pbKB is not NULL, *pnKB is set to the number -** of bytes written to the database file since the previous checkpoint -** (the same measure as returned by the LSM_INFO_CHECKPOINT_SIZE query). -*/ -int lsm_checkpoint(lsm_db *pDb, int *pnKB); - -/* -** CAPI: Opening and Closing Database Cursors -** -** Open and close a database cursor. -*/ -int lsm_csr_open(lsm_db *pDb, lsm_cursor **ppCsr); -int lsm_csr_close(lsm_cursor *pCsr); - -/* -** CAPI: Positioning Database Cursors -** -** If the fourth parameter is LSM_SEEK_EQ, LSM_SEEK_GE or LSM_SEEK_LE, -** this function searches the database for an entry with key (pKey/nKey). -** If an error occurs, an LSM error code is returned. Otherwise, LSM_OK. -** -** If no error occurs and the requested key is present in the database, the -** cursor is left pointing to the entry with the specified key. Or, if the -** specified key is not present in the database the state of the cursor -** depends on the value passed as the final parameter, as follows: -** -** LSM_SEEK_EQ: -** The cursor is left at EOF (invalidated). A call to lsm_csr_valid() -** returns non-zero. -** -** LSM_SEEK_LE: -** The cursor is left pointing to the largest key in the database that -** is smaller than (pKey/nKey). If the database contains no keys smaller -** than (pKey/nKey), the cursor is left at EOF. -** -** LSM_SEEK_GE: -** The cursor is left pointing to the smallest key in the database that -** is larger than (pKey/nKey). If the database contains no keys larger -** than (pKey/nKey), the cursor is left at EOF. -** -** If the fourth parameter is LSM_SEEK_LEFAST, this function searches the -** database in a similar manner to LSM_SEEK_LE, with two differences: -** -**
-**     1. Even if a key can be found (the cursor is not left at EOF), the
-**        lsm_csr_value() function may not be used (attempts to do so return
-**        LSM_MISUSE).
-**
-**     2. The key that the cursor is left pointing to may be one that has
-**        been recently deleted from the database. In this case it is
-**        guaranteed that the returned key is larger than any key currently
-**        in the database that is less than or equal to (pKey/nKey).
    -** -** LSM_SEEK_LEFAST requests are intended to be used to allocate database -** keys. -*/ -int lsm_csr_seek(lsm_cursor *pCsr, const void *pKey, int nKey, int eSeek); - -int lsm_csr_first(lsm_cursor *pCsr); -int lsm_csr_last(lsm_cursor *pCsr); - -/* -** Advance the specified cursor to the next or previous key in the database. -** Return LSM_OK if successful, or an LSM error code otherwise. -** -** Functions lsm_csr_seek(), lsm_csr_first() and lsm_csr_last() are "seek" -** functions. Whether or not lsm_csr_next and lsm_csr_prev may be called -** successfully also depends on the most recent seek function called on -** the cursor. Specifically: -** -**
-**
-**     • At least one seek function must have been called on the cursor.
-**     • To call lsm_csr_next(), the most recent call to a seek function must
-**       have been either lsm_csr_first() or a call to lsm_csr_seek() specifying
-**       LSM_SEEK_GE.
-**     • To call lsm_csr_prev(), the most recent call to a seek function must
-**       have been either lsm_csr_last() or a call to lsm_csr_seek() specifying
-**       LSM_SEEK_LE.
    -** -** Otherwise, if the above conditions are not met when lsm_csr_next or -** lsm_csr_prev is called, LSM_MISUSE is returned and the cursor position -** remains unchanged. -*/ -int lsm_csr_next(lsm_cursor *pCsr); -int lsm_csr_prev(lsm_cursor *pCsr); - -/* -** Values that may be passed as the fourth argument to lsm_csr_seek(). -*/ -#define LSM_SEEK_LEFAST -2 -#define LSM_SEEK_LE -1 -#define LSM_SEEK_EQ 0 -#define LSM_SEEK_GE 1 - -/* -** CAPI: Extracting Data From Database Cursors -** -** Retrieve data from a database cursor. -*/ -int lsm_csr_valid(lsm_cursor *pCsr); -int lsm_csr_key(lsm_cursor *pCsr, const void **ppKey, int *pnKey); -int lsm_csr_value(lsm_cursor *pCsr, const void **ppVal, int *pnVal); - -/* -** If no error occurs, this function compares the database key passed via -** the pKey/nKey arguments with the key that the cursor passed as the first -** argument currently points to. If the cursors key is less than, equal to -** or greater than pKey/nKey, *piRes is set to less than, equal to or greater -** than zero before returning. LSM_OK is returned in this case. -** -** Or, if an error occurs, an LSM error code is returned and the final -** value of *piRes is undefined. If the cursor does not point to a valid -** key when this function is called, LSM_MISUSE is returned. -*/ -int lsm_csr_cmp(lsm_cursor *pCsr, const void *pKey, int nKey, int *piRes); - -/* -** CAPI: Change these!! -** -** Configure a callback to which debugging and other messages should -** be directed. Only useful for debugging lsm. -*/ -void lsm_config_log(lsm_db *, void (*)(void *, int, const char *), void *); - -/* -** Configure a callback that is invoked if the database connection ever -** writes to the database file. -*/ -void lsm_config_work_hook(lsm_db *, void (*)(lsm_db *, void *), void *); - -/* ENDOFAPI */ -#ifdef __cplusplus -} /* End of the 'extern "C"' block */ -#endif -#endif /* ifndef _LSM_H */ diff --git a/ext/lsm1/lsmInt.h b/ext/lsm1/lsmInt.h deleted file mode 100644 index 0f822e4793..0000000000 --- a/ext/lsm1/lsmInt.h +++ /dev/null @@ -1,993 +0,0 @@ -/* -** 2011-08-18 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** Internal structure definitions for the LSM module. -*/ -#ifndef _LSM_INT_H -#define _LSM_INT_H - -#include "lsm.h" -#include -#include - -#include -#include -#include -#include - -#ifdef _WIN32 -# ifdef _MSC_VER -# define snprintf _snprintf -# endif -#else -# include -#endif - -#ifdef NDEBUG -# ifdef LSM_DEBUG_EXPENSIVE -# undef LSM_DEBUG_EXPENSIVE -# endif -# ifdef LSM_DEBUG -# undef LSM_DEBUG -# endif -#else -# ifndef LSM_DEBUG -# define LSM_DEBUG -# endif -#endif - -/* -** Default values for various data structure parameters. These may be -** overridden by calls to lsm_config(). -*/ -#define LSM_DFLT_PAGE_SIZE (4 * 1024) -#define LSM_DFLT_BLOCK_SIZE (1 * 1024 * 1024) -#define LSM_DFLT_AUTOFLUSH (1 * 1024 * 1024) -#define LSM_DFLT_AUTOCHECKPOINT (i64)(2 * 1024 * 1024) -#define LSM_DFLT_AUTOWORK 1 -#define LSM_DFLT_LOG_SIZE (128*1024) -#define LSM_DFLT_AUTOMERGE 4 -#define LSM_DFLT_SAFETY LSM_SAFETY_NORMAL -#define LSM_DFLT_MMAP (LSM_IS_64_BIT ? 1 : 32768) -#define LSM_DFLT_MULTIPLE_PROCESSES 1 -#define LSM_DFLT_USE_LOG 1 - -/* Initial values for log file checksums. 
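[Aside, illustrative only: the cursor interface removed from lsm.h above (lsm_csr_open(), lsm_csr_first(), lsm_csr_next(), lsm_csr_key(), lsm_csr_value(), lsm_csr_close()) supports a full ascending scan along the lines of the hypothetical helper below, which assumes text keys and values.]

#include <stdio.h>
#include "lsm.h"

int dump_all(lsm_db *pDb){
  lsm_cursor *pCsr = 0;
  int rc = lsm_csr_open(pDb, &pCsr);
  if( rc!=LSM_OK ) return rc;

  for(rc=lsm_csr_first(pCsr); rc==LSM_OK && lsm_csr_valid(pCsr);
      rc=lsm_csr_next(pCsr)
  ){
    const void *pKey; int nKey;
    const void *pVal; int nVal;
    rc = lsm_csr_key(pCsr, &pKey, &nKey);
    if( rc==LSM_OK ) rc = lsm_csr_value(pCsr, &pVal, &nVal);
    if( rc!=LSM_OK ) break;
    printf("%.*s -> %.*s\n", nKey, (const char*)pKey, nVal, (const char*)pVal);
  }
  lsm_csr_close(pCsr);
  return rc;
}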
These are only used if the -** database file does not contain a valid checkpoint. */ -#define LSM_CKSUM0_INIT 42 -#define LSM_CKSUM1_INIT 42 - -/* "mmap" mode is currently only used in environments with 64-bit address -** spaces. The following macro is used to test for this. */ -#define LSM_IS_64_BIT (sizeof(void*)==8) - -#define LSM_AUTOWORK_QUANT 32 - -typedef struct Database Database; -typedef struct DbLog DbLog; -typedef struct FileSystem FileSystem; -typedef struct Freelist Freelist; -typedef struct FreelistEntry FreelistEntry; -typedef struct Level Level; -typedef struct LogMark LogMark; -typedef struct LogRegion LogRegion; -typedef struct LogWriter LogWriter; -typedef struct LsmString LsmString; -typedef struct Mempool Mempool; -typedef struct Merge Merge; -typedef struct MergeInput MergeInput; -typedef struct MetaPage MetaPage; -typedef struct MultiCursor MultiCursor; -typedef struct Page Page; -typedef struct Redirect Redirect; -typedef struct Segment Segment; -typedef struct SegmentMerger SegmentMerger; -typedef struct ShmChunk ShmChunk; -typedef struct ShmHeader ShmHeader; -typedef struct ShmReader ShmReader; -typedef struct Snapshot Snapshot; -typedef struct TransMark TransMark; -typedef struct Tree Tree; -typedef struct TreeCursor TreeCursor; -typedef struct TreeHeader TreeHeader; -typedef struct TreeMark TreeMark; -typedef struct TreeRoot TreeRoot; - -#ifndef _SQLITEINT_H_ -typedef unsigned char u8; -typedef unsigned short int u16; -typedef unsigned int u32; -typedef lsm_i64 i64; -typedef unsigned long long int u64; -#endif - -/* A page number is a 64-bit integer. */ -typedef i64 LsmPgno; - -#ifdef LSM_DEBUG -int lsmErrorBkpt(int); -#else -# define lsmErrorBkpt(x) (x) -#endif - -#define LSM_PROTOCOL_BKPT lsmErrorBkpt(LSM_PROTOCOL) -#define LSM_IOERR_BKPT lsmErrorBkpt(LSM_IOERR) -#define LSM_NOMEM_BKPT lsmErrorBkpt(LSM_NOMEM) -#define LSM_CORRUPT_BKPT lsmErrorBkpt(LSM_CORRUPT) -#define LSM_MISUSE_BKPT lsmErrorBkpt(LSM_MISUSE) - -#define unused_parameter(x) (void)(x) -#define array_size(x) (sizeof(x)/sizeof(x[0])) - - -/* The size of each shared-memory chunk */ -#define LSM_SHM_CHUNK_SIZE (32*1024) - -/* The number of bytes reserved at the start of each shm chunk for MM. */ -#define LSM_SHM_CHUNK_HDR (sizeof(ShmChunk)) - -/* The number of available read locks. */ -#define LSM_LOCK_NREADER 6 - -/* The number of available read-write client locks. */ -#define LSM_LOCK_NRWCLIENT 16 - -/* Lock definitions. -*/ -#define LSM_LOCK_DMS1 1 /* Serialize connect/disconnect ops */ -#define LSM_LOCK_DMS2 2 /* Read-write connections */ -#define LSM_LOCK_DMS3 3 /* Read-only connections */ -#define LSM_LOCK_WRITER 4 -#define LSM_LOCK_WORKER 5 -#define LSM_LOCK_CHECKPOINTER 6 -#define LSM_LOCK_ROTRANS 7 -#define LSM_LOCK_READER(i) ((i) + LSM_LOCK_ROTRANS + 1) -#define LSM_LOCK_RWCLIENT(i) ((i) + LSM_LOCK_READER(LSM_LOCK_NREADER)) - -#define LSM_N_LOCK LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT) - -/* -** Meta-page size and usable size. -*/ -#define LSM_META_PAGE_SIZE 4096 - -#define LSM_META_RW_PAGE_SIZE (LSM_META_PAGE_SIZE - LSM_N_LOCK) - -/* -** Hard limit on the number of free-list entries that may be stored in -** a checkpoint (the remainder are stored as a system record in the LSM). -** See also LSM_CONFIG_MAX_FREELIST. -*/ -#define LSM_MAX_FREELIST_ENTRIES 24 - -#define LSM_MAX_BLOCK_REDIRECTS 16 - -#define LSM_ATTEMPTS_BEFORE_PROTOCOL 10000 - - -/* -** Each entry stored in the LSM (or in-memory tree structure) has an -** associated mask of the following flags. 
-*/ -#define LSM_START_DELETE 0x01 /* Start of open-ended delete range */ -#define LSM_END_DELETE 0x02 /* End of open-ended delete range */ -#define LSM_POINT_DELETE 0x04 /* Delete this key */ -#define LSM_INSERT 0x08 /* Insert this key and value */ -#define LSM_SEPARATOR 0x10 /* True if entry is separator key only */ -#define LSM_SYSTEMKEY 0x20 /* True if entry is a system key (FREELIST) */ - -#define LSM_CONTIGUOUS 0x40 /* Used in lsm_tree.c */ - -/* -** A string that can grow by appending. -*/ -struct LsmString { - lsm_env *pEnv; /* Run-time environment */ - int n; /* Size of string. -1 indicates error */ - int nAlloc; /* Space allocated for z[] */ - char *z; /* The string content */ -}; - -typedef struct LsmFile LsmFile; -struct LsmFile { - lsm_file *pFile; - LsmFile *pNext; -}; - -/* -** An instance of the following type is used to store an ordered list of -** u32 values. -** -** Note: This is a place-holder implementation. It should be replaced by -** a version that avoids making a single large allocation when the array -** contains a large number of values. For this reason, the internals of -** this object should only manipulated by the intArrayXXX() functions in -** lsm_tree.c. -*/ -typedef struct IntArray IntArray; -struct IntArray { - int nAlloc; - int nArray; - u32 *aArray; -}; - -struct Redirect { - int n; /* Number of redirects */ - struct RedirectEntry { - int iFrom; - int iTo; - } *a; -}; - -/* -** An instance of this structure represents a point in the history of the -** tree structure to roll back to. Refer to comments in lsm_tree.c for -** details. -*/ -struct TreeMark { - u32 iRoot; /* Offset of root node in shm file */ - u32 nHeight; /* Current height of tree structure */ - u32 iWrite; /* Write offset in shm file */ - u32 nChunk; /* Number of chunks in shared-memory file */ - u32 iFirst; /* First chunk in linked list */ - u32 iNextShmid; /* Next id to allocate */ - int iRollback; /* Index in lsm->rollback to revert to */ -}; - -/* -** An instance of this structure represents a point in the database log. -*/ -struct LogMark { - i64 iOff; /* Offset into log (see lsm_log.c) */ - int nBuf; /* Size of in-memory buffer here */ - u8 aBuf[8]; /* Bytes of content in aBuf[] */ - u32 cksum0; /* Checksum 0 at offset (iOff-nBuf) */ - u32 cksum1; /* Checksum 1 at offset (iOff-nBuf) */ -}; - -struct TransMark { - TreeMark tree; - LogMark log; -}; - -/* -** A structure that defines the start and end offsets of a region in the -** log file. The size of the region in bytes is (iEnd - iStart), so if -** iEnd==iStart the region is zero bytes in size. -*/ -struct LogRegion { - i64 iStart; /* Start of region in log file */ - i64 iEnd; /* End of region in log file */ -}; - -struct DbLog { - u32 cksum0; /* Checksum 0 at offset iOff */ - u32 cksum1; /* Checksum 1 at offset iOff */ - i64 iSnapshotId; /* Log space has been reclaimed to this ss */ - LogRegion aRegion[3]; /* Log file regions (see docs in lsm_log.c) */ -}; - -struct TreeRoot { - u32 iRoot; - u32 nHeight; - u32 nByte; /* Total size of this tree in bytes */ - u32 iTransId; -}; - -/* -** Tree header structure. 
-*/ -struct TreeHeader { - u32 iUsedShmid; /* Id of first shm chunk used by this tree */ - u32 iNextShmid; /* Shm-id of next chunk allocated */ - u32 iFirst; /* Chunk number of smallest shm-id */ - u32 nChunk; /* Number of chunks in shared-memory file */ - TreeRoot root; /* Root and height of current tree */ - u32 iWrite; /* Write offset in shm file */ - TreeRoot oldroot; /* Root and height of the previous tree */ - u32 iOldShmid; /* Last shm-id used by previous tree */ - u32 iUsrVersion; /* get/set_user_version() value */ - i64 iOldLog; /* Log offset associated with old tree */ - u32 oldcksum0; - u32 oldcksum1; - DbLog log; /* Current layout of log file */ - u32 aCksum[2]; /* Checksums 1 and 2. */ -}; - -/* -** Database handle structure. -** -** mLock: -** A bitmask representing the locks currently held by the connection. -** An LSM database supports N distinct locks, where N is some number less -** than or equal to 32. Locks are numbered starting from 1 (see the -** definitions for LSM_LOCK_WRITER and co.). -** -** The least significant 32-bits in mLock represent EXCLUSIVE locks. The -** most significant are SHARED locks. So, if a connection holds a SHARED -** lock on lock region iLock, then the following is true: -** -** (mLock & ((iLock+32-1) << 1)) -** -** Or for an EXCLUSIVE lock: -** -** (mLock & ((iLock-1) << 1)) -** -** pCsr: -** Points to the head of a linked list that contains all currently open -** cursors. Once this list becomes empty, the user has no outstanding -** cursors and the database handle can be successfully closed. -** -** pCsrCache: -** This list contains cursor objects that have been closed using -** lsm_csr_close(). Each time a cursor is closed, it is shifted from -** the pCsr list to this list. When a new cursor is opened, this list -** is inspected to see if there exists a cursor object that can be -** reused. This is an optimization only. 
-*/ -struct lsm_db { - - /* Database handle configuration */ - lsm_env *pEnv; /* runtime environment */ - int (*xCmp)(void *, int, void *, int); /* Compare function */ - - /* Values configured by calls to lsm_config */ - int eSafety; /* LSM_SAFETY_OFF, NORMAL or FULL */ - int bAutowork; /* Configured by LSM_CONFIG_AUTOWORK */ - int nTreeLimit; /* Configured by LSM_CONFIG_AUTOFLUSH */ - int nMerge; /* Configured by LSM_CONFIG_AUTOMERGE */ - int bUseLog; /* Configured by LSM_CONFIG_USE_LOG */ - int nDfltPgsz; /* Configured by LSM_CONFIG_PAGE_SIZE */ - int nDfltBlksz; /* Configured by LSM_CONFIG_BLOCK_SIZE */ - int nMaxFreelist; /* Configured by LSM_CONFIG_MAX_FREELIST */ - int iMmap; /* Configured by LSM_CONFIG_MMAP */ - i64 nAutockpt; /* Configured by LSM_CONFIG_AUTOCHECKPOINT */ - int bMultiProc; /* Configured by L_C_MULTIPLE_PROCESSES */ - int bReadonly; /* Configured by LSM_CONFIG_READONLY */ - lsm_compress compress; /* Compression callbacks */ - lsm_compress_factory factory; /* Compression callback factory */ - - /* Sub-system handles */ - FileSystem *pFS; /* On-disk portion of database */ - Database *pDatabase; /* Database shared data */ - - int iRwclient; /* Read-write client lock held (-1 == none) */ - - /* Client transaction context */ - Snapshot *pClient; /* Client snapshot */ - int iReader; /* Read lock held (-1 == unlocked) */ - int bRoTrans; /* True if a read-only db trans is open */ - MultiCursor *pCsr; /* List of all open cursors */ - LogWriter *pLogWriter; /* Context for writing to the log file */ - int nTransOpen; /* Number of opened write transactions */ - int nTransAlloc; /* Allocated size of aTrans[] array */ - TransMark *aTrans; /* Array of marks for transaction rollback */ - IntArray rollback; /* List of tree-nodes to roll back */ - int bDiscardOld; /* True if lsmTreeDiscardOld() was called */ - - MultiCursor *pCsrCache; /* List of all closed cursors */ - - /* Worker context */ - Snapshot *pWorker; /* Worker snapshot (or NULL) */ - Freelist *pFreelist; /* See sortedNewToplevel() */ - int bUseFreelist; /* True to use pFreelist */ - int bIncrMerge; /* True if currently doing a merge */ - - int bInFactory; /* True if within factory.xFactory() */ - - /* Debugging message callback */ - void (*xLog)(void *, int, const char *); - void *pLogCtx; - - /* Work done notification callback */ - void (*xWork)(lsm_db *, void *); - void *pWorkCtx; - - u64 mLock; /* Mask of current locks. See lsmShmLock(). */ - lsm_db *pNext; /* Next connection to same database */ - - int nShm; /* Size of apShm[] array */ - void **apShm; /* Shared memory chunks */ - ShmHeader *pShmhdr; /* Live shared-memory header */ - TreeHeader treehdr; /* Local copy of tree-header */ - u32 aSnapshot[LSM_META_PAGE_SIZE / sizeof(u32)]; -}; - -struct Segment { - LsmPgno iFirst; /* First page of this run */ - LsmPgno iLastPg; /* Last page of this run */ - LsmPgno iRoot; /* Root page number (if any) */ - int nSize; /* Size of this run in pages */ - - Redirect *pRedirect; /* Block redirects (or NULL) */ -}; - -/* -** iSplitTopic/pSplitKey/nSplitKey: -** If nRight>0, this buffer contains a copy of the largest key that has -** already been written to the left-hand-side of the level. 
-*/ -struct Level { - Segment lhs; /* Left-hand (main) segment */ - int nRight; /* Size of apRight[] array */ - Segment *aRhs; /* Old segments being merged into this */ - int iSplitTopic; /* Split key topic (if nRight>0) */ - void *pSplitKey; /* Pointer to split-key (if nRight>0) */ - int nSplitKey; /* Number of bytes in split-key */ - - u16 iAge; /* Number of times data has been written */ - u16 flags; /* Mask of LEVEL_XXX bits */ - Merge *pMerge; /* Merge operation currently underway */ - Level *pNext; /* Next level in tree */ -}; - -/* -** The Level.flags field is set to a combination of the following bits. -** -** LEVEL_FREELIST_ONLY: -** Set if the level consists entirely of free-list entries. -** -** LEVEL_INCOMPLETE: -** This is set while a new toplevel level is being constructed. It is -** never set for any level other than a new toplevel. -*/ -#define LEVEL_FREELIST_ONLY 0x0001 -#define LEVEL_INCOMPLETE 0x0002 - - -/* -** A structure describing an ongoing merge. There is an instance of this -** structure for every Level currently undergoing a merge in the worker -** snapshot. -** -** It is assumed that code that uses an instance of this structure has -** access to the associated Level struct. -** -** iOutputOff: -** The byte offset to write to next within the last page of the -** output segment. -*/ -struct MergeInput { - LsmPgno iPg; /* Page on which next input is stored */ - int iCell; /* Cell containing next input to merge */ -}; -struct Merge { - int nInput; /* Number of input runs being merged */ - MergeInput *aInput; /* Array nInput entries in size */ - MergeInput splitkey; /* Location in file of current splitkey */ - int nSkip; /* Number of separators entries to skip */ - int iOutputOff; /* Write offset on output page */ - LsmPgno iCurrentPtr; /* Current pointer value */ -}; - -/* -** The first argument to this macro is a pointer to a Segment structure. -** Returns true if the structure instance indicates that the separators -** array is valid. -*/ -#define segmentHasSeparators(pSegment) ((pSegment)->sep.iFirst>0) - -/* -** The values that accompany the lock held by a database reader. -*/ -struct ShmReader { - u32 iTreeId; - i64 iLsmId; -}; - -/* -** An instance of this structure is stored in the first shared-memory -** page. The shared-memory header. -** -** bWriter: -** Immediately after opening a write transaction taking the WRITER lock, -** each writer client sets this flag. It is cleared right before the -** WRITER lock is relinquished. If a subsequent writer finds that this -** flag is already set when a write transaction is opened, this indicates -** that a previous writer failed mid-transaction. -** -** iMetaPage: -** If the database file does not contain a valid, synced, checkpoint, this -** value is set to 0. Otherwise, it is set to the meta-page number that -** contains the most recently written checkpoint (either 1 or 2). -** -** hdr1, hdr2: -** The two copies of the in-memory tree header. Two copies are required -** in case a writer fails while updating one of them. -*/ -struct ShmHeader { - u32 aSnap1[LSM_META_PAGE_SIZE / 4]; - u32 aSnap2[LSM_META_PAGE_SIZE / 4]; - u32 bWriter; - u32 iMetaPage; - TreeHeader hdr1; - TreeHeader hdr2; - ShmReader aReader[LSM_LOCK_NREADER]; -}; - -/* -** An instance of this structure is stored at the start of each shared-memory -** chunk except the first (which is the header chunk - see above). -*/ -struct ShmChunk { - u32 iShmid; - u32 iNext; -}; - -/* -** Maximum number of shared-memory chunks allowed in the *-shm file. 
Since -** each shared-memory chunk is 32KB in size, this is a theoretical limit only. -*/ -#define LSM_MAX_SHMCHUNKS (1<<30) - -/* Return true if shm-sequence "a" is larger than or equal to "b" */ -#define shm_sequence_ge(a, b) (((u32)a-(u32)b) < LSM_MAX_SHMCHUNKS) - -#define LSM_APPLIST_SZ 4 - -/* -** An instance of the following structure stores the in-memory part of -** the current free block list. This structure is to the free block list -** as the in-memory tree is to the users database content. The contents -** of the free block list is found by merging the in-memory components -** with those stored in the LSM, just as the contents of the database is -** found by merging the in-memory tree with the user data entries in the -** LSM. -** -** Each FreelistEntry structure in the array represents either an insert -** or delete operation on the free-list. For deletes, the FreelistEntry.iId -** field is set to -1. For inserts, it is set to zero or greater. -** -** The array of FreelistEntry structures is always sorted in order of -** block number (ascending). -** -** When the in-memory free block list is written into the LSM, each insert -** operation is written separately. The entry key is the bitwise inverse -** of the block number as a 32-bit big-endian integer. This is done so that -** the entries in the LSM are sorted in descending order of block id. -** The associated value is the snapshot id, formated as a varint. -*/ -struct Freelist { - FreelistEntry *aEntry; /* Free list entries */ - int nEntry; /* Number of valid slots in aEntry[] */ - int nAlloc; /* Allocated size of aEntry[] */ -}; -struct FreelistEntry { - u32 iBlk; /* Block number */ - i64 iId; /* Largest snapshot id to use this block */ -}; - -/* -** A snapshot of a database. A snapshot contains all the information required -** to read or write a database file on disk. See the description of struct -** Database below for futher details. -*/ -struct Snapshot { - Database *pDatabase; /* Database this snapshot belongs to */ - u32 iCmpId; /* Id of compression scheme */ - Level *pLevel; /* Pointer to level 0 of snapshot (or NULL) */ - i64 iId; /* Snapshot id */ - i64 iLogOff; /* Log file offset */ - Redirect redirect; /* Block redirection array */ - - /* Used by worker snapshots only */ - int nBlock; /* Number of blocks in database file */ - LsmPgno aiAppend[LSM_APPLIST_SZ]; /* Append point list */ - Freelist freelist; /* Free block list */ - u32 nWrite; /* Total number of pages written to disk */ -}; -#define LSM_INITIAL_SNAPSHOT_ID 11 - -/* -** Functions from file "lsm_ckpt.c". 
-*/ -int lsmCheckpointWrite(lsm_db *, u32 *); -int lsmCheckpointLevels(lsm_db *, int, void **, int *); -int lsmCheckpointLoadLevels(lsm_db *pDb, void *pVal, int nVal); - -int lsmCheckpointRecover(lsm_db *); -int lsmCheckpointDeserialize(lsm_db *, int, u32 *, Snapshot **); - -int lsmCheckpointLoadWorker(lsm_db *pDb); -int lsmCheckpointStore(lsm_db *pDb, int); - -int lsmCheckpointLoad(lsm_db *pDb, int *); -int lsmCheckpointLoadOk(lsm_db *pDb, int); -int lsmCheckpointClientCacheOk(lsm_db *); - -u32 lsmCheckpointNBlock(u32 *); -i64 lsmCheckpointId(u32 *, int); -u32 lsmCheckpointNWrite(u32 *, int); -i64 lsmCheckpointLogOffset(u32 *); -int lsmCheckpointPgsz(u32 *); -int lsmCheckpointBlksz(u32 *); -void lsmCheckpointLogoffset(u32 *aCkpt, DbLog *pLog); -void lsmCheckpointZeroLogoffset(lsm_db *); - -int lsmCheckpointSaveWorker(lsm_db *pDb, int); -int lsmDatabaseFull(lsm_db *pDb); -int lsmCheckpointSynced(lsm_db *pDb, i64 *piId, i64 *piLog, u32 *pnWrite); - -int lsmCheckpointSize(lsm_db *db, int *pnByte); - -int lsmInfoCompressionId(lsm_db *db, u32 *piCmpId); - -/* -** Functions from file "lsm_tree.c". -*/ -int lsmTreeNew(lsm_env *, int (*)(void *, int, void *, int), Tree **ppTree); -void lsmTreeRelease(lsm_env *, Tree *); -int lsmTreeInit(lsm_db *); -int lsmTreeRepair(lsm_db *); - -void lsmTreeMakeOld(lsm_db *pDb); -void lsmTreeDiscardOld(lsm_db *pDb); -int lsmTreeHasOld(lsm_db *pDb); - -int lsmTreeSize(lsm_db *); -int lsmTreeEndTransaction(lsm_db *pDb, int bCommit); -int lsmTreeLoadHeader(lsm_db *pDb, int *); -int lsmTreeLoadHeaderOk(lsm_db *, int); - -int lsmTreeInsert(lsm_db *pDb, void *pKey, int nKey, void *pVal, int nVal); -int lsmTreeDelete(lsm_db *db, void *pKey1, int nKey1, void *pKey2, int nKey2); -void lsmTreeRollback(lsm_db *pDb, TreeMark *pMark); -void lsmTreeMark(lsm_db *pDb, TreeMark *pMark); - -int lsmTreeCursorNew(lsm_db *pDb, int, TreeCursor **); -void lsmTreeCursorDestroy(TreeCursor *); - -int lsmTreeCursorSeek(TreeCursor *pCsr, void *pKey, int nKey, int *pRes); -int lsmTreeCursorNext(TreeCursor *pCsr); -int lsmTreeCursorPrev(TreeCursor *pCsr); -int lsmTreeCursorEnd(TreeCursor *pCsr, int bLast); -void lsmTreeCursorReset(TreeCursor *pCsr); -int lsmTreeCursorKey(TreeCursor *pCsr, int *pFlags, void **ppKey, int *pnKey); -int lsmTreeCursorFlags(TreeCursor *pCsr); -int lsmTreeCursorValue(TreeCursor *pCsr, void **ppVal, int *pnVal); -int lsmTreeCursorValid(TreeCursor *pCsr); -int lsmTreeCursorSave(TreeCursor *pCsr); - -void lsmFlagsToString(int flags, char *zFlags); - -/* -** Functions from file "mem.c". -*/ -void *lsmMalloc(lsm_env*, size_t); -void lsmFree(lsm_env*, void *); -void *lsmRealloc(lsm_env*, void *, size_t); -void *lsmReallocOrFree(lsm_env*, void *, size_t); -void *lsmReallocOrFreeRc(lsm_env *, void *, size_t, int *); - -void *lsmMallocZeroRc(lsm_env*, size_t, int *); -void *lsmMallocRc(lsm_env*, size_t, int *); - -void *lsmMallocZero(lsm_env *pEnv, size_t); -char *lsmMallocStrdup(lsm_env *pEnv, const char *); - -/* -** Functions from file "lsm_mutex.c". -*/ -int lsmMutexStatic(lsm_env*, int, lsm_mutex **); -int lsmMutexNew(lsm_env*, lsm_mutex **); -void lsmMutexDel(lsm_env*, lsm_mutex *); -void lsmMutexEnter(lsm_env*, lsm_mutex *); -int lsmMutexTry(lsm_env*, lsm_mutex *); -void lsmMutexLeave(lsm_env*, lsm_mutex *); - -#ifndef NDEBUG -int lsmMutexHeld(lsm_env *, lsm_mutex *); -int lsmMutexNotHeld(lsm_env *, lsm_mutex *); -#endif - -/************************************************************************** -** Start of functions from "lsm_file.c". 
-*/ -int lsmFsOpen(lsm_db *, const char *, int); -int lsmFsOpenLog(lsm_db *, int *); -void lsmFsCloseLog(lsm_db *); -void lsmFsClose(FileSystem *); - -int lsmFsUnmap(FileSystem *); - -int lsmFsConfigure(lsm_db *db); - -int lsmFsBlockSize(FileSystem *); -void lsmFsSetBlockSize(FileSystem *, int); -int lsmFsMoveBlock(FileSystem *pFS, Segment *pSeg, int iTo, int iFrom); - -int lsmFsPageSize(FileSystem *); -void lsmFsSetPageSize(FileSystem *, int); - -int lsmFsFileid(lsm_db *pDb, void **ppId, int *pnId); - -/* Creating, populating, gobbling and deleting sorted runs. */ -void lsmFsGobble(lsm_db *, Segment *, LsmPgno *, int); -int lsmFsSortedDelete(FileSystem *, Snapshot *, int, Segment *); -int lsmFsSortedFinish(FileSystem *, Segment *); -int lsmFsSortedAppend(FileSystem *, Snapshot *, Level *, int, Page **); -int lsmFsSortedPadding(FileSystem *, Snapshot *, Segment *); - -/* Functions to retrieve the lsm_env pointer from a FileSystem or Page object */ -lsm_env *lsmFsEnv(FileSystem *); -lsm_env *lsmPageEnv(Page *); -FileSystem *lsmPageFS(Page *); - -int lsmFsSectorSize(FileSystem *); - -void lsmSortedSplitkey(lsm_db *, Level *, int *); - -/* Reading sorted run content. */ -int lsmFsDbPageLast(FileSystem *pFS, Segment *pSeg, Page **ppPg); -int lsmFsDbPageGet(FileSystem *, Segment *, LsmPgno, Page **); -int lsmFsDbPageNext(Segment *, Page *, int eDir, Page **); - -u8 *lsmFsPageData(Page *, int *); -int lsmFsPageRelease(Page *); -int lsmFsPagePersist(Page *); -void lsmFsPageRef(Page *); -LsmPgno lsmFsPageNumber(Page *); - -int lsmFsNRead(FileSystem *); -int lsmFsNWrite(FileSystem *); - -int lsmFsMetaPageGet(FileSystem *, int, int, MetaPage **); -int lsmFsMetaPageRelease(MetaPage *); -u8 *lsmFsMetaPageData(MetaPage *, int *); - -#ifdef LSM_DEBUG -int lsmFsDbPageIsLast(Segment *pSeg, Page *pPg); -int lsmFsIntegrityCheck(lsm_db *); -#endif - -LsmPgno lsmFsRedirectPage(FileSystem *, Redirect *, LsmPgno); - -int lsmFsPageWritable(Page *); - -/* Functions to read, write and sync the log file. */ -int lsmFsWriteLog(FileSystem *pFS, i64 iOff, LsmString *pStr); -int lsmFsSyncLog(FileSystem *pFS); -int lsmFsReadLog(FileSystem *pFS, i64 iOff, int nRead, LsmString *pStr); -int lsmFsTruncateLog(FileSystem *pFS, i64 nByte); -int lsmFsTruncateDb(FileSystem *pFS, i64 nByte); -int lsmFsCloseAndDeleteLog(FileSystem *pFS); - -LsmFile *lsmFsDeferClose(FileSystem *pFS); - -/* And to sync the db file */ -int lsmFsSyncDb(FileSystem *, int); - -void lsmFsFlushWaiting(FileSystem *, int *); - -/* Used by lsm_info(ARRAY_STRUCTURE) and lsm_config(MMAP) */ -int lsmInfoArrayStructure(lsm_db *pDb, int bBlock, LsmPgno iFirst, char **pz); -int lsmInfoArrayPages(lsm_db *pDb, LsmPgno iFirst, char **pzOut); -int lsmConfigMmap(lsm_db *pDb, int *piParam); - -int lsmEnvOpen(lsm_env *, const char *, int, lsm_file **); -int lsmEnvClose(lsm_env *pEnv, lsm_file *pFile); -int lsmEnvLock(lsm_env *pEnv, lsm_file *pFile, int iLock, int eLock); -int lsmEnvTestLock(lsm_env *pEnv, lsm_file *pFile, int iLock, int nLock, int); - -int lsmEnvShmMap(lsm_env *, lsm_file *, int, int, void **); -void lsmEnvShmBarrier(lsm_env *); -void lsmEnvShmUnmap(lsm_env *, lsm_file *, int); - -void lsmEnvSleep(lsm_env *, int); - -int lsmFsReadSyncedId(lsm_db *db, int, i64 *piVal); - -int lsmFsSegmentContainsPg(FileSystem *pFS, Segment *, LsmPgno, int *); - -void lsmFsPurgeCache(FileSystem *); - -/* -** End of functions from "lsm_file.c". -**************************************************************************/ - -/* -** Functions from file "lsm_sorted.c". 
-*/ -int lsmInfoPageDump(lsm_db *, LsmPgno, int, char **); -void lsmSortedCleanup(lsm_db *); -int lsmSortedAutoWork(lsm_db *, int nUnit); - -int lsmSortedWalkFreelist(lsm_db *, int, int (*)(void *, int, i64), void *); - -int lsmSaveWorker(lsm_db *, int); - -int lsmFlushTreeToDisk(lsm_db *pDb); - -void lsmSortedRemap(lsm_db *pDb); - -void lsmSortedFreeLevel(lsm_env *pEnv, Level *); - -int lsmSortedAdvanceAll(lsm_db *pDb); - -int lsmSortedLoadMerge(lsm_db *, Level *, u32 *, int *); -int lsmSortedLoadFreelist(lsm_db *pDb, void **, int *); - -void *lsmSortedSplitKey(Level *pLevel, int *pnByte); - -void lsmSortedSaveTreeCursors(lsm_db *); - -int lsmMCursorNew(lsm_db *, MultiCursor **); -void lsmMCursorClose(MultiCursor *, int); -int lsmMCursorSeek(MultiCursor *, int, void *, int , int); -int lsmMCursorFirst(MultiCursor *); -int lsmMCursorPrev(MultiCursor *); -int lsmMCursorLast(MultiCursor *); -int lsmMCursorValid(MultiCursor *); -int lsmMCursorNext(MultiCursor *); -int lsmMCursorKey(MultiCursor *, void **, int *); -int lsmMCursorValue(MultiCursor *, void **, int *); -int lsmMCursorType(MultiCursor *, int *); -lsm_db *lsmMCursorDb(MultiCursor *); -void lsmMCursorFreeCache(lsm_db *); - -int lsmSaveCursors(lsm_db *pDb); -int lsmRestoreCursors(lsm_db *pDb); - -void lsmSortedDumpStructure(lsm_db *pDb, Snapshot *, int, int, const char *); -void lsmFsDumpBlocklists(lsm_db *); - -void lsmSortedExpandBtreePage(Page *pPg, int nOrig); - -void lsmPutU32(u8 *, u32); -u32 lsmGetU32(u8 *); -u64 lsmGetU64(u8 *); - -/* -** Functions from "lsm_varint.c". -*/ -int lsmVarintPut32(u8 *, int); -int lsmVarintGet32(u8 *, int *); -int lsmVarintPut64(u8 *aData, i64 iVal); -int lsmVarintGet64(const u8 *aData, i64 *piVal); - -int lsmVarintLen32(int); -int lsmVarintSize(u8 c); - -/* -** Functions from file "main.c". -*/ -void lsmLogMessage(lsm_db *, int, const char *, ...); -int lsmInfoFreelist(lsm_db *pDb, char **pzOut); - -/* -** Functions from file "lsm_log.c". -*/ -int lsmLogBegin(lsm_db *pDb); -int lsmLogWrite(lsm_db *, int, void *, int, void *, int); -int lsmLogCommit(lsm_db *); -void lsmLogEnd(lsm_db *pDb, int bCommit); -void lsmLogTell(lsm_db *, LogMark *); -void lsmLogSeek(lsm_db *, LogMark *); -void lsmLogClose(lsm_db *); - -int lsmLogRecover(lsm_db *); -int lsmInfoLogStructure(lsm_db *pDb, char **pzVal); - -/* Valid values for the second argument to lsmLogWrite(). */ -#define LSM_WRITE 0x06 -#define LSM_DELETE 0x08 -#define LSM_DRANGE 0x0A - -/************************************************************************** -** Functions from file "lsm_shared.c". 
-*/ - -int lsmDbDatabaseConnect(lsm_db*, const char *); -void lsmDbDatabaseRelease(lsm_db *); - -int lsmBeginReadTrans(lsm_db *); -int lsmBeginWriteTrans(lsm_db *); -int lsmBeginFlush(lsm_db *); - -int lsmDetectRoTrans(lsm_db *db, int *); -int lsmBeginRoTrans(lsm_db *db); - -int lsmBeginWork(lsm_db *); -void lsmFinishWork(lsm_db *, int, int *); - -int lsmFinishRecovery(lsm_db *); -void lsmFinishReadTrans(lsm_db *); -int lsmFinishWriteTrans(lsm_db *, int); -int lsmFinishFlush(lsm_db *, int); - -int lsmSnapshotSetFreelist(lsm_db *, int *, int); - -Snapshot *lsmDbSnapshotClient(lsm_db *); -Snapshot *lsmDbSnapshotWorker(lsm_db *); - -void lsmSnapshotSetCkptid(Snapshot *, i64); - -Level *lsmDbSnapshotLevel(Snapshot *); -void lsmDbSnapshotSetLevel(Snapshot *, Level *); - -void lsmDbRecoveryComplete(lsm_db *, int); - -int lsmBlockAllocate(lsm_db *, int, int *); -int lsmBlockFree(lsm_db *, int); -int lsmBlockRefree(lsm_db *, int); - -void lsmFreelistDeltaBegin(lsm_db *); -void lsmFreelistDeltaEnd(lsm_db *); -int lsmFreelistDelta(lsm_db *pDb); - -DbLog *lsmDatabaseLog(lsm_db *pDb); - -#ifdef LSM_DEBUG - int lsmHoldingClientMutex(lsm_db *pDb); - int lsmShmAssertLock(lsm_db *db, int iLock, int eOp); - int lsmShmAssertWorker(lsm_db *db); -#endif - -void lsmFreeSnapshot(lsm_env *, Snapshot *); - - -/* Candidate values for the 3rd argument to lsmShmLock() */ -#define LSM_LOCK_UNLOCK 0 -#define LSM_LOCK_SHARED 1 -#define LSM_LOCK_EXCL 2 - -int lsmShmCacheChunks(lsm_db *db, int nChunk); -int lsmShmLock(lsm_db *db, int iLock, int eOp, int bBlock); -int lsmShmTestLock(lsm_db *db, int iLock, int nLock, int eOp); -void lsmShmBarrier(lsm_db *db); - -#ifdef LSM_DEBUG -void lsmShmHasLock(lsm_db *db, int iLock, int eOp); -#else -# define lsmShmHasLock(x,y,z) -#endif - -int lsmReadlock(lsm_db *, i64 iLsm, u32 iShmMin, u32 iShmMax); - -int lsmLsmInUse(lsm_db *db, i64 iLsmId, int *pbInUse); -int lsmTreeInUse(lsm_db *db, u32 iLsmId, int *pbInUse); -int lsmFreelistAppend(lsm_env *pEnv, Freelist *p, int iBlk, i64 iId); - -int lsmDbMultiProc(lsm_db *); -void lsmDbDeferredClose(lsm_db *, lsm_file *, LsmFile *); -LsmFile *lsmDbRecycleFd(lsm_db *); - -int lsmWalkFreelist(lsm_db *, int, int (*)(void *, int, i64), void *); - -int lsmCheckCompressionId(lsm_db *, u32); - - -/************************************************************************** -** functions in lsm_str.c -*/ -void lsmStringInit(LsmString*, lsm_env *pEnv); -int lsmStringExtend(LsmString*, int); -int lsmStringAppend(LsmString*, const char *, int); -void lsmStringVAppendf(LsmString*, const char *zFormat, va_list, va_list); -void lsmStringAppendf(LsmString*, const char *zFormat, ...); -void lsmStringClear(LsmString*); -char *lsmMallocPrintf(lsm_env*, const char*, ...); -int lsmStringBinAppend(LsmString *pStr, const u8 *a, int n); - -int lsmStrlen(const char *zName); - - - -/* -** Round up a number to the next larger multiple of 8. This is used -** to force 8-byte alignment on 64-bit architectures. -*/ -#define ROUND8(x) (((x)+7)&~7) - -#define LSM_MIN(x,y) ((x)>(y) ? (y) : (x)) -#define LSM_MAX(x,y) ((x)>(y) ? (x) : (y)) - -#endif diff --git a/ext/lsm1/lsm_ckpt.c b/ext/lsm1/lsm_ckpt.c deleted file mode 100644 index ba92a823cf..0000000000 --- a/ext/lsm1/lsm_ckpt.c +++ /dev/null @@ -1,1239 +0,0 @@ -/* -** 2011-09-11 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. 
-** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains code to read and write checkpoints. -** -** A checkpoint represents the database layout at a single point in time. -** It includes a log offset. When an existing database is opened, the -** current state is determined by reading the newest checkpoint and updating -** it with all committed transactions from the log that follow the specified -** offset. -*/ -#include "lsmInt.h" - -/* -** CHECKPOINT BLOB FORMAT: -** -** A checkpoint blob is a series of unsigned 32-bit integers stored in -** big-endian byte order. As follows: -** -** Checkpoint header (see the CKPT_HDR_XXX #defines): -** -** 1. The checkpoint id MSW. -** 2. The checkpoint id LSW. -** 3. The number of integer values in the entire checkpoint, including -** the two checksum values. -** 4. The compression scheme id. -** 5. The total number of blocks in the database. -** 6. The block size. -** 7. The number of levels. -** 8. The nominal database page size. -** 9. The number of pages (in total) written to the database file. -** -** Log pointer: -** -** 1. The log offset MSW. -** 2. The log offset LSW. -** 3. Log checksum 0. -** 4. Log checksum 1. -** -** Note that the "log offset" is not the literal byte offset. Instead, -** it is the byte offset multiplied by 2, with least significant bit -** toggled each time the log pointer value is changed. This is to make -** sure that this field changes each time the log pointer is updated, -** even if the log file itself is disabled. See lsmTreeMakeOld(). -** -** See ckptExportLog() and ckptImportLog(). -** -** Append points: -** -** 8 integers (4 * 64-bit page numbers). See ckptExportAppendlist(). -** -** For each level in the database, a level record. Formatted as follows: -** -** 0. Age of the level (least significant 16-bits). And flags mask (most -** significant 16-bits). -** 1. The number of right-hand segments (nRight, possibly 0), -** 2. Segment record for left-hand segment (8 integers defined below), -** 3. Segment record for each right-hand segment (8 integers defined below), -** 4. If nRight>0, The number of segments involved in the merge -** 5. if nRight>0, Current nSkip value (see Merge structure defn.), -** 6. For each segment in the merge: -** 5a. Page number of next cell to read during merge (this field -** is 64-bits - 2 integers) -** 5b. Cell number of next cell to read during merge -** 7. Page containing current split-key (64-bits - 2 integers). -** 8. Cell within page containing current split-key. -** 9. Current pointer value (64-bits - 2 integers). -** -** The block redirect array: -** -** 1. Number of redirections (maximum LSM_MAX_BLOCK_REDIRECTS). -** 2. For each redirection: -** a. "from" block number -** b. "to" block number -** -** The in-memory freelist entries. Each entry is either an insert or a -** delete. The in-memory freelist is to the free-block-list as the -** in-memory tree is to the users database content. -** -** 1. Number of free-list entries stored in checkpoint header. -** 2. Number of free blocks (in total). -** 3. Total number of blocks freed during database lifetime. -** 4. For each entry: -** 2a. Block number of free block. -** 2b. A 64-bit integer (MSW followed by LSW). -1 for a delete entry, -** or the associated checkpoint id for an insert. -** -** The checksum: -** -** 1. Checksum value 1. -** 2. Checksum value 2. 
-** -** In the above, a segment record consists of the following four 64-bit -** fields (converted to 2 * u32 by storing the MSW followed by LSW): -** -** 1. First page of array, -** 2. Last page of array, -** 3. Root page of array (or 0), -** 4. Size of array in pages. -*/ - -/* -** LARGE NUMBERS OF LEVEL RECORDS: -** -** A limit on the number of rhs segments that may be present in the database -** file. Defining this limit ensures that all level records fit within -** the 4096 byte limit for checkpoint blobs. -** -** The number of right-hand-side segments in a database is counted as -** follows: -** -** * For each level in the database not undergoing a merge, add 1. -** -** * For each level in the database that is undergoing a merge, add -** the number of segments on the rhs of the level. -** -** A level record not undergoing a merge is 10 integers. A level record -** with nRhs rhs segments and (nRhs+1) input segments (i.e. including the -** separators from the next level) is (11*nRhs+20) integers. The maximum -** per right-hand-side level is therefore 21 integers. So the maximum -** size of all level records in a checkpoint is 21*40=820 integers. -** -** TODO: Before pointer values were changed from 32 to 64 bits, the above -** used to come to 420 bytes - leaving significant space for a free-list -** prefix. No more. To fix this, reduce the size of the level records in -** a db snapshot, and improve management of the free-list tail in -** lsm_sorted.c. -*/ -#define LSM_MAX_RHS_SEGMENTS 40 - -/* -** LARGE NUMBERS OF FREELIST ENTRIES: -** -** There is also a limit (LSM_MAX_FREELIST_ENTRIES - defined in lsmInt.h) -** on the number of free-list entries stored in a checkpoint. Since each -** free-list entry consists of 3 integers, the maximum free-list size is -** 3*100=300 integers. Combined with the limit on rhs segments defined -** above, this ensures that a checkpoint always fits within a 4096 byte -** meta page. -** -** If the database contains more than 100 free blocks, the "overflow" flag -** in the checkpoint header is set and the remainder are stored in the -** system FREELIST entry in the LSM (along with user data). The value -** accompanying the FREELIST key in the LSM is, like a checkpoint, an array -** of 32-bit big-endian integers. As follows: -** -** For each entry: -** a. Block number of free block. -** b. MSW of associated checkpoint id. -** c. LSW of associated checkpoint id. -** -** The number of entries is not required - it is implied by the size of the -** value blob containing the integer array. -** -** Note that the limit defined by LSM_MAX_FREELIST_ENTRIES is a hard limit. -** The actual value used may be configured using LSM_CONFIG_MAX_FREELIST. -*/ - -/* -** The argument to this macro must be of type u32. On a little-endian -** architecture, it returns the u32 value that results from interpreting -** the 4 bytes as a big-endian value. On a big-endian architecture, it -** returns the value that would be produced by intepreting the 4 bytes -** of the input value as a little-endian integer. -*/ -#define BYTESWAP32(x) ( \ - (((x)&0x000000FF)<<24) + (((x)&0x0000FF00)<<8) \ - + (((x)&0x00FF0000)>>8) + (((x)&0xFF000000)>>24) \ -) - -static const int one = 1; -#define LSM_LITTLE_ENDIAN (*(u8 *)(&one)) - -/* Sizes, in integers, of various parts of the checkpoint. */ -#define CKPT_HDR_SIZE 9 -#define CKPT_LOGPTR_SIZE 4 -#define CKPT_APPENDLIST_SIZE (LSM_APPLIST_SZ * 2) - -/* A #define to describe each integer in the checkpoint header. 
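The overflow FREELIST value described above is a packed array of big-endian 32-bit integers, three per entry, with the entry count implied by the size of the blob. A hedged sketch of walking such a blob (the reader helpers are illustrative, not the lsm1 API):

#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *a){
  return ((uint32_t)a[0]<<24) | ((uint32_t)a[1]<<16)
       | ((uint32_t)a[2]<<8)  |  (uint32_t)a[3];
}

/* Walk a FREELIST overflow value: 3 big-endian u32 per entry (block
** number, then the MSW/LSW of the associated checkpoint id). */
static void walkFreelistBlob(const uint8_t *aBlob, int nBlob){
  int nEntry = nBlob / 12;              /* 3 * sizeof(u32) per entry */
  int i;
  for(i=0; i<nEntry; i++){
    const uint8_t *a = &aBlob[i*12];
    uint32_t iBlk = be32(a);
    int64_t iId = ((int64_t)be32(a+4) << 32) | (int64_t)be32(a+8);
    printf("free block %u freed at checkpoint %lld\n",
           (unsigned)iBlk, (long long)iId);
  }
}

int main(void){
  /* One made-up entry: block 7, checkpoint id 42. */
  static const uint8_t aBlob[12] = {0,0,0,7, 0,0,0,0, 0,0,0,42};
  walkFreelistBlob(aBlob, (int)sizeof(aBlob));
  return 0;
}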
*/ -#define CKPT_HDR_ID_MSW 0 -#define CKPT_HDR_ID_LSW 1 -#define CKPT_HDR_NCKPT 2 -#define CKPT_HDR_CMPID 3 -#define CKPT_HDR_NBLOCK 4 -#define CKPT_HDR_BLKSZ 5 -#define CKPT_HDR_NLEVEL 6 -#define CKPT_HDR_PGSZ 7 -#define CKPT_HDR_NWRITE 8 - -#define CKPT_HDR_LO_MSW 9 -#define CKPT_HDR_LO_LSW 10 -#define CKPT_HDR_LO_CKSUM1 11 -#define CKPT_HDR_LO_CKSUM2 12 - -typedef struct CkptBuffer CkptBuffer; - -/* -** Dynamic buffer used to accumulate data for a checkpoint. -*/ -struct CkptBuffer { - lsm_env *pEnv; - int nAlloc; - u32 *aCkpt; -}; - -/* -** Calculate the checksum of the checkpoint specified by arguments aCkpt and -** nCkpt. Store the checksum in *piCksum1 and *piCksum2 before returning. -** -** The value of the nCkpt parameter includes the two checksum values at -** the end of the checkpoint. They are not used as inputs to the checksum -** calculation. The checksum is based on the array of (nCkpt-2) integers -** at aCkpt[]. -*/ -static void ckptChecksum(u32 *aCkpt, u32 nCkpt, u32 *piCksum1, u32 *piCksum2){ - u32 i; - u32 cksum1 = 1; - u32 cksum2 = 2; - - if( nCkpt % 2 ){ - cksum1 += aCkpt[nCkpt-3] & 0x0000FFFF; - cksum2 += aCkpt[nCkpt-3] & 0xFFFF0000; - } - - for(i=0; (i+3)=p->nAlloc ){ - int nNew = LSM_MAX(8, iIdx*2); - p->aCkpt = (u32 *)lsmReallocOrFree(p->pEnv, p->aCkpt, nNew*sizeof(u32)); - if( !p->aCkpt ){ - *pRc = LSM_NOMEM_BKPT; - return; - } - p->nAlloc = nNew; - } - p->aCkpt[iIdx] = iVal; -} - -/* -** Argument aInt points to an array nInt elements in size. Switch the -** endian-ness of each element of the array. -*/ -static void ckptChangeEndianness(u32 *aInt, int nInt){ - if( LSM_LITTLE_ENDIAN ){ - int i; - for(i=0; iaCkpt, nCkpt+2, &aCksum[0], &aCksum[1]); - ckptSetValue(p, nCkpt, aCksum[0], pRc); - ckptSetValue(p, nCkpt+1, aCksum[1], pRc); - } -} - -static void ckptAppend64(CkptBuffer *p, int *piOut, i64 iVal, int *pRc){ - int iOut = *piOut; - ckptSetValue(p, iOut++, (iVal >> 32) & 0xFFFFFFFF, pRc); - ckptSetValue(p, iOut++, (iVal & 0xFFFFFFFF), pRc); - *piOut = iOut; -} - -static i64 ckptRead64(u32 *a){ - return (((i64)a[0]) << 32) + (i64)a[1]; -} - -static i64 ckptGobble64(u32 *a, int *piIn){ - int iIn = *piIn; - *piIn += 2; - return ckptRead64(&a[iIn]); -} - - -/* -** Append a 6-value segment record corresponding to pSeg to the checkpoint -** buffer passed as the third argument. 
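The body of ckptChecksum() above is mangled in this diff (text between '<' and '>' characters was stripped during extraction). Its comment states that the checksum covers the first nCkpt-2 words, and the seed values and odd-count handling are still visible. A sketch consistent with that description follows; the accumulation inside the loop is an assumption and should be taken from the upstream lsm_ckpt.c rather than from this sketch:

#include <stdint.h>
#include <stdio.h>

/* Hedged reconstruction of the mangled checksum routine. The seeds (1, 2)
** and the nCkpt%2 adjustment are visible in the diff; the loop body is an
** assumption. */
static void ckptChecksumSketch(
  const uint32_t *aCkpt, uint32_t nCkpt,
  uint32_t *piCksum1, uint32_t *piCksum2
){
  uint32_t i;
  uint32_t cksum1 = 1;
  uint32_t cksum2 = 2;

  if( nCkpt % 2 ){
    cksum1 += aCkpt[nCkpt-3] & 0x0000FFFF;
    cksum2 += aCkpt[nCkpt-3] & 0xFFFF0000;
  }

  /* Fletcher-style accumulation over the first nCkpt-2 words. */
  for(i=0; (i+3)<nCkpt; i+=2){
    cksum1 += cksum2 + aCkpt[i];
    cksum2 += cksum1 + aCkpt[i+1];
  }

  *piCksum1 = cksum1;
  *piCksum2 = cksum2;
}

int main(void){
  uint32_t a[6] = {0, 10, 6, 0, 0, 0};   /* last two words hold the checksum */
  uint32_t c1, c2;
  ckptChecksumSketch(a, 6, &c1, &c2);
  printf("cksum1=%u cksum2=%u\n", (unsigned)c1, (unsigned)c2);
  return 0;
}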
-*/ -static void ckptExportSegment( - Segment *pSeg, - CkptBuffer *p, - int *piOut, - int *pRc -){ - ckptAppend64(p, piOut, pSeg->iFirst, pRc); - ckptAppend64(p, piOut, pSeg->iLastPg, pRc); - ckptAppend64(p, piOut, pSeg->iRoot, pRc); - ckptAppend64(p, piOut, pSeg->nSize, pRc); -} - -static void ckptExportLevel( - Level *pLevel, /* Level object to serialize */ - CkptBuffer *p, /* Append new level record to this ckpt */ - int *piOut, /* IN/OUT: Size of checkpoint so far */ - int *pRc /* IN/OUT: Error code */ -){ - int iOut = *piOut; - Merge *pMerge; - - pMerge = pLevel->pMerge; - ckptSetValue(p, iOut++, (u32)pLevel->iAge + (u32)(pLevel->flags<<16), pRc); - ckptSetValue(p, iOut++, pLevel->nRight, pRc); - ckptExportSegment(&pLevel->lhs, p, &iOut, pRc); - - assert( (pLevel->nRight>0)==(pMerge!=0) ); - if( pMerge ){ - int i; - for(i=0; inRight; i++){ - ckptExportSegment(&pLevel->aRhs[i], p, &iOut, pRc); - } - assert( pMerge->nInput==pLevel->nRight - || pMerge->nInput==pLevel->nRight+1 - ); - ckptSetValue(p, iOut++, pMerge->nInput, pRc); - ckptSetValue(p, iOut++, pMerge->nSkip, pRc); - for(i=0; inInput; i++){ - ckptAppend64(p, &iOut, pMerge->aInput[i].iPg, pRc); - ckptSetValue(p, iOut++, pMerge->aInput[i].iCell, pRc); - } - ckptAppend64(p, &iOut, pMerge->splitkey.iPg, pRc); - ckptSetValue(p, iOut++, pMerge->splitkey.iCell, pRc); - ckptAppend64(p, &iOut, pMerge->iCurrentPtr, pRc); - } - - *piOut = iOut; -} - -/* -** Populate the log offset fields of the checkpoint buffer. 4 values. -*/ -static void ckptExportLog( - lsm_db *pDb, - int bFlush, - CkptBuffer *p, - int *piOut, - int *pRc -){ - int iOut = *piOut; - - assert( iOut==CKPT_HDR_LO_MSW ); - - if( bFlush ){ - i64 iOff = pDb->treehdr.iOldLog; - ckptAppend64(p, &iOut, iOff, pRc); - ckptSetValue(p, iOut++, pDb->treehdr.oldcksum0, pRc); - ckptSetValue(p, iOut++, pDb->treehdr.oldcksum1, pRc); - }else{ - for(; iOut<=CKPT_HDR_LO_CKSUM2; iOut++){ - ckptSetValue(p, iOut, pDb->pShmhdr->aSnap2[iOut], pRc); - } - } - - assert( *pRc || iOut==CKPT_HDR_LO_CKSUM2+1 ); - *piOut = iOut; -} - -static void ckptExportAppendlist( - lsm_db *db, /* Database connection */ - CkptBuffer *p, /* Checkpoint buffer to write to */ - int *piOut, /* IN/OUT: Offset within checkpoint buffer */ - int *pRc /* IN/OUT: Error code */ -){ - int i; - LsmPgno *aiAppend = db->pWorker->aiAppend; - - for(i=0; ipFS; /* File system object */ - Snapshot *pSnap = pDb->pWorker; /* Worker snapshot */ - int nLevel = 0; /* Number of levels in checkpoint */ - int iLevel; /* Used to count out nLevel levels */ - int iOut = 0; /* Current offset in aCkpt[] */ - Level *pLevel; /* Level iterator */ - int i; /* Iterator used while serializing freelist */ - CkptBuffer ckpt; - - /* Initialize the output buffer */ - memset(&ckpt, 0, sizeof(CkptBuffer)); - ckpt.pEnv = pDb->pEnv; - iOut = CKPT_HDR_SIZE; - - /* Write the log offset into the checkpoint. */ - ckptExportLog(pDb, bLog, &ckpt, &iOut, &rc); - - /* Write the append-point list */ - ckptExportAppendlist(pDb, &ckpt, &iOut, &rc); - - /* Figure out how many levels will be written to the checkpoint. */ - for(pLevel=lsmDbSnapshotLevel(pSnap); pLevel; pLevel=pLevel->pNext) nLevel++; - - /* Serialize nLevel levels. 
*/ - iLevel = 0; - for(pLevel=lsmDbSnapshotLevel(pSnap); iLevelpNext){ - ckptExportLevel(pLevel, &ckpt, &iOut, &rc); - iLevel++; - } - - /* Write the block-redirect list */ - ckptSetValue(&ckpt, iOut++, pSnap->redirect.n, &rc); - for(i=0; iredirect.n; i++){ - ckptSetValue(&ckpt, iOut++, pSnap->redirect.a[i].iFrom, &rc); - ckptSetValue(&ckpt, iOut++, pSnap->redirect.a[i].iTo, &rc); - } - - /* Write the freelist */ - assert( pSnap->freelist.nEntry<=pDb->nMaxFreelist ); - if( rc==LSM_OK ){ - int nFree = pSnap->freelist.nEntry; - ckptSetValue(&ckpt, iOut++, nFree, &rc); - for(i=0; ifreelist.aEntry[i]; - ckptSetValue(&ckpt, iOut++, p->iBlk, &rc); - ckptSetValue(&ckpt, iOut++, (p->iId >> 32) & 0xFFFFFFFF, &rc); - ckptSetValue(&ckpt, iOut++, p->iId & 0xFFFFFFFF, &rc); - } - } - - /* Write the checkpoint header */ - assert( iId>=0 ); - assert( pSnap->iCmpId==pDb->compress.iId - || pSnap->iCmpId==LSM_COMPRESSION_EMPTY - ); - ckptSetValue(&ckpt, CKPT_HDR_ID_MSW, (u32)(iId>>32), &rc); - ckptSetValue(&ckpt, CKPT_HDR_ID_LSW, (u32)(iId&0xFFFFFFFF), &rc); - ckptSetValue(&ckpt, CKPT_HDR_NCKPT, iOut+2, &rc); - ckptSetValue(&ckpt, CKPT_HDR_CMPID, pDb->compress.iId, &rc); - ckptSetValue(&ckpt, CKPT_HDR_NBLOCK, pSnap->nBlock, &rc); - ckptSetValue(&ckpt, CKPT_HDR_BLKSZ, lsmFsBlockSize(pFS), &rc); - ckptSetValue(&ckpt, CKPT_HDR_NLEVEL, nLevel, &rc); - ckptSetValue(&ckpt, CKPT_HDR_PGSZ, lsmFsPageSize(pFS), &rc); - ckptSetValue(&ckpt, CKPT_HDR_NWRITE, pSnap->nWrite, &rc); - - if( bCksum ){ - ckptAddChecksum(&ckpt, iOut, &rc); - }else{ - ckptSetValue(&ckpt, iOut, 0, &rc); - ckptSetValue(&ckpt, iOut+1, 0, &rc); - } - iOut += 2; - assert( iOut<=1024 ); - -#ifdef LSM_LOG_FREELIST - lsmLogMessage(pDb, rc, - "ckptExportSnapshot(): id=%lld freelist: %d", iId, pSnap->freelist.nEntry - ); - for(i=0; ifreelist.nEntry; i++){ - lsmLogMessage(pDb, rc, - "ckptExportSnapshot(): iBlk=%d id=%lld", - pSnap->freelist.aEntry[i].iBlk, - pSnap->freelist.aEntry[i].iId - ); - } -#endif - - *ppCkpt = (void *)ckpt.aCkpt; - if( pnCkpt ) *pnCkpt = sizeof(u32)*iOut; - return rc; -} - - -/* -** Helper function for ckptImport(). -*/ -static void ckptNewSegment( - u32 *aIn, - int *piIn, - Segment *pSegment /* Populate this structure */ -){ - assert( pSegment->iFirst==0 && pSegment->iLastPg==0 ); - assert( pSegment->nSize==0 && pSegment->iRoot==0 ); - pSegment->iFirst = ckptGobble64(aIn, piIn); - pSegment->iLastPg = ckptGobble64(aIn, piIn); - pSegment->iRoot = ckptGobble64(aIn, piIn); - pSegment->nSize = (int)ckptGobble64(aIn, piIn); - assert( pSegment->iFirst ); -} - -static int ckptSetupMerge(lsm_db *pDb, u32 *aInt, int *piIn, Level *pLevel){ - Merge *pMerge; /* Allocated Merge object */ - int nInput; /* Number of input segments in merge */ - int iIn = *piIn; /* Next value to read from aInt[] */ - int i; /* Iterator variable */ - int nByte; /* Number of bytes to allocate */ - - /* Allocate the Merge object. If malloc() fails, return LSM_NOMEM. */ - nInput = (int)aInt[iIn++]; - nByte = sizeof(Merge) + sizeof(MergeInput) * nInput; - pMerge = (Merge *)lsmMallocZero(pDb->pEnv, nByte); - if( !pMerge ) return LSM_NOMEM_BKPT; - pLevel->pMerge = pMerge; - - /* Populate the Merge object. 
*/ - pMerge->aInput = (MergeInput *)&pMerge[1]; - pMerge->nInput = nInput; - pMerge->iOutputOff = -1; - pMerge->nSkip = (int)aInt[iIn++]; - for(i=0; iaInput[i].iPg = ckptGobble64(aInt, &iIn); - pMerge->aInput[i].iCell = (int)aInt[iIn++]; - } - pMerge->splitkey.iPg = ckptGobble64(aInt, &iIn); - pMerge->splitkey.iCell = (int)aInt[iIn++]; - pMerge->iCurrentPtr = ckptGobble64(aInt, &iIn); - - /* Set *piIn and return LSM_OK. */ - *piIn = iIn; - return LSM_OK; -} - - -static int ckptLoadLevels( - lsm_db *pDb, - u32 *aIn, - int *piIn, - int nLevel, - Level **ppLevel -){ - int i; - int rc = LSM_OK; - Level *pRet = 0; - Level **ppNext; - int iIn = *piIn; - - ppNext = &pRet; - for(i=0; rc==LSM_OK && ipEnv, sizeof(Level), &rc); - if( rc==LSM_OK ){ - pLevel->iAge = (u16)(aIn[iIn] & 0x0000FFFF); - pLevel->flags = (u16)((aIn[iIn]>>16) & 0x0000FFFF); - iIn++; - pLevel->nRight = aIn[iIn++]; - if( pLevel->nRight ){ - int nByte = sizeof(Segment) * pLevel->nRight; - pLevel->aRhs = (Segment *)lsmMallocZeroRc(pDb->pEnv, nByte, &rc); - } - if( rc==LSM_OK ){ - *ppNext = pLevel; - ppNext = &pLevel->pNext; - - /* Allocate the main segment */ - ckptNewSegment(aIn, &iIn, &pLevel->lhs); - - /* Allocate each of the right-hand segments, if any */ - for(iRight=0; iRightnRight; iRight++){ - ckptNewSegment(aIn, &iIn, &pLevel->aRhs[iRight]); - } - - /* Set up the Merge object, if required */ - if( pLevel->nRight>0 ){ - rc = ckptSetupMerge(pDb, aIn, &iIn, pLevel); - } - } - } - } - - if( rc!=LSM_OK ){ - /* An OOM must have occurred. Free any level structures allocated and - ** return the error to the caller. */ - lsmSortedFreeLevel(pDb->pEnv, pRet); - pRet = 0; - } - - *ppLevel = pRet; - *piIn = iIn; - return rc; -} - - -int lsmCheckpointLoadLevels(lsm_db *pDb, void *pVal, int nVal){ - int rc = LSM_OK; - if( nVal>0 ){ - u32 *aIn; - - aIn = lsmMallocRc(pDb->pEnv, nVal, &rc); - if( aIn ){ - Level *pLevel = 0; - Level *pParent; - - int nIn; - int nLevel; - int iIn = 1; - memcpy(aIn, pVal, nVal); - nIn = nVal / sizeof(u32); - - ckptChangeEndianness(aIn, nIn); - nLevel = aIn[0]; - rc = ckptLoadLevels(pDb, aIn, &iIn, nLevel, &pLevel); - lsmFree(pDb->pEnv, aIn); - assert( rc==LSM_OK || pLevel==0 ); - if( rc==LSM_OK ){ - pParent = lsmDbSnapshotLevel(pDb->pWorker); - assert( pParent ); - while( pParent->pNext ) pParent = pParent->pNext; - pParent->pNext = pLevel; - } - } - } - - return rc; -} - -/* -** Return the data for the LEVELS record. -** -** The size of the checkpoint that can be stored in the database header -** must not exceed 1024 32-bit integers. Normally, it does not. However, -** if it does, part of the checkpoint must be stored in the LSM. This -** routine returns that part. 
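As the comment above notes, the header copy of a checkpoint is capped at 1024 32-bit integers (one 4096-byte meta page); anything beyond that spills into the LEVELS record stored in the LSM itself. A trivial sketch of that decision (the constant names here are illustrative):

#include <stdio.h>

#define META_PAGE_BYTES 4096
#define META_PAGE_WORDS (META_PAGE_BYTES / 4)   /* 1024 u32 values */

/* Return non-zero if a checkpoint of nInt serialized integers needs to
** spill part of its level records into a LEVELS record in the LSM. */
static int ckptNeedsLevelsRecord(int nInt){
  return nInt > META_PAGE_WORDS;
}

int main(void){
  printf("900 words spill?  %d\n", ckptNeedsLevelsRecord(900));   /* 0 */
  printf("1200 words spill? %d\n", ckptNeedsLevelsRecord(1200));  /* 1 */
  return 0;
}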
-*/ -int lsmCheckpointLevels( - lsm_db *pDb, /* Database handle */ - int nLevel, /* Number of levels to write to blob */ - void **paVal, /* OUT: Pointer to LEVELS blob */ - int *pnVal /* OUT: Size of LEVELS blob in bytes */ -){ - Level *p; /* Used to iterate through levels */ - int nAll= 0; - int rc; - int i; - int iOut; - CkptBuffer ckpt; - assert( nLevel>0 ); - - for(p=lsmDbSnapshotLevel(pDb->pWorker); p; p=p->pNext) nAll++; - - assert( nAll>nLevel ); - nAll -= nLevel; - for(p=lsmDbSnapshotLevel(pDb->pWorker); p && nAll>0; p=p->pNext) nAll--; - - memset(&ckpt, 0, sizeof(CkptBuffer)); - ckpt.pEnv = pDb->pEnv; - - ckptSetValue(&ckpt, 0, nLevel, &rc); - iOut = 1; - for(i=0; rc==LSM_OK && ipNext; - } - assert( rc!=LSM_OK || p==0 ); - - if( rc==LSM_OK ){ - ckptChangeEndianness(ckpt.aCkpt, iOut); - *paVal = (void *)ckpt.aCkpt; - *pnVal = iOut * sizeof(u32); - }else{ - *pnVal = 0; - *paVal = 0; - } - - return rc; -} - -/* -** Read the checkpoint id from meta-page pPg. -*/ -static i64 ckptLoadId(MetaPage *pPg){ - i64 ret = 0; - if( pPg ){ - int nData; - u8 *aData = lsmFsMetaPageData(pPg, &nData); - ret = (((i64)lsmGetU32(&aData[CKPT_HDR_ID_MSW*4])) << 32) + - ((i64)lsmGetU32(&aData[CKPT_HDR_ID_LSW*4])); - } - return ret; -} - -/* -** Return true if the buffer passed as an argument contains a valid -** checkpoint. -*/ -static int ckptChecksumOk(u32 *aCkpt){ - u32 nCkpt = aCkpt[CKPT_HDR_NCKPT]; - u32 cksum1; - u32 cksum2; - - if( nCkpt(LSM_META_RW_PAGE_SIZE)/sizeof(u32) ){ - return 0; - } - ckptChecksum(aCkpt, nCkpt, &cksum1, &cksum2); - return (cksum1==aCkpt[nCkpt-2] && cksum2==aCkpt[nCkpt-1]); -} - -/* -** Attempt to load a checkpoint from meta page iMeta. -** -** This function is a no-op if *pRc is set to any value other than LSM_OK -** when it is called. If an error occurs, *pRc is set to an LSM error code -** before returning. -** -** If no error occurs and the checkpoint is successfully loaded, copy it to -** ShmHeader.aSnap1[] and ShmHeader.aSnap2[], and set ShmHeader.iMetaPage -** to indicate its origin. In this case return 1. Or, if the checkpoint -** cannot be loaded (because the checksum does not compute), return 0. -*/ -static int ckptTryLoad(lsm_db *pDb, MetaPage *pPg, u32 iMeta, int *pRc){ - int bLoaded = 0; /* Return value */ - if( *pRc==LSM_OK ){ - int rc = LSM_OK; /* Error code */ - u32 *aCkpt = 0; /* Pointer to buffer containing checkpoint */ - u32 nCkpt; /* Number of elements in aCkpt[] */ - int nData; /* Bytes of data in aData[] */ - u8 *aData; /* Meta page data */ - - aData = lsmFsMetaPageData(pPg, &nData); - nCkpt = (u32)lsmGetU32(&aData[CKPT_HDR_NCKPT*sizeof(u32)]); - if( nCkpt<=nData/sizeof(u32) && nCkpt>CKPT_HDR_NCKPT ){ - aCkpt = (u32 *)lsmMallocRc(pDb->pEnv, nCkpt*sizeof(u32), &rc); - } - if( aCkpt ){ - memcpy(aCkpt, aData, nCkpt*sizeof(u32)); - ckptChangeEndianness(aCkpt, nCkpt); - if( ckptChecksumOk(aCkpt) ){ - ShmHeader *pShm = pDb->pShmhdr; - memcpy(pShm->aSnap1, aCkpt, nCkpt*sizeof(u32)); - memcpy(pShm->aSnap2, aCkpt, nCkpt*sizeof(u32)); - memcpy(pDb->aSnapshot, aCkpt, nCkpt*sizeof(u32)); - pShm->iMetaPage = iMeta; - bLoaded = 1; - } - } - - lsmFree(pDb->pEnv, aCkpt); - *pRc = rc; - } - return bLoaded; -} - -/* -** Initialize the shared-memory header with an empty snapshot. This function -** is called when no valid snapshot can be found in the database header. 
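ckptChecksumOk() above also lost text to the same '<'-stripping. Per its comment, the intent is to reject a blob whose stored word count is out of range and otherwise compare the stored checksum words against a freshly computed pair. A hedged sketch of that shape (the lower bound and constant names are assumptions):

#include <stdint.h>
#include <stdio.h>

#define META_RW_PAGE_WORDS (4096/4)  /* stand-in for LSM_META_RW_PAGE_SIZE/sizeof(u32) */
#define HDR_NCKPT_SLOT 2             /* index of the word-count field (CKPT_HDR_NCKPT) */

typedef void (*ckpt_cksum_fn)(const uint32_t*, uint32_t, uint32_t*, uint32_t*);

/* Shape of ckptChecksumOk(): bounds-check the stored word count, then
** compare the last two words against a recomputed checksum pair. */
static int ckptChecksumOkSketch(const uint32_t *aCkpt, ckpt_cksum_fn xCksum){
  uint32_t nCkpt = aCkpt[HDR_NCKPT_SLOT];
  uint32_t c1, c2;
  if( nCkpt<HDR_NCKPT_SLOT+2 || nCkpt>META_RW_PAGE_WORDS ) return 0;
  xCksum(aCkpt, nCkpt, &c1, &c2);
  return (c1==aCkpt[nCkpt-2] && c2==aCkpt[nCkpt-1]);
}

/* Dummy checksum used only to exercise the sketch. */
static void dummyCksum(const uint32_t *a, uint32_t n, uint32_t *p1, uint32_t *p2){
  uint32_t i, s1=1, s2=2;
  for(i=0; i+2<n; i++){ s1 += a[i]; s2 += s1; }
  *p1 = s1; *p2 = s2;
}

int main(void){
  uint32_t a[8] = {0, 10, 8, 0, 0, 0, 0, 0};
  dummyCksum(a, 8, &a[6], &a[7]);              /* stamp a valid checksum */
  printf("checksum ok = %d\n", ckptChecksumOkSketch(a, dummyCksum));
  return 0;
}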
-*/ -static void ckptLoadEmpty(lsm_db *pDb){ - u32 aCkpt[] = { - 0, /* CKPT_HDR_ID_MSW */ - 10, /* CKPT_HDR_ID_LSW */ - 0, /* CKPT_HDR_NCKPT */ - LSM_COMPRESSION_EMPTY, /* CKPT_HDR_CMPID */ - 0, /* CKPT_HDR_NBLOCK */ - 0, /* CKPT_HDR_BLKSZ */ - 0, /* CKPT_HDR_NLEVEL */ - 0, /* CKPT_HDR_PGSZ */ - 0, /* CKPT_HDR_NWRITE */ - 0, 0, 1234, 5678, /* The log pointer and initial checksum */ - 0,0,0,0, 0,0,0,0, /* The append list */ - 0, /* The redirected block list */ - 0, /* The free block list */ - 0, 0 /* Space for checksum values */ - }; - u32 nCkpt = array_size(aCkpt); - ShmHeader *pShm = pDb->pShmhdr; - - aCkpt[CKPT_HDR_NCKPT] = nCkpt; - aCkpt[CKPT_HDR_BLKSZ] = pDb->nDfltBlksz; - aCkpt[CKPT_HDR_PGSZ] = pDb->nDfltPgsz; - ckptChecksum(aCkpt, array_size(aCkpt), &aCkpt[nCkpt-2], &aCkpt[nCkpt-1]); - - memcpy(pShm->aSnap1, aCkpt, nCkpt*sizeof(u32)); - memcpy(pShm->aSnap2, aCkpt, nCkpt*sizeof(u32)); - memcpy(pDb->aSnapshot, aCkpt, nCkpt*sizeof(u32)); -} - -/* -** This function is called as part of database recovery to initialize the -** ShmHeader.aSnap1[] and ShmHeader.aSnap2[] snapshots. -*/ -int lsmCheckpointRecover(lsm_db *pDb){ - int rc = LSM_OK; /* Return Code */ - i64 iId1; /* Id of checkpoint on meta-page 1 */ - i64 iId2; /* Id of checkpoint on meta-page 2 */ - int bLoaded = 0; /* True once checkpoint has been loaded */ - int cmp; /* True if (iId2>iId1) */ - MetaPage *apPg[2] = {0, 0}; /* Meta-pages 1 and 2 */ - - rc = lsmFsMetaPageGet(pDb->pFS, 0, 1, &apPg[0]); - if( rc==LSM_OK ) rc = lsmFsMetaPageGet(pDb->pFS, 0, 2, &apPg[1]); - - iId1 = ckptLoadId(apPg[0]); - iId2 = ckptLoadId(apPg[1]); - cmp = (iId2 > iId1); - bLoaded = ckptTryLoad(pDb, apPg[cmp?1:0], (cmp?2:1), &rc); - if( bLoaded==0 ){ - bLoaded = ckptTryLoad(pDb, apPg[cmp?0:1], (cmp?1:2), &rc); - } - - /* The database does not contain a valid checkpoint. Initialize the shared - ** memory header with an empty checkpoint. */ - if( bLoaded==0 ){ - ckptLoadEmpty(pDb); - } - - lsmFsMetaPageRelease(apPg[0]); - lsmFsMetaPageRelease(apPg[1]); - - return rc; -} - -/* -** Store the snapshot in pDb->aSnapshot[] in meta-page iMeta. -*/ -int lsmCheckpointStore(lsm_db *pDb, int iMeta){ - MetaPage *pPg = 0; - int rc; - - assert( iMeta==1 || iMeta==2 ); - rc = lsmFsMetaPageGet(pDb->pFS, 1, iMeta, &pPg); - if( rc==LSM_OK ){ - u8 *aData; - int nData; - int nCkpt; - - nCkpt = (int)pDb->aSnapshot[CKPT_HDR_NCKPT]; - aData = lsmFsMetaPageData(pPg, &nData); - memcpy(aData, pDb->aSnapshot, nCkpt*sizeof(u32)); - ckptChangeEndianness((u32 *)aData, nCkpt); - rc = lsmFsMetaPageRelease(pPg); - } - - return rc; -} - -/* -** Copy the current client snapshot from shared-memory to pDb->aSnapshot[]. 
-*/ -int lsmCheckpointLoad(lsm_db *pDb, int *piRead){ - int nRem = LSM_ATTEMPTS_BEFORE_PROTOCOL; - ShmHeader *pShm = pDb->pShmhdr; - while( (nRem--)>0 ){ - int nInt; - - nInt = pShm->aSnap1[CKPT_HDR_NCKPT]; - if( nInt<=(LSM_META_RW_PAGE_SIZE / sizeof(u32)) ){ - memcpy(pDb->aSnapshot, pShm->aSnap1, nInt*sizeof(u32)); - if( ckptChecksumOk(pDb->aSnapshot) ){ - if( piRead ) *piRead = 1; - return LSM_OK; - } - } - - nInt = pShm->aSnap2[CKPT_HDR_NCKPT]; - if( nInt<=(LSM_META_RW_PAGE_SIZE / sizeof(u32)) ){ - memcpy(pDb->aSnapshot, pShm->aSnap2, nInt*sizeof(u32)); - if( ckptChecksumOk(pDb->aSnapshot) ){ - if( piRead ) *piRead = 2; - return LSM_OK; - } - } - - lsmShmBarrier(pDb); - } - return LSM_PROTOCOL_BKPT; -} - -int lsmInfoCompressionId(lsm_db *db, u32 *piCmpId){ - int rc; - - assert( db->pClient==0 && db->pWorker==0 ); - rc = lsmCheckpointLoad(db, 0); - if( rc==LSM_OK ){ - *piCmpId = db->aSnapshot[CKPT_HDR_CMPID]; - } - - return rc; -} - -int lsmCheckpointLoadOk(lsm_db *pDb, int iSnap){ - u32 *aShm; - assert( iSnap==1 || iSnap==2 ); - aShm = (iSnap==1) ? pDb->pShmhdr->aSnap1 : pDb->pShmhdr->aSnap2; - return (lsmCheckpointId(pDb->aSnapshot, 0)==lsmCheckpointId(aShm, 0) ); -} - -int lsmCheckpointClientCacheOk(lsm_db *pDb){ - return ( pDb->pClient - && pDb->pClient->iId==lsmCheckpointId(pDb->aSnapshot, 0) - && pDb->pClient->iId==lsmCheckpointId(pDb->pShmhdr->aSnap1, 0) - && pDb->pClient->iId==lsmCheckpointId(pDb->pShmhdr->aSnap2, 0) - ); -} - -int lsmCheckpointLoadWorker(lsm_db *pDb){ - int rc; - ShmHeader *pShm = pDb->pShmhdr; - int nInt1; - int nInt2; - - /* Must be holding the WORKER lock to do this. Or DMS2. */ - assert( - lsmShmAssertLock(pDb, LSM_LOCK_WORKER, LSM_LOCK_EXCL) - || lsmShmAssertLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_EXCL) - ); - - /* Check that the two snapshots match. If not, repair them. 
*/ - nInt1 = pShm->aSnap1[CKPT_HDR_NCKPT]; - nInt2 = pShm->aSnap2[CKPT_HDR_NCKPT]; - if( nInt1!=nInt2 || memcmp(pShm->aSnap1, pShm->aSnap2, nInt2*sizeof(u32)) ){ - if( ckptChecksumOk(pShm->aSnap1) ){ - memcpy(pShm->aSnap2, pShm->aSnap1, sizeof(u32)*nInt1); - }else if( ckptChecksumOk(pShm->aSnap2) ){ - memcpy(pShm->aSnap1, pShm->aSnap2, sizeof(u32)*nInt2); - }else{ - return LSM_PROTOCOL_BKPT; - } - } - - rc = lsmCheckpointDeserialize(pDb, 1, pShm->aSnap1, &pDb->pWorker); - if( pDb->pWorker ) pDb->pWorker->pDatabase = pDb->pDatabase; - - if( rc==LSM_OK ){ - rc = lsmCheckCompressionId(pDb, pDb->pWorker->iCmpId); - } - -#if 0 - assert( rc!=LSM_OK || lsmFsIntegrityCheck(pDb) ); -#endif - return rc; -} - -int lsmCheckpointDeserialize( - lsm_db *pDb, - int bInclFreelist, /* If true, deserialize free-list */ - u32 *aCkpt, - Snapshot **ppSnap -){ - int rc = LSM_OK; - Snapshot *pNew; - - pNew = (Snapshot *)lsmMallocZeroRc(pDb->pEnv, sizeof(Snapshot), &rc); - if( rc==LSM_OK ){ - Level *pLvl; - int nFree; - int i; - int nLevel = (int)aCkpt[CKPT_HDR_NLEVEL]; - int iIn = CKPT_HDR_SIZE + CKPT_APPENDLIST_SIZE + CKPT_LOGPTR_SIZE; - - pNew->iId = lsmCheckpointId(aCkpt, 0); - pNew->nBlock = aCkpt[CKPT_HDR_NBLOCK]; - pNew->nWrite = aCkpt[CKPT_HDR_NWRITE]; - rc = ckptLoadLevels(pDb, aCkpt, &iIn, nLevel, &pNew->pLevel); - pNew->iLogOff = lsmCheckpointLogOffset(aCkpt); - pNew->iCmpId = aCkpt[CKPT_HDR_CMPID]; - - /* Make a copy of the append-list */ - for(i=0; iaiAppend[i] = ckptRead64(a); - } - - /* Read the block-redirect list */ - pNew->redirect.n = aCkpt[iIn++]; - if( pNew->redirect.n ){ - pNew->redirect.a = lsmMallocZeroRc(pDb->pEnv, - (sizeof(struct RedirectEntry) * LSM_MAX_BLOCK_REDIRECTS), &rc - ); - if( rc==LSM_OK ){ - for(i=0; iredirect.n; i++){ - pNew->redirect.a[i].iFrom = aCkpt[iIn++]; - pNew->redirect.a[i].iTo = aCkpt[iIn++]; - } - } - for(pLvl=pNew->pLevel; pLvl->pNext; pLvl=pLvl->pNext); - if( pLvl->nRight ){ - pLvl->aRhs[pLvl->nRight-1].pRedirect = &pNew->redirect; - }else{ - pLvl->lhs.pRedirect = &pNew->redirect; - } - } - - /* Copy the free-list */ - if( rc==LSM_OK && bInclFreelist ){ - nFree = aCkpt[iIn++]; - if( nFree ){ - pNew->freelist.aEntry = (FreelistEntry *)lsmMallocZeroRc( - pDb->pEnv, sizeof(FreelistEntry)*nFree, &rc - ); - if( rc==LSM_OK ){ - int j; - for(j=0; jfreelist.aEntry[j]; - p->iBlk = aCkpt[iIn++]; - p->iId = ((i64)(aCkpt[iIn])<<32) + aCkpt[iIn+1]; - iIn += 2; - } - pNew->freelist.nEntry = pNew->freelist.nAlloc = nFree; - } - } - } - } - - if( rc!=LSM_OK ){ - lsmFreeSnapshot(pDb->pEnv, pNew); - pNew = 0; - } - - *ppSnap = pNew; - return rc; -} - -/* -** Connection pDb must be the worker connection in order to call this -** function. It returns true if the database already contains the maximum -** number of levels or false otherwise. -** -** This is used when flushing the in-memory tree to disk. If the database -** is already full, then the caller should invoke lsm_work() or similar -** until it is not full before creating a new level by flushing the in-memory -** tree to disk. Limiting the number of levels in the database ensures that -** the records describing them always fit within the checkpoint blob. -*/ -int lsmDatabaseFull(lsm_db *pDb){ - Level *p; - int nRhs = 0; - - assert( lsmShmAssertLock(pDb, LSM_LOCK_WORKER, LSM_LOCK_EXCL) ); - assert( pDb->pWorker ); - - for(p=pDb->pWorker->pLevel; p; p=p->pNext){ - nRhs += (p->nRight ? 
p->nRight : 1); - } - - return (nRhs >= LSM_MAX_RHS_SEGMENTS); -} - -/* -** The connection passed as the only argument is currently the worker -** connection. Some work has been performed on the database by the connection, -** but no new snapshot has been written into shared memory. -** -** This function updates the shared-memory worker and client snapshots with -** the new snapshot produced by the work performed by pDb. -** -** If successful, LSM_OK is returned. Otherwise, if an error occurs, an LSM -** error code is returned. -*/ -int lsmCheckpointSaveWorker(lsm_db *pDb, int bFlush){ - Snapshot *pSnap = pDb->pWorker; - ShmHeader *pShm = pDb->pShmhdr; - void *p = 0; - int n = 0; - int rc; - - pSnap->iId++; - rc = ckptExportSnapshot(pDb, bFlush, pSnap->iId, 1, &p, &n); - if( rc!=LSM_OK ) return rc; - assert( ckptChecksumOk((u32 *)p) ); - - assert( n<=LSM_META_RW_PAGE_SIZE ); - memcpy(pShm->aSnap2, p, n); - lsmShmBarrier(pDb); - memcpy(pShm->aSnap1, p, n); - lsmFree(pDb->pEnv, p); - - /* assert( lsmFsIntegrityCheck(pDb) ); */ - return LSM_OK; -} - -/* -** This function is used to determine the snapshot-id of the most recently -** checkpointed snapshot. Variable ShmHeader.iMetaPage indicates which of -** the two meta-pages said snapshot resides on (if any). -** -** If successful, this function loads the snapshot from the meta-page, -** verifies its checksum and sets *piId to the snapshot-id before returning -** LSM_OK. Or, if the checksum attempt fails, *piId is set to zero and -** LSM_OK returned. If an error occurs, an LSM error code is returned and -** the final value of *piId is undefined. -*/ -int lsmCheckpointSynced(lsm_db *pDb, i64 *piId, i64 *piLog, u32 *pnWrite){ - int rc = LSM_OK; - MetaPage *pPg; - u32 iMeta; - - iMeta = pDb->pShmhdr->iMetaPage; - if( iMeta==1 || iMeta==2 ){ - rc = lsmFsMetaPageGet(pDb->pFS, 0, iMeta, &pPg); - if( rc==LSM_OK ){ - int nCkpt; - int nData; - u8 *aData; - - aData = lsmFsMetaPageData(pPg, &nData); - assert( nData==LSM_META_RW_PAGE_SIZE ); - nCkpt = lsmGetU32(&aData[CKPT_HDR_NCKPT*sizeof(u32)]); - if( nCkpt<(LSM_META_RW_PAGE_SIZE/sizeof(u32)) ){ - u32 *aCopy = lsmMallocRc(pDb->pEnv, sizeof(u32) * nCkpt, &rc); - if( aCopy ){ - memcpy(aCopy, aData, nCkpt*sizeof(u32)); - ckptChangeEndianness(aCopy, nCkpt); - if( ckptChecksumOk(aCopy) ){ - if( piId ) *piId = lsmCheckpointId(aCopy, 0); - if( piLog ) *piLog = (lsmCheckpointLogOffset(aCopy) >> 1); - if( pnWrite ) *pnWrite = aCopy[CKPT_HDR_NWRITE]; - } - lsmFree(pDb->pEnv, aCopy); - } - } - lsmFsMetaPageRelease(pPg); - } - } - - if( (iMeta!=1 && iMeta!=2) || rc!=LSM_OK || pDb->pShmhdr->iMetaPage!=iMeta ){ - if( piId ) *piId = 0; - if( piLog ) *piLog = 0; - if( pnWrite ) *pnWrite = 0; - } - return rc; -} - -/* -** Return the checkpoint-id of the checkpoint array passed as the first -** argument to this function. If the second argument is true, then assume -** that the checkpoint is made up of 32-bit big-endian integers. If it -** is false, assume that the integers are in machine byte order. 
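lsmCheckpointSaveWorker() above publishes a new snapshot by writing the second shared-memory copy, issuing a barrier, then writing the first; readers (lsmCheckpointLoad(), earlier) try the first copy, fall back to the second, and retry with a barrier between attempts until LSM_ATTEMPTS_BEFORE_PROTOCOL is exhausted. A single-threaded toy illustrating only the ordering — real synchronisation comes from lsmShmBarrier() and the checksum check, not from this sketch:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SNAP_WORDS 8

struct ToyShm {
  uint32_t aSnap1[SNAP_WORDS];
  uint32_t aSnap2[SNAP_WORDS];
};

/* Writer side: second copy first, barrier, then first copy, so a reader
** that races the writer will usually find at least one copy that
** verifies; the real code retries and can return LSM_PROTOCOL_BKPT. */
static void publish(struct ToyShm *p, const uint32_t *aNew){
  memcpy(p->aSnap2, aNew, sizeof(p->aSnap2));
  /* lsmShmBarrier(db) would go here. */
  memcpy(p->aSnap1, aNew, sizeof(p->aSnap1));
}

/* Reader side: prefer copy 1, fall back to copy 2. verify() stands in
** for the ckptChecksumOk() test. Returns which copy verified, or 0. */
static int readSnap(struct ToyShm *p, uint32_t *aOut,
                    int (*verify)(const uint32_t*)){
  memcpy(aOut, p->aSnap1, sizeof(p->aSnap1));
  if( verify(aOut) ) return 1;
  memcpy(aOut, p->aSnap2, sizeof(p->aSnap2));
  return verify(aOut) ? 2 : 0;
}

static int alwaysOk(const uint32_t *a){ (void)a; return 1; }

int main(void){
  struct ToyShm shm;
  uint32_t aNew[SNAP_WORDS] = {0,11,8,0,0,0,0,0};
  uint32_t aGot[SNAP_WORDS];
  memset(&shm, 0, sizeof(shm));
  publish(&shm, aNew);
  printf("read from copy %d, id LSW=%u\n",
         readSnap(&shm, aGot, alwaysOk), (unsigned)aGot[1]);
  return 0;
}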
-*/ -i64 lsmCheckpointId(u32 *aCkpt, int bDisk){ - i64 iId; - if( bDisk ){ - u8 *aData = (u8 *)aCkpt; - iId = (((i64)lsmGetU32(&aData[CKPT_HDR_ID_MSW*4])) << 32); - iId += ((i64)lsmGetU32(&aData[CKPT_HDR_ID_LSW*4])); - }else{ - iId = ((i64)aCkpt[CKPT_HDR_ID_MSW] << 32) + (i64)aCkpt[CKPT_HDR_ID_LSW]; - } - return iId; -} - -u32 lsmCheckpointNBlock(u32 *aCkpt){ - return aCkpt[CKPT_HDR_NBLOCK]; -} - -u32 lsmCheckpointNWrite(u32 *aCkpt, int bDisk){ - if( bDisk ){ - return lsmGetU32((u8 *)&aCkpt[CKPT_HDR_NWRITE]); - }else{ - return aCkpt[CKPT_HDR_NWRITE]; - } -} - -i64 lsmCheckpointLogOffset(u32 *aCkpt){ - return ((i64)aCkpt[CKPT_HDR_LO_MSW] << 32) + (i64)aCkpt[CKPT_HDR_LO_LSW]; -} - -int lsmCheckpointPgsz(u32 *aCkpt){ return (int)aCkpt[CKPT_HDR_PGSZ]; } - -int lsmCheckpointBlksz(u32 *aCkpt){ return (int)aCkpt[CKPT_HDR_BLKSZ]; } - -void lsmCheckpointLogoffset( - u32 *aCkpt, - DbLog *pLog -){ - pLog->aRegion[2].iStart = (lsmCheckpointLogOffset(aCkpt) >> 1); - - pLog->cksum0 = aCkpt[CKPT_HDR_LO_CKSUM1]; - pLog->cksum1 = aCkpt[CKPT_HDR_LO_CKSUM2]; - pLog->iSnapshotId = lsmCheckpointId(aCkpt, 0); -} - -void lsmCheckpointZeroLogoffset(lsm_db *pDb){ - u32 nCkpt; - - nCkpt = pDb->aSnapshot[CKPT_HDR_NCKPT]; - assert( nCkpt>CKPT_HDR_NCKPT ); - assert( nCkpt==pDb->pShmhdr->aSnap1[CKPT_HDR_NCKPT] ); - assert( 0==memcmp(pDb->aSnapshot, pDb->pShmhdr->aSnap1, nCkpt*sizeof(u32)) ); - assert( 0==memcmp(pDb->aSnapshot, pDb->pShmhdr->aSnap2, nCkpt*sizeof(u32)) ); - - pDb->aSnapshot[CKPT_HDR_LO_MSW] = 0; - pDb->aSnapshot[CKPT_HDR_LO_LSW] = 0; - ckptChecksum(pDb->aSnapshot, nCkpt, - &pDb->aSnapshot[nCkpt-2], &pDb->aSnapshot[nCkpt-1] - ); - - memcpy(pDb->pShmhdr->aSnap1, pDb->aSnapshot, nCkpt*sizeof(u32)); - memcpy(pDb->pShmhdr->aSnap2, pDb->aSnapshot, nCkpt*sizeof(u32)); -} - -/* -** Set the output variable to the number of KB of data written into the -** database file since the most recent checkpoint. -*/ -int lsmCheckpointSize(lsm_db *db, int *pnKB){ - int rc = LSM_OK; - u32 nSynced; - - /* Set nSynced to the number of pages that had been written when the - ** database was last checkpointed. */ - rc = lsmCheckpointSynced(db, 0, 0, &nSynced); - - if( rc==LSM_OK ){ - u32 nPgsz = db->pShmhdr->aSnap1[CKPT_HDR_PGSZ]; - u32 nWrite = db->pShmhdr->aSnap1[CKPT_HDR_NWRITE]; - *pnKB = (int)(( ((i64)(nWrite - nSynced) * nPgsz) + 1023) / 1024); - } - - return rc; -} diff --git a/ext/lsm1/lsm_file.c b/ext/lsm1/lsm_file.c deleted file mode 100644 index 1dcdd05d99..0000000000 --- a/ext/lsm1/lsm_file.c +++ /dev/null @@ -1,3312 +0,0 @@ -/* -** 2011-08-26 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** NORMAL DATABASE FILE FORMAT -** -** The following database file format concepts are used by the code in -** this file to read and write the database file. -** -** Pages: -** -** A database file is divided into pages. The first 8KB of the file consists -** of two 4KB meta-pages. The meta-page size is not configurable. The -** remainder of the file is made up of database pages. The default database -** page size is 4KB. Database pages are aligned to page-size boundaries, -** so if the database page size is larger than 8KB there is a gap between -** the end of the meta pages and the start of the database pages. 
-** -** Database pages are numbered based on their position in the file. Page N -** begins at byte offset ((N-1)*pgsz). This means that page 1 does not -** exist - since it would always overlap with the meta pages. If the -** page-size is (say) 512 bytes, then the first usable page in the database -** is page 33. -** -** It is assumed that the first two meta pages and the data that follows -** them are located on different disk sectors. So that if a power failure -** while writing to a meta page there is no risk of damage to the other -** meta page or any other part of the database file. TODO: This may need -** to be revisited. -** -** Blocks: -** -** The database file is also divided into blocks. The default block size is -** 1MB. When writing to the database file, an attempt is made to write data -** in contiguous block-sized chunks. -** -** The first and last page on each block are special in that they are 4 -** bytes smaller than all other pages. This is because the last four bytes -** of space on the first and last pages of each block are reserved for -** pointers to other blocks (i.e. a 32-bit block number). -** -** Runs: -** -** A run is a sequence of pages that the upper layer uses to store a -** sorted array of database keys (and accompanying data - values, FC -** pointers and so on). Given a page within a run, it is possible to -** navigate to the next page in the run as follows: -** -** a) if the current page is not the last in a block, the next page -** in the run is located immediately after the current page, OR -** -** b) if the current page is the last page in a block, the next page -** in the run is the first page on the block identified by the -** block pointer stored in the last 4 bytes of the current block. -** -** It is possible to navigate to the previous page in a similar fashion, -** using the block pointer embedded in the last 4 bytes of the first page -** of each block as required. -** -** The upper layer is responsible for identifying by page number the -** first and last page of any run that it needs to navigate - there are -** no "end-of-run" markers stored or identified by this layer. This is -** necessary as clients reading different database snapshots may access -** different subsets of a run. -** -** THE LOG FILE -** -** This file opens and closes the log file. But it does not contain any -** logic related to the log file format. Instead, it exports the following -** functions that are used by the code in lsm_log.c to read and write the -** log file: -** -** lsmFsOpenLog -** lsmFsWriteLog -** lsmFsSyncLog -** lsmFsReadLog -** lsmFsTruncateLog -** lsmFsCloseAndDeleteLog -** -** COMPRESSED DATABASE FILE FORMAT -** -** The compressed database file format is very similar to the normal format. -** The file still begins with two 4KB meta-pages (which are never compressed). -** It is still divided into blocks. -** -** The first and last four bytes of each block are reserved for 32-bit -** pointer values. Similar to the way four bytes are carved from the end of -** the first and last page of each block in uncompressed databases. From -** the point of view of the upper layer, all pages are the same size - this -** is different from the uncompressed format where the first and last pages -** on each block are 4 bytes smaller than the others. -** -** Pages are stored in variable length compressed form, as follows: -** -** * 3-byte size field containing the size of the compressed page image -** in bytes. 
The most significant bit of each byte of the size field -** is always set. The remaining 7 bits are used to store a 21-bit -** integer value (in big-endian order - the first byte in the field -** contains the most significant 7 bits). Since the maximum allowed -** size of a compressed page image is (2^17 - 1) bytes, there are -** actually 4 unused bits in the size field. -** -** In other words, if the size of the compressed page image is nSz, -** the header can be serialized as follows: -** -** u8 aHdr[3] -** aHdr[0] = 0x80 | (u8)(nSz >> 14); -** aHdr[1] = 0x80 | (u8)(nSz >> 7); -** aHdr[2] = 0x80 | (u8)(nSz >> 0); -** -** * Compressed page image. -** -** * A second copy of the 3-byte record header. -** -** A page number is a byte offset into the database file. So the smallest -** possible page number is 8192 (immediately after the two meta-pages). -** The first and root page of a segment are identified by a page number -** corresponding to the byte offset of the first byte in the corresponding -** page record. The last page of a segment is identified by the byte offset -** of the last byte in its record. -** -** Unlike uncompressed pages, compressed page records may span blocks. -** -** Sometimes, in order to avoid touching sectors that contain synced data -** when writing, it is necessary to insert unused space between compressed -** page records. This can be done as follows: -** -** * For less than 6 bytes of empty space, the first and last byte -** of the free space contain the total number of free bytes. For -** example: -** -** Block of 4 free bytes: 0x04 0x?? 0x?? 0x04 -** Block of 2 free bytes: 0x02 0x02 -** A single free byte: 0x01 -** -** * For 6 or more bytes of empty space, a record similar to a -** compressed page record is added to the segment. A padding record -** is distinguished from a compressed page record by the most -** significant bit of the second byte of the size field, which is -** cleared instead of set. -*/ -#include "lsmInt.h" - -#include -#include -#include - -/* -** File-system object. Each database connection allocates a single instance -** of the following structure. It is used for all access to the database and -** log files. -** -** The database file may be accessed via two methods - using mmap() or using -** read() and write() calls. In the general case both methods are used - a -** prefix of the file is mapped into memory and the remainder accessed using -** read() and write(). This is helpful when accessing very large files (or -** files that may grow very large during the lifetime of a database -** connection) on systems with 32-bit address spaces. However, it also requires -** that this object manage two distinct types of Page objects simultaneously - -** those that carry pointers to the mapped file and those that carry arrays -** populated by read() calls. -** -** pFree: -** The head of a singly-linked list that containing currently unused Page -** structures suitable for use as mmap-page handles. Connected by the -** Page.pFreeNext pointers. -** -** pMapped: -** The head of a singly-linked list that contains all pages that currently -** carry pointers to the mapped region. This is used if the region is -** every remapped - the pointers carried by existing pages can be adjusted -** to account for the remapping. Connected by the Page.pMappedNext pointers. 
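Returning to the compressed-format header comment above: the 3-byte size field stores a 21-bit length, 7 bits per byte in big-endian order, with the top bit of every byte set, and a padding record is flagged by clearing the top bit of the second byte. The encode side is spelled out in that comment; a matching decode sketch, under those same assumptions:

#include <stdint.h>
#include <stdio.h>

/* Serialize a compressed-page size field exactly as the comment shows. */
static void putSizeField(uint8_t aHdr[3], uint32_t nSz){
  aHdr[0] = 0x80 | (uint8_t)(nSz >> 14);
  aHdr[1] = 0x80 | (uint8_t)(nSz >> 7);
  aHdr[2] = 0x80 | (uint8_t)(nSz >> 0);
}

/* Decode the 21-bit size and report whether this is a padding record
** (most significant bit of the second byte cleared) or a page record. */
static uint32_t getSizeField(const uint8_t aHdr[3], int *pbPadding){
  if( pbPadding ) *pbPadding = (aHdr[1] & 0x80)==0;
  return ((uint32_t)(aHdr[0] & 0x7F) << 14)
       | ((uint32_t)(aHdr[1] & 0x7F) << 7)
       |  (uint32_t)(aHdr[2] & 0x7F);
}

int main(void){
  uint8_t aHdr[3];
  int bPad;
  putSizeField(aHdr, 12345);
  printf("size=%u padding=%d\n", (unsigned)getSizeField(aHdr, &bPad), bPad);
  return 0;
}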
-** -** pWaiting: -** When the upper layer wishes to append a new b-tree page to a segment, -** it allocates a Page object that carries a malloc'd block of memory - -** regardless of the mmap-related configuration. The page is not assigned -** a page number at first. When the upper layer has finished constructing -** the page contents, it calls lsmFsPagePersist() to assign a page number -** to it. At this point it is likely that N pages have been written to the -** segment, the (N+1)th page is still outstanding and the b-tree page is -** assigned page number (N+2). To avoid writing page (N+2) before page -** (N+1), the recently completed b-tree page is held in the singly linked -** list headed by pWaiting until page (N+1) has been written. -** -** Function lsmFsFlushWaiting() is responsible for eventually writing -** waiting pages to disk. -** -** apHash/nHash: -** Hash table used to store all Page objects that carry malloc'd arrays, -** except those b-tree pages that have not yet been assigned page numbers. -** Once they have been assigned page numbers - they are added to this -** hash table. -** -** Hash table overflow chains are connected using the Page.pHashNext -** pointers. -** -** pLruFirst, pLruLast: -** The first and last entries in a doubly-linked list of pages. This -** list contains all pages with malloc'd data that are present in the -** hash table and have a ref-count of zero. -*/ -struct FileSystem { - lsm_db *pDb; /* Database handle that owns this object */ - lsm_env *pEnv; /* Environment pointer */ - char *zDb; /* Database file name */ - char *zLog; /* Database file name */ - int nMetasize; /* Size of meta pages in bytes */ - int nMetaRwSize; /* Read/written size of meta pages in bytes */ - int nPagesize; /* Database page-size in bytes */ - int nBlocksize; /* Database block-size in bytes */ - - /* r/w file descriptors for both files. */ - LsmFile *pLsmFile; /* Used after lsm_close() to link into list */ - lsm_file *fdDb; /* Database file */ - lsm_file *fdLog; /* Log file */ - int szSector; /* Database file sector size */ - - /* If this is a compressed database, a pointer to the compression methods. - ** For an uncompressed database, a NULL pointer. */ - lsm_compress *pCompress; - u8 *aIBuffer; /* Buffer to compress to */ - u8 *aOBuffer; /* Buffer to uncompress from */ - int nBuffer; /* Allocated size of above buffers in bytes */ - - /* mmap() page related things */ - i64 nMapLimit; /* Maximum bytes of file to map */ - void *pMap; /* Current mapping of database file */ - i64 nMap; /* Bytes mapped at pMap */ - Page *pFree; /* Unused Page structures */ - Page *pMapped; /* List of Page structs that point to pMap */ - - /* Page cache parameters for non-mmap() pages */ - int nCacheMax; /* Configured cache size (in pages) */ - int nCacheAlloc; /* Current cache size (in pages) */ - Page *pLruFirst; /* Head of the LRU list */ - Page *pLruLast; /* Tail of the LRU list */ - int nHash; /* Number of hash slots in hash table */ - Page **apHash; /* nHash Hash slots */ - Page *pWaiting; /* b-tree pages waiting to be written */ - - /* Statistics */ - int nOut; /* Number of outstanding pages */ - int nWrite; /* Total number of pages written */ - int nRead; /* Total number of pages read */ -}; - -/* -** Database page handle. -** -** pSeg: -** When lsmFsSortedAppend() is called on a compressed database, the new -** page is not assigned a page number or location in the database file -** immediately. 
Instead, these are assigned by the lsmFsPagePersist() call -** right before it writes the compressed page image to disk. -** -** The lsmFsSortedAppend() function sets the pSeg pointer to point to the -** segment that the new page will be a part of. It is unset by -** lsmFsPagePersist() after the page is written to disk. -*/ -struct Page { - u8 *aData; /* Buffer containing page data */ - int nData; /* Bytes of usable data at aData[] */ - LsmPgno iPg; /* Page number */ - int nRef; /* Number of outstanding references */ - int flags; /* Combination of PAGE_XXX flags */ - Page *pHashNext; /* Next page in hash table slot */ - Page *pLruNext; /* Next page in LRU list */ - Page *pLruPrev; /* Previous page in LRU list */ - FileSystem *pFS; /* File system that owns this page */ - - /* Only used in compressed database mode: */ - int nCompress; /* Compressed size (or 0 for uncomp. db) */ - int nCompressPrev; /* Compressed size of prev page */ - Segment *pSeg; /* Segment this page will be written to */ - - /* Pointers for singly linked lists */ - Page *pWaitingNext; /* Next page in FileSystem.pWaiting list */ - Page *pFreeNext; /* Next page in FileSystem.pFree list */ - Page *pMappedNext; /* Next page in FileSystem.pMapped list */ -}; - -/* -** Meta-data page handle. There are two meta-data pages at the start of -** the database file, each FileSystem.nMetasize bytes in size. -*/ -struct MetaPage { - int iPg; /* Either 1 or 2 */ - int bWrite; /* Write back to db file on release */ - u8 *aData; /* Pointer to buffer */ - FileSystem *pFS; /* FileSystem that owns this page */ -}; - -/* -** Values for LsmPage.flags -*/ -#define PAGE_DIRTY 0x00000001 /* Set if page is dirty */ -#define PAGE_FREE 0x00000002 /* Set if Page.aData requires lsmFree() */ -#define PAGE_HASPREV 0x00000004 /* Set if page is first on uncomp. block */ - -/* -** Number of pgsz byte pages omitted from the start of block 1. The start -** of block 1 contains two 4096 byte meta pages (8192 bytes in total). -*/ -#define BLOCK1_HDR_SIZE(pgsz) LSM_MAX(1, 8192/(pgsz)) - -/* -** If NDEBUG is not defined, set a breakpoint in function lsmIoerrBkpt() -** to catch IO errors (any error returned by a VFS method). -*/ -#ifndef NDEBUG -static void lsmIoerrBkpt(void){ - static int nErr = 0; - nErr++; -} -static int IOERR_WRAPPER(int rc){ - if( rc!=LSM_OK ) lsmIoerrBkpt(); - return rc; -} -#else -# define IOERR_WRAPPER(rc) (rc) -#endif - -#ifdef NDEBUG -# define assert_lists_are_ok(x) -#else -static Page *fsPageFindInHash(FileSystem *pFS, LsmPgno iPg, int *piHash); - -static void assert_lists_are_ok(FileSystem *pFS){ -#if 0 - Page *p; - - assert( pFS->nMapLimit>=0 ); - - /* Check that all pages in the LRU list have nRef==0, pointers to buffers - ** in heap memory, and corresponding entries in the hash table. 
*/ - for(p=pFS->pLruFirst; p; p=p->pLruNext){ - assert( p==pFS->pLruFirst || p->pLruPrev!=0 ); - assert( p==pFS->pLruLast || p->pLruNext!=0 ); - assert( p->pLruPrev==0 || p->pLruPrev->pLruNext==p ); - assert( p->pLruNext==0 || p->pLruNext->pLruPrev==p ); - assert( p->nRef==0 ); - assert( p->flags & PAGE_FREE ); - assert( p==fsPageFindInHash(pFS, p->iPg, 0) ); - } -#endif -} -#endif - -/* -** Wrappers around the VFS methods of the lsm_env object: -** -** lsmEnvOpen() -** lsmEnvRead() -** lsmEnvWrite() -** lsmEnvSync() -** lsmEnvSectorSize() -** lsmEnvClose() -** lsmEnvTruncate() -** lsmEnvUnlink() -** lsmEnvRemap() -*/ -int lsmEnvOpen(lsm_env *pEnv, const char *zFile, int flags, lsm_file **ppNew){ - return pEnv->xOpen(pEnv, zFile, flags, ppNew); -} - -static int lsmEnvRead( - lsm_env *pEnv, - lsm_file *pFile, - lsm_i64 iOff, - void *pRead, - int nRead -){ - return IOERR_WRAPPER( pEnv->xRead(pFile, iOff, pRead, nRead) ); -} - -static int lsmEnvWrite( - lsm_env *pEnv, - lsm_file *pFile, - lsm_i64 iOff, - const void *pWrite, - int nWrite -){ - return IOERR_WRAPPER( pEnv->xWrite(pFile, iOff, (void *)pWrite, nWrite) ); -} - -static int lsmEnvSync(lsm_env *pEnv, lsm_file *pFile){ - return IOERR_WRAPPER( pEnv->xSync(pFile) ); -} - -static int lsmEnvSectorSize(lsm_env *pEnv, lsm_file *pFile){ - return pEnv->xSectorSize(pFile); -} - -int lsmEnvClose(lsm_env *pEnv, lsm_file *pFile){ - return IOERR_WRAPPER( pEnv->xClose(pFile) ); -} - -static int lsmEnvTruncate(lsm_env *pEnv, lsm_file *pFile, lsm_i64 nByte){ - return IOERR_WRAPPER( pEnv->xTruncate(pFile, nByte) ); -} - -static int lsmEnvUnlink(lsm_env *pEnv, const char *zDel){ - return IOERR_WRAPPER( pEnv->xUnlink(pEnv, zDel) ); -} - -static int lsmEnvRemap( - lsm_env *pEnv, - lsm_file *pFile, - i64 szMin, - void **ppMap, - i64 *pszMap -){ - return pEnv->xRemap(pFile, szMin, ppMap, pszMap); -} - -int lsmEnvLock(lsm_env *pEnv, lsm_file *pFile, int iLock, int eLock){ - if( pFile==0 ) return LSM_OK; - return pEnv->xLock(pFile, iLock, eLock); -} - -int lsmEnvTestLock( - lsm_env *pEnv, - lsm_file *pFile, - int iLock, - int nLock, - int eLock -){ - return pEnv->xTestLock(pFile, iLock, nLock, eLock); -} - -int lsmEnvShmMap( - lsm_env *pEnv, - lsm_file *pFile, - int iChunk, - int sz, - void **ppOut -){ - return pEnv->xShmMap(pFile, iChunk, sz, ppOut); -} - -void lsmEnvShmBarrier(lsm_env *pEnv){ - pEnv->xShmBarrier(); -} - -void lsmEnvShmUnmap(lsm_env *pEnv, lsm_file *pFile, int bDel){ - pEnv->xShmUnmap(pFile, bDel); -} - -void lsmEnvSleep(lsm_env *pEnv, int nUs){ - pEnv->xSleep(pEnv, nUs); -} - - -/* -** Write the contents of string buffer pStr into the log file, starting at -** offset iOff. -*/ -int lsmFsWriteLog(FileSystem *pFS, i64 iOff, LsmString *pStr){ - assert( pFS->fdLog ); - return lsmEnvWrite(pFS->pEnv, pFS->fdLog, iOff, pStr->z, pStr->n); -} - -/* -** fsync() the log file. -*/ -int lsmFsSyncLog(FileSystem *pFS){ - assert( pFS->fdLog ); - return lsmEnvSync(pFS->pEnv, pFS->fdLog); -} - -/* -** Read nRead bytes of data starting at offset iOff of the log file. Append -** the results to string buffer pStr. -*/ -int lsmFsReadLog(FileSystem *pFS, i64 iOff, int nRead, LsmString *pStr){ - int rc; /* Return code */ - assert( pFS->fdLog ); - rc = lsmStringExtend(pStr, nRead); - if( rc==LSM_OK ){ - rc = lsmEnvRead(pFS->pEnv, pFS->fdLog, iOff, &pStr->z[pStr->n], nRead); - pStr->n += nRead; - } - return rc; -} - -/* -** Truncate the log file to nByte bytes in size. 
-*/ -int lsmFsTruncateLog(FileSystem *pFS, i64 nByte){ - if( pFS->fdLog==0 ) return LSM_OK; - return lsmEnvTruncate(pFS->pEnv, pFS->fdLog, nByte); -} - -/* -** Truncate the db file to nByte bytes in size. -*/ -int lsmFsTruncateDb(FileSystem *pFS, i64 nByte){ - if( pFS->fdDb==0 ) return LSM_OK; - return lsmEnvTruncate(pFS->pEnv, pFS->fdDb, nByte); -} - -/* -** Close the log file. Then delete it from the file-system. This function -** is called during database shutdown only. -*/ -int lsmFsCloseAndDeleteLog(FileSystem *pFS){ - char *zDel; - - if( pFS->fdLog ){ - lsmEnvClose(pFS->pEnv, pFS->fdLog ); - pFS->fdLog = 0; - } - - zDel = lsmMallocPrintf(pFS->pEnv, "%s-log", pFS->zDb); - if( zDel ){ - lsmEnvUnlink(pFS->pEnv, zDel); - lsmFree(pFS->pEnv, zDel); - } - return LSM_OK; -} - -/* -** Return true if page iReal of the database should be accessed using mmap. -** False otherwise. -*/ -static int fsMmapPage(FileSystem *pFS, LsmPgno iReal){ - return ((i64)iReal*pFS->nPagesize <= pFS->nMapLimit); -} - -/* -** Given that there are currently nHash slots in the hash table, return -** the hash key for file iFile, page iPg. -*/ -static int fsHashKey(int nHash, LsmPgno iPg){ - return (iPg % nHash); -} - -/* -** This is a helper function for lsmFsOpen(). It opens a single file on -** disk (either the database or log file). -*/ -static lsm_file *fsOpenFile( - FileSystem *pFS, /* File system object */ - int bReadonly, /* True to open this file read-only */ - int bLog, /* True for log, false for db */ - int *pRc /* IN/OUT: Error code */ -){ - lsm_file *pFile = 0; - if( *pRc==LSM_OK ){ - int flags = (bReadonly ? LSM_OPEN_READONLY : 0); - const char *zPath = (bLog ? pFS->zLog : pFS->zDb); - - *pRc = lsmEnvOpen(pFS->pEnv, zPath, flags, &pFile); - } - return pFile; -} - -/* -** If it is not already open, this function opens the log file. It returns -** LSM_OK if successful (or if the log file was already open) or an LSM -** error code otherwise. -** -** The log file must be opened before any of the following may be called: -** -** lsmFsWriteLog -** lsmFsSyncLog -** lsmFsReadLog -*/ -int lsmFsOpenLog(lsm_db *db, int *pbOpen){ - int rc = LSM_OK; - FileSystem *pFS = db->pFS; - - if( 0==pFS->fdLog ){ - pFS->fdLog = fsOpenFile(pFS, db->bReadonly, 1, &rc); - - if( rc==LSM_IOERR_NOENT && db->bReadonly ){ - rc = LSM_OK; - } - } - - if( pbOpen ) *pbOpen = (pFS->fdLog!=0); - return rc; -} - -/* -** Close the log file, if it is open. -*/ -void lsmFsCloseLog(lsm_db *db){ - FileSystem *pFS = db->pFS; - if( pFS->fdLog ){ - lsmEnvClose(pFS->pEnv, pFS->fdLog); - pFS->fdLog = 0; - } -} - -/* -** Open a connection to a database stored within the file-system. -** -** If parameter bReadonly is true, then open a read-only file-descriptor -** on the database file. It is possible that bReadonly will be false even -** if the user requested that pDb be opened read-only. This is because the -** file-descriptor may later on be recycled by a read-write connection. -** If the db file can be opened for read-write access, it always is. Parameter -** bReadonly is only ever true if it has already been determined that the -** db can only be opened for read-only access. -** -** Return LSM_OK if successful or an lsm error code otherwise. 
-*/ -int lsmFsOpen( - lsm_db *pDb, /* Database connection to open fd for */ - const char *zDb, /* Full path to database file */ - int bReadonly /* True to open db file read-only */ -){ - FileSystem *pFS; - int rc = LSM_OK; - int nDb = strlen(zDb); - int nByte; - - assert( pDb->pFS==0 ); - assert( pDb->pWorker==0 && pDb->pClient==0 ); - - nByte = sizeof(FileSystem) + nDb+1 + nDb+4+1; - pFS = (FileSystem *)lsmMallocZeroRc(pDb->pEnv, nByte, &rc); - if( pFS ){ - LsmFile *pLsmFile; - pFS->zDb = (char *)&pFS[1]; - pFS->zLog = &pFS->zDb[nDb+1]; - pFS->nPagesize = LSM_DFLT_PAGE_SIZE; - pFS->nBlocksize = LSM_DFLT_BLOCK_SIZE; - pFS->nMetasize = LSM_META_PAGE_SIZE; - pFS->nMetaRwSize = LSM_META_RW_PAGE_SIZE; - pFS->pDb = pDb; - pFS->pEnv = pDb->pEnv; - - /* Make a copy of the database and log file names. */ - memcpy(pFS->zDb, zDb, nDb+1); - memcpy(pFS->zLog, zDb, nDb); - memcpy(&pFS->zLog[nDb], "-log", 5); - - /* Allocate the hash-table here. At some point, it should be changed - ** so that it can grow dynamicly. */ - pFS->nCacheMax = 2048*1024 / pFS->nPagesize; - pFS->nHash = 4096; - pFS->apHash = lsmMallocZeroRc(pDb->pEnv, sizeof(Page *) * pFS->nHash, &rc); - - /* Open the database file */ - pLsmFile = lsmDbRecycleFd(pDb); - if( pLsmFile ){ - pFS->pLsmFile = pLsmFile; - pFS->fdDb = pLsmFile->pFile; - memset(pLsmFile, 0, sizeof(LsmFile)); - }else{ - pFS->pLsmFile = lsmMallocZeroRc(pDb->pEnv, sizeof(LsmFile), &rc); - if( rc==LSM_OK ){ - pFS->fdDb = fsOpenFile(pFS, bReadonly, 0, &rc); - } - } - - if( rc!=LSM_OK ){ - lsmFsClose(pFS); - pFS = 0; - }else{ - pFS->szSector = lsmEnvSectorSize(pFS->pEnv, pFS->fdDb); - } - } - - pDb->pFS = pFS; - return rc; -} - -/* -** Configure the file-system object according to the current values of -** the LSM_CONFIG_MMAP and LSM_CONFIG_SET_COMPRESSION options. -*/ -int lsmFsConfigure(lsm_db *db){ - FileSystem *pFS = db->pFS; - if( pFS ){ - lsm_env *pEnv = pFS->pEnv; - Page *pPg; - - assert( pFS->nOut==0 ); - assert( pFS->pWaiting==0 ); - assert( pFS->pMapped==0 ); - - /* Reset any compression/decompression buffers already allocated */ - lsmFree(pEnv, pFS->aIBuffer); - lsmFree(pEnv, pFS->aOBuffer); - pFS->nBuffer = 0; - - /* Unmap the file, if it is currently mapped */ - if( pFS->pMap ){ - lsmEnvRemap(pEnv, pFS->fdDb, -1, &pFS->pMap, &pFS->nMap); - pFS->nMapLimit = 0; - } - - /* Free all allocated page structures */ - pPg = pFS->pLruFirst; - while( pPg ){ - Page *pNext = pPg->pLruNext; - assert( pPg->flags & PAGE_FREE ); - lsmFree(pEnv, pPg->aData); - lsmFree(pEnv, pPg); - pPg = pNext; - } - - pPg = pFS->pFree; - while( pPg ){ - Page *pNext = pPg->pFreeNext; - lsmFree(pEnv, pPg); - pPg = pNext; - } - - /* Zero pointers that point to deleted page objects */ - pFS->nCacheAlloc = 0; - pFS->pLruFirst = 0; - pFS->pLruLast = 0; - pFS->pFree = 0; - if( pFS->apHash ){ - memset(pFS->apHash, 0, pFS->nHash*sizeof(pFS->apHash[0])); - } - - /* Configure the FileSystem object */ - if( db->compress.xCompress ){ - pFS->pCompress = &db->compress; - pFS->nMapLimit = 0; - }else{ - pFS->pCompress = 0; - if( db->iMmap==1 ){ - /* Unlimited */ - pFS->nMapLimit = (i64)1 << 60; - }else{ - /* iMmap is a limit in KB. Set nMapLimit to the same value in bytes. */ - pFS->nMapLimit = (i64)db->iMmap * 1024; - } - } - } - - return LSM_OK; -} - -/* -** Close and destroy a FileSystem object. 
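lsmFsConfigure() above converts the LSM_CONFIG_MMAP setting into a byte limit (1 means "map the whole file", any other value is a limit in KB), and fsMmapPage() earlier uses that limit to decide whether a page is served from the mapping or from a heap buffer. A small sketch of the same arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Convert an LSM_CONFIG_MMAP-style value into a byte limit:
** 1 means "map the whole file", anything else is a limit in KB. */
static int64_t mmapLimitBytes(int iMmap){
  if( iMmap==1 ) return (int64_t)1 << 60;     /* effectively unlimited */
  return (int64_t)iMmap * 1024;
}

/* Mirror of the fsMmapPage() test: page iPg (uncompressed database) is
** accessed through the mapping iff it falls inside the mapped prefix. */
static int useMmapForPage(int64_t iPg, int nPagesize, int64_t nMapLimit){
  return iPg*nPagesize <= nMapLimit;
}

int main(void){
  int64_t nLimit = mmapLimitBytes(1024);      /* 1024 KB = 1 MB limit */
  printf("page 200 via mmap? %d\n", useMmapForPage(200, 4096, nLimit)); /* 1 */
  printf("page 300 via mmap? %d\n", useMmapForPage(300, 4096, nLimit)); /* 0 */
  return 0;
}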
-*/ -void lsmFsClose(FileSystem *pFS){ - if( pFS ){ - Page *pPg; - lsm_env *pEnv = pFS->pEnv; - - assert( pFS->nOut==0 ); - pPg = pFS->pLruFirst; - while( pPg ){ - Page *pNext = pPg->pLruNext; - if( pPg->flags & PAGE_FREE ) lsmFree(pEnv, pPg->aData); - lsmFree(pEnv, pPg); - pPg = pNext; - } - - pPg = pFS->pFree; - while( pPg ){ - Page *pNext = pPg->pFreeNext; - if( pPg->flags & PAGE_FREE ) lsmFree(pEnv, pPg->aData); - lsmFree(pEnv, pPg); - pPg = pNext; - } - - if( pFS->fdDb ) lsmEnvClose(pFS->pEnv, pFS->fdDb ); - if( pFS->fdLog ) lsmEnvClose(pFS->pEnv, pFS->fdLog ); - lsmFree(pEnv, pFS->pLsmFile); - lsmFree(pEnv, pFS->apHash); - lsmFree(pEnv, pFS->aIBuffer); - lsmFree(pEnv, pFS->aOBuffer); - lsmFree(pEnv, pFS); - } -} - -/* -** This function is called when closing a database handle (i.e. lsm_close()) -** if there exist other connections to the same database within this process. -** In that case the file-descriptor open on the database file is not closed -** when the FileSystem object is destroyed, as this would cause any POSIX -** locks held by the other connections to be silently dropped (see "man close" -** for details). Instead, the file-descriptor is stored in a list by the -** lsm_shared.c module until it is either closed or reused. -** -** This function returns a pointer to an object that can be linked into -** the list described above. The returned object now 'owns' the database -** file descriptr, so that when the FileSystem object is destroyed, it -** will not be closed. -** -** This function may be called at most once in the life-time of a -** FileSystem object. The results of any operations involving the database -** file descriptor are undefined once this function has been called. -** -** None of this is necessary on non-POSIX systems. But we do it anyway in -** the name of using as similar code as possible on all platforms. -*/ -LsmFile *lsmFsDeferClose(FileSystem *pFS){ - LsmFile *p = pFS->pLsmFile; - assert( p->pNext==0 ); - p->pFile = pFS->fdDb; - pFS->fdDb = 0; - pFS->pLsmFile = 0; - return p; -} - -/* -** Allocate a buffer and populate it with the output of the xFileid() -** method of the database file handle. If successful, set *ppId to point -** to the buffer and *pnId to the number of bytes in the buffer and return -** LSM_OK. Otherwise, set *ppId and *pnId to zero and return an LSM -** error code. -*/ -int lsmFsFileid(lsm_db *pDb, void **ppId, int *pnId){ - lsm_env *pEnv = pDb->pEnv; - FileSystem *pFS = pDb->pFS; - int rc; - int nId = 0; - void *pId; - - rc = pEnv->xFileid(pFS->fdDb, 0, &nId); - pId = lsmMallocZeroRc(pEnv, nId, &rc); - if( rc==LSM_OK ) rc = pEnv->xFileid(pFS->fdDb, pId, &nId); - - if( rc!=LSM_OK ){ - lsmFree(pEnv, pId); - pId = 0; - nId = 0; - } - - *ppId = pId; - *pnId = nId; - return rc; -} - -/* -** Return the nominal page-size used by this file-system. Actual pages -** may be smaller or larger than this value. -*/ -int lsmFsPageSize(FileSystem *pFS){ - return pFS->nPagesize; -} - -/* -** Return the block-size used by this file-system. -*/ -int lsmFsBlockSize(FileSystem *pFS){ - return pFS->nBlocksize; -} - -/* -** Configure the nominal page-size used by this file-system. Actual -** pages may be smaller or larger than this value. -*/ -void lsmFsSetPageSize(FileSystem *pFS, int nPgsz){ - pFS->nPagesize = nPgsz; - pFS->nCacheMax = 2048*1024 / pFS->nPagesize; -} - -/* -** Configure the block-size used by this file-system. 
-*/ -void lsmFsSetBlockSize(FileSystem *pFS, int nBlocksize){ - pFS->nBlocksize = nBlocksize; -} - -/* -** Return the page number of the first page on block iBlock. Blocks are -** numbered starting from 1. -** -** For a compressed database, page numbers are byte offsets. The first -** page on each block is the byte offset immediately following the 4-byte -** "previous block" pointer at the start of each block. -*/ -static LsmPgno fsFirstPageOnBlock(FileSystem *pFS, int iBlock){ - LsmPgno iPg; - if( pFS->pCompress ){ - if( iBlock==1 ){ - iPg = pFS->nMetasize * 2 + 4; - }else{ - iPg = pFS->nBlocksize * (LsmPgno)(iBlock-1) + 4; - } - }else{ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - if( iBlock==1 ){ - iPg = 1 + ((pFS->nMetasize*2 + pFS->nPagesize - 1) / pFS->nPagesize); - }else{ - iPg = 1 + (iBlock-1) * nPagePerBlock; - } - } - return iPg; -} - -/* -** Return the page number of the last page on block iBlock. Blocks are -** numbered starting from 1. -** -** For a compressed database, page numbers are byte offsets. The first -** page on each block is the byte offset of the byte immediately before -** the 4-byte "next block" pointer at the end of each block. -*/ -static LsmPgno fsLastPageOnBlock(FileSystem *pFS, int iBlock){ - if( pFS->pCompress ){ - return pFS->nBlocksize * (LsmPgno)iBlock - 1 - 4; - }else{ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - return iBlock * nPagePerBlock; - } -} - -/* -** Return the block number of the block that page iPg is located on. -** Blocks are numbered starting from 1. -*/ -static int fsPageToBlock(FileSystem *pFS, LsmPgno iPg){ - if( pFS->pCompress ){ - return (int)((iPg / pFS->nBlocksize) + 1); - }else{ - return (int)(1 + ((iPg-1) / (pFS->nBlocksize / pFS->nPagesize))); - } -} - -/* -** Return true if page iPg is the last page on its block. -** -** This function is only called in non-compressed database mode. -*/ -static int fsIsLast(FileSystem *pFS, LsmPgno iPg){ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - assert( !pFS->pCompress ); - return ( iPg && (iPg % nPagePerBlock)==0 ); -} - -/* -** Return true if page iPg is the first page on its block. -** -** This function is only called in non-compressed database mode. -*/ -static int fsIsFirst(FileSystem *pFS, LsmPgno iPg){ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - assert( !pFS->pCompress ); - return ( (iPg % nPagePerBlock)==1 - || (iPgnData; - } - return pPage->aData; -} - -/* -** Return the page number of a page. -*/ -LsmPgno lsmFsPageNumber(Page *pPage){ - /* assert( (pPage->flags & PAGE_DIRTY)==0 ); */ - return pPage ? pPage->iPg : 0; -} - -/* -** Page pPg is currently part of the LRU list belonging to pFS. Remove -** it from the list. pPg->pLruNext and pPg->pLruPrev are cleared by this -** operation. -*/ -static void fsPageRemoveFromLru(FileSystem *pFS, Page *pPg){ - assert( pPg->pLruNext || pPg==pFS->pLruLast ); - assert( pPg->pLruPrev || pPg==pFS->pLruFirst ); - if( pPg->pLruNext ){ - pPg->pLruNext->pLruPrev = pPg->pLruPrev; - }else{ - pFS->pLruLast = pPg->pLruPrev; - } - if( pPg->pLruPrev ){ - pPg->pLruPrev->pLruNext = pPg->pLruNext; - }else{ - pFS->pLruFirst = pPg->pLruNext; - } - pPg->pLruPrev = 0; - pPg->pLruNext = 0; -} - -/* -** Page pPg is not currently part of the LRU list belonging to pFS. Add it. 
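As a rough standalone sketch of the page/block arithmetic above: the following restates fsFirstPageOnBlock(), fsLastPageOnBlock() and fsPageToBlock() for the non-compressed case, assuming a 4KB page, a 1MB block and a 4KB meta-page. The real values come from the FileSystem object at runtime, so these constants are illustrative only and not part of the deleted file.

/* Illustrative sketch; not part of the deleted lsm_file.c. */
#include <stdio.h>
#include <stdint.h>

#define PGSZ   4096           /* assumed FileSystem.nPagesize  */
#define BLKSZ  (1024*1024)    /* assumed FileSystem.nBlocksize */
#define METASZ 4096           /* assumed FileSystem.nMetasize  */

/* First page on block iBlock. Block 1 starts after the two meta-pages. */
static int64_t firstPageOnBlock(int iBlock){
  const int nPagePerBlock = BLKSZ / PGSZ;
  if( iBlock==1 ) return 1 + ((METASZ*2 + PGSZ - 1) / PGSZ);
  return 1 + (int64_t)(iBlock-1) * nPagePerBlock;
}

/* Last page on block iBlock. */
static int64_t lastPageOnBlock(int iBlock){
  return (int64_t)iBlock * (BLKSZ / PGSZ);
}

/* Block containing page iPg. */
static int pageToBlock(int64_t iPg){
  return (int)(1 + ((iPg-1) / (BLKSZ / PGSZ)));
}

int main(void){
  /* With 256 pages per block: block 1 covers pages 3..256, block 2
  ** covers pages 257..512, and page 300 maps back to block 2. */
  printf("block 1: pages %lld..%lld\n",
         (long long)firstPageOnBlock(1), (long long)lastPageOnBlock(1));
  printf("block 2: pages %lld..%lld\n",
         (long long)firstPageOnBlock(2), (long long)lastPageOnBlock(2));
  printf("page 300 -> block %d\n", pageToBlock(300));
  return 0;
}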
-*/ -static void fsPageAddToLru(FileSystem *pFS, Page *pPg){ - assert( pPg->pLruNext==0 && pPg->pLruPrev==0 ); - pPg->pLruPrev = pFS->pLruLast; - if( pPg->pLruPrev ){ - pPg->pLruPrev->pLruNext = pPg; - }else{ - pFS->pLruFirst = pPg; - } - pFS->pLruLast = pPg; -} - -/* -** Page pPg is currently stored in the apHash/nHash hash table. Remove it. -*/ -static void fsPageRemoveFromHash(FileSystem *pFS, Page *pPg){ - int iHash; - Page **pp; - - iHash = fsHashKey(pFS->nHash, pPg->iPg); - for(pp=&pFS->apHash[iHash]; *pp!=pPg; pp=&(*pp)->pHashNext); - *pp = pPg->pHashNext; - pPg->pHashNext = 0; -} - -/* -** Free a Page object allocated by fsPageBuffer(). -*/ -static void fsPageBufferFree(Page *pPg){ - pPg->pFS->nCacheAlloc--; - lsmFree(pPg->pFS->pEnv, pPg->aData); - lsmFree(pPg->pFS->pEnv, pPg); -} - - -/* -** Purge the cache of all non-mmap pages with nRef==0. -*/ -void lsmFsPurgeCache(FileSystem *pFS){ - Page *pPg; - - pPg = pFS->pLruFirst; - while( pPg ){ - Page *pNext = pPg->pLruNext; - assert( pPg->flags & PAGE_FREE ); - fsPageRemoveFromHash(pFS, pPg); - fsPageBufferFree(pPg); - pPg = pNext; - } - pFS->pLruFirst = 0; - pFS->pLruLast = 0; - - assert( pFS->nCacheAlloc<=pFS->nOut && pFS->nCacheAlloc>=0 ); -} - -/* -** Search the hash-table for page iPg. If an entry is round, return a pointer -** to it. Otherwise, return NULL. -** -** Either way, if argument piHash is not NULL set *piHash to the hash slot -** number that page iPg would be stored in before returning. -*/ -static Page *fsPageFindInHash(FileSystem *pFS, LsmPgno iPg, int *piHash){ - Page *p; /* Return value */ - int iHash = fsHashKey(pFS->nHash, iPg); - - if( piHash ) *piHash = iHash; - for(p=pFS->apHash[iHash]; p; p=p->pHashNext){ - if( p->iPg==iPg) break; - } - return p; -} - -/* -** Allocate and return a non-mmap Page object. If there are already -** nCacheMax such Page objects outstanding, try to recycle an existing -** Page instead. -*/ -static int fsPageBuffer( - FileSystem *pFS, - Page **ppOut -){ - int rc = LSM_OK; - Page *pPage = 0; - if( pFS->pLruFirst==0 || pFS->nCacheAllocnCacheMax ){ - /* Allocate a new Page object */ - pPage = lsmMallocZero(pFS->pEnv, sizeof(Page)); - if( !pPage ){ - rc = LSM_NOMEM_BKPT; - }else{ - pPage->aData = (u8 *)lsmMalloc(pFS->pEnv, pFS->nPagesize); - if( !pPage->aData ){ - lsmFree(pFS->pEnv, pPage); - rc = LSM_NOMEM_BKPT; - pPage = 0; - }else{ - pFS->nCacheAlloc++; - } - } - }else{ - /* Reuse an existing Page object */ - u8 *aData; - pPage = pFS->pLruFirst; - aData = pPage->aData; - fsPageRemoveFromLru(pFS, pPage); - fsPageRemoveFromHash(pFS, pPage); - - memset(pPage, 0, sizeof(Page)); - pPage->aData = aData; - } - - if( pPage ){ - pPage->flags = PAGE_FREE; - } - *ppOut = pPage; - return rc; -} - -/* -** Assuming *pRc is initially LSM_OK, attempt to ensure that the -** memory-mapped region is at least iSz bytes in size. If it is not already, -** iSz bytes in size, extend it and update the pointers associated with any -** outstanding Page objects. -** -** If *pRc is not LSM_OK when this function is called, it is a no-op. -** Otherwise, *pRc is set to an lsm error code if an error occurs, or -** left unmodified otherwise. -** -** This function is never called in compressed database mode. 
-*/ -static void fsGrowMapping( - FileSystem *pFS, /* File system object */ - i64 iSz, /* Minimum size to extend mapping to */ - int *pRc /* IN/OUT: Error code */ -){ - assert( pFS->pCompress==0 ); - assert( PAGE_HASPREV==4 ); - - if( *pRc==LSM_OK && iSz>pFS->nMap ){ - int rc; - u8 *aOld = pFS->pMap; - rc = lsmEnvRemap(pFS->pEnv, pFS->fdDb, iSz, &pFS->pMap, &pFS->nMap); - if( rc==LSM_OK && pFS->pMap!=aOld ){ - Page *pFix; - i64 iOff = (u8 *)pFS->pMap - aOld; - for(pFix=pFS->pMapped; pFix; pFix=pFix->pMappedNext){ - pFix->aData += iOff; - } - lsmSortedRemap(pFS->pDb); - } - *pRc = rc; - } -} - -/* -** If it is mapped, unmap the database file. -*/ -int lsmFsUnmap(FileSystem *pFS){ - int rc = LSM_OK; - if( pFS ){ - rc = lsmEnvRemap(pFS->pEnv, pFS->fdDb, -1, &pFS->pMap, &pFS->nMap); - } - return rc; -} - -/* -** fsync() the database file. -*/ -int lsmFsSyncDb(FileSystem *pFS, int nBlock){ - return lsmEnvSync(pFS->pEnv, pFS->fdDb); -} - -/* -** If block iBlk has been redirected according to the redirections in the -** object passed as the first argument, return the destination block to -** which it is redirected. Otherwise, return a copy of iBlk. -*/ -static int fsRedirectBlock(Redirect *p, int iBlk){ - if( p ){ - int i; - for(i=0; in; i++){ - if( iBlk==p->a[i].iFrom ) return p->a[i].iTo; - } - } - assert( iBlk!=0 ); - return iBlk; -} - -/* -** If page iPg has been redirected according to the redirections in the -** object passed as the second argument, return the destination page to -** which it is redirected. Otherwise, return a copy of iPg. -*/ -LsmPgno lsmFsRedirectPage(FileSystem *pFS, Redirect *pRedir, LsmPgno iPg){ - LsmPgno iReal = iPg; - - if( pRedir ){ - const int nPagePerBlock = ( - pFS->pCompress ? pFS->nBlocksize : (pFS->nBlocksize / pFS->nPagesize) - ); - int iBlk = fsPageToBlock(pFS, iPg); - int i; - for(i=0; in; i++){ - int iFrom = pRedir->a[i].iFrom; - if( iFrom>iBlk ) break; - if( iFrom==iBlk ){ - int iTo = pRedir->a[i].iTo; - iReal = iPg - (LsmPgno)(iFrom - iTo) * nPagePerBlock; - if( iTo==1 ){ - iReal += (fsFirstPageOnBlock(pFS, 1)-1); - } - break; - } - } - } - - assert( iReal!=0 ); - return iReal; -} - -/* Required by the circular fsBlockNext<->fsPageGet dependency. */ -static int fsPageGet(FileSystem *, Segment *, LsmPgno, int, Page **, int *); - -/* -** Parameter iBlock is a database file block. This function reads the value -** stored in the blocks "next block" pointer and stores it in *piNext. -** LSM_OK is returned if everything is successful, or an LSM error code -** otherwise. 
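The block-redirection logic in lsmFsRedirectPage() above translates a page number whose block has been moved by an in-progress merge: the page keeps its offset within the block but is shifted by a whole number of blocks. Below is a minimal sketch of that shift, not part of the original file, assuming 256 pages per block and ignoring the special handling of block 1 and of compressed databases.

/* Illustrative sketch; not part of the deleted lsm_file.c. */
#include <stdio.h>
#include <stdint.h>

typedef struct { int iFrom; int iTo; } RedirectEntry;

/* Return the real location of page iPg given a sorted list of
** (iFrom -> iTo) block redirects, mirroring the arithmetic of
** lsmFsRedirectPage() for a plain (non-compressed) database. */
static int64_t redirectPage(const RedirectEntry *a, int n, int64_t iPg){
  const int nPagePerBlock = 256;                 /* assumed */
  int iBlk = (int)(1 + (iPg-1)/nPagePerBlock);   /* block containing iPg */
  int i;
  for(i=0; i<n; i++){
    if( a[i].iFrom>iBlk ) break;
    if( a[i].iFrom==iBlk ){
      return iPg - (int64_t)(a[i].iFrom - a[i].iTo) * nPagePerBlock;
    }
  }
  return iPg;                                    /* block not redirected */
}

int main(void){
  RedirectEntry aRedir[] = { {5, 2} };  /* block 5 now lives at block 2 */
  /* Page 1100 is on block 5 (pages 1025..1280); after redirection it is
  ** found three blocks (768 pages) earlier, at page 332 on block 2. */
  printf("%lld\n", (long long)redirectPage(aRedir, 1, 1100));  /* 332 */
  printf("%lld\n", (long long)redirectPage(aRedir, 1, 100));   /* 100 */
  return 0;
}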
-*/ -static int fsBlockNext( - FileSystem *pFS, /* File-system object handle */ - Segment *pSeg, /* Use this segment for block redirects */ - int iBlock, /* Read field from this block */ - int *piNext /* OUT: Next block in linked list */ -){ - int rc; - int iRead; /* Read block from here */ - - if( pSeg ){ - iRead = fsRedirectBlock(pSeg->pRedirect, iBlock); - }else{ - iRead = iBlock; - } - - assert( pFS->nMapLimit==0 || pFS->pCompress==0 ); - if( pFS->pCompress ){ - i64 iOff; /* File offset to read data from */ - u8 aNext[4]; /* 4-byte pointer read from db file */ - - iOff = (i64)iRead * pFS->nBlocksize - sizeof(aNext); - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff, aNext, sizeof(aNext)); - if( rc==LSM_OK ){ - *piNext = (int)lsmGetU32(aNext); - } - }else{ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - Page *pLast; - rc = fsPageGet(pFS, 0, iRead*nPagePerBlock, 0, &pLast, 0); - if( rc==LSM_OK ){ - *piNext = lsmGetU32(&pLast->aData[pFS->nPagesize-4]); - lsmFsPageRelease(pLast); - } - } - - if( pSeg ){ - *piNext = fsRedirectBlock(pSeg->pRedirect, *piNext); - } - return rc; -} - -/* -** Return the page number of the last page on the same block as page iPg. -*/ -LsmPgno fsLastPageOnPagesBlock(FileSystem *pFS, LsmPgno iPg){ - return fsLastPageOnBlock(pFS, fsPageToBlock(pFS, iPg)); -} - -/* -** Read nData bytes of data from offset iOff of the database file into -** buffer aData. If this means reading past the end of a block, follow -** the block pointer to the next block and continue reading. -** -** Offset iOff is an absolute offset - not subject to any block redirection. -** However any block pointer followed is. Use pSeg->pRedirect in this case. -** -** This function is only called in compressed database mode. -*/ -static int fsReadData( - FileSystem *pFS, /* File-system handle */ - Segment *pSeg, /* Block redirection */ - i64 iOff, /* Read data from this offset */ - u8 *aData, /* Buffer to read data into */ - int nData /* Number of bytes to read */ -){ - i64 iEob; /* End of block */ - int nRead; - int rc; - - assert( pFS->pCompress ); - - iEob = fsLastPageOnPagesBlock(pFS, iOff) + 1; - nRead = (int)LSM_MIN(iEob - iOff, nData); - - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff, aData, nRead); - if( rc==LSM_OK && nRead!=nData ){ - int iBlk; - - rc = fsBlockNext(pFS, pSeg, fsPageToBlock(pFS, iOff), &iBlk); - if( rc==LSM_OK ){ - i64 iOff2 = fsFirstPageOnBlock(pFS, iBlk); - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff2, &aData[nRead], nData-nRead); - } - } - - return rc; -} - -/* -** Parameter iBlock is a database file block. This function reads the value -** stored in the blocks "previous block" pointer and stores it in *piPrev. -** LSM_OK is returned if everything is successful, or an LSM error code -** otherwise. -*/ -static int fsBlockPrev( - FileSystem *pFS, /* File-system object handle */ - Segment *pSeg, /* Use this segment for block redirects */ - int iBlock, /* Read field from this block */ - int *piPrev /* OUT: Previous block in linked list */ -){ - int rc = LSM_OK; /* Return code */ - - assert( pFS->nMapLimit==0 || pFS->pCompress==0 ); - assert( iBlock>0 ); - - if( pFS->pCompress ){ - i64 iOff = fsFirstPageOnBlock(pFS, iBlock) - 4; - u8 aPrev[4]; /* 4-byte pointer read from db file */ - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff, aPrev, sizeof(aPrev)); - if( rc==LSM_OK ){ - Redirect *pRedir = (pSeg ? 
pSeg->pRedirect : 0); - *piPrev = fsRedirectBlock(pRedir, (int)lsmGetU32(aPrev)); - } - }else{ - assert( 0 ); - } - return rc; -} - -/* -** Encode and decode routines for record size fields. -*/ -static void putRecordSize(u8 *aBuf, int nByte, int bFree){ - aBuf[0] = (u8)(nByte >> 14) | 0x80; - aBuf[1] = ((u8)(nByte >> 7) & 0x7F) | (bFree ? 0x00 : 0x80); - aBuf[2] = (u8)nByte | 0x80; -} -static int getRecordSize(u8 *aBuf, int *pbFree){ - int nByte; - nByte = (aBuf[0] & 0x7F) << 14; - nByte += (aBuf[1] & 0x7F) << 7; - nByte += (aBuf[2] & 0x7F); - *pbFree = !(aBuf[1] & 0x80); - return nByte; -} - -/* -** Subtract iSub from database file offset iOff and set *piRes to the -** result. If doing so means passing the start of a block, follow the -** block pointer stored in the first 4 bytes of the block. -** -** Offset iOff is an absolute offset - not subject to any block redirection. -** However any block pointer followed is. Use pSeg->pRedirect in this case. -** -** Return LSM_OK if successful or an lsm error code if an error occurs. -*/ -static int fsSubtractOffset( - FileSystem *pFS, - Segment *pSeg, - i64 iOff, - int iSub, - i64 *piRes -){ - i64 iStart; - int iBlk = 0; - int rc; - - assert( pFS->pCompress ); - - iStart = fsFirstPageOnBlock(pFS, fsPageToBlock(pFS, iOff)); - if( (iOff-iSub)>=iStart ){ - *piRes = (iOff-iSub); - return LSM_OK; - } - - rc = fsBlockPrev(pFS, pSeg, fsPageToBlock(pFS, iOff), &iBlk); - *piRes = fsLastPageOnBlock(pFS, iBlk) - iSub + (iOff - iStart + 1); - return rc; -} - -/* -** Add iAdd to database file offset iOff and set *piRes to the -** result. If doing so means passing the end of a block, follow the -** block pointer stored in the last 4 bytes of the block. -** -** Offset iOff is an absolute offset - not subject to any block redirection. -** However any block pointer followed is. Use pSeg->pRedirect in this case. -** -** Return LSM_OK if successful or an lsm error code if an error occurs. -*/ -static int fsAddOffset( - FileSystem *pFS, - Segment *pSeg, - i64 iOff, - int iAdd, - i64 *piRes -){ - i64 iEob; - int iBlk; - int rc; - - assert( pFS->pCompress ); - - iEob = fsLastPageOnPagesBlock(pFS, iOff); - if( (iOff+iAdd)<=iEob ){ - *piRes = (iOff+iAdd); - return LSM_OK; - } - - rc = fsBlockNext(pFS, pSeg, fsPageToBlock(pFS, iOff), &iBlk); - *piRes = fsFirstPageOnBlock(pFS, iBlk) + iAdd - (iEob - iOff + 1); - return rc; -} - -/* -** If it is not already allocated, allocate either the FileSystem.aOBuffer (if -** bWrite is true) or the FileSystem.aIBuffer (if bWrite is false). Return -** LSM_OK if successful if the attempt to allocate memory fails. -*/ -static int fsAllocateBuffer(FileSystem *pFS, int bWrite){ - u8 **pp; /* Pointer to either aIBuffer or aOBuffer */ - - assert( pFS->pCompress ); - - /* If neither buffer has been allocated, figure out how large they - ** should be. Store this value in FileSystem.nBuffer. */ - if( pFS->nBuffer==0 ){ - assert( pFS->aIBuffer==0 && pFS->aOBuffer==0 ); - pFS->nBuffer = pFS->pCompress->xBound(pFS->pCompress->pCtx, pFS->nPagesize); - if( pFS->nBuffer<(pFS->szSector+6) ){ - pFS->nBuffer = pFS->szSector+6; - } - } - - pp = (bWrite ? &pFS->aOBuffer : &pFS->aIBuffer); - if( *pp==0 ){ - *pp = lsmMalloc(pFS->pEnv, LSM_MAX(pFS->nBuffer, pFS->nPagesize)); - if( *pp==0 ) return LSM_NOMEM_BKPT; - } - - return LSM_OK; -} - -/* -** This function is only called in compressed database mode. It reads and -** uncompresses the compressed data for page pPg from the database and -** populates the pPg->aData[] buffer and pPg->nCompress field. 
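The 3-byte record-size header used by the compressed-database code above packs a 21-bit size across three bytes, sets the top bit of each byte as a marker, and reuses the top bit of the middle byte to distinguish page records from free space. The standalone round trip below copies putRecordSize()/getRecordSize() from the code above purely for illustration; the test harness around them is not part of the original file.

/* Illustrative sketch; not part of the deleted lsm_file.c. */
#include <assert.h>
#include <stdio.h>

typedef unsigned char u8;

static void putRecordSize(u8 *aBuf, int nByte, int bFree){
  aBuf[0] = (u8)(nByte >> 14) | 0x80;
  aBuf[1] = ((u8)(nByte >> 7) & 0x7F) | (bFree ? 0x00 : 0x80);
  aBuf[2] = (u8)nByte | 0x80;
}
static int getRecordSize(u8 *aBuf, int *pbFree){
  int nByte;
  nByte  = (aBuf[0] & 0x7F) << 14;
  nByte += (aBuf[1] & 0x7F) << 7;
  nByte += (aBuf[2] & 0x7F);
  *pbFree = !(aBuf[1] & 0x80);
  return nByte;
}

int main(void){
  u8 aBuf[3];
  int bFree = -1;
  /* Sizes below 2^21 (2MB) round-trip; bFree==0 marks a real page record. */
  putRecordSize(aBuf, 300000, 0);
  assert( getRecordSize(aBuf, &bFree)==300000 && bFree==0 );
  printf("300000 -> %02x %02x %02x (bFree=%d)\n",
         aBuf[0], aBuf[1], aBuf[2], bFree);
  return 0;
}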
-** -** It is possible that instead of a page record, there is free space -** at offset pPg->iPgno. In this case no data is read from the file, but -** output variable *pnSpace is set to the total number of free bytes. -** -** LSM_OK is returned if successful, or an LSM error code otherwise. -*/ -static int fsReadPagedata( - FileSystem *pFS, /* File-system handle */ - Segment *pSeg, /* pPg is part of this segment */ - Page *pPg, /* Page to read and uncompress data for */ - int *pnSpace /* OUT: Total bytes of free space */ -){ - lsm_compress *p = pFS->pCompress; - i64 iOff = pPg->iPg; - u8 aSz[3]; - int rc; - - assert( p && pPg->nCompress==0 ); - - if( fsAllocateBuffer(pFS, 0) ) return LSM_NOMEM; - - rc = fsReadData(pFS, pSeg, iOff, aSz, sizeof(aSz)); - - if( rc==LSM_OK ){ - int bFree; - if( aSz[0] & 0x80 ){ - pPg->nCompress = (int)getRecordSize(aSz, &bFree); - }else{ - pPg->nCompress = (int)aSz[0] - sizeof(aSz)*2; - bFree = 1; - } - if( bFree ){ - if( pnSpace ){ - *pnSpace = pPg->nCompress + sizeof(aSz)*2; - }else{ - rc = LSM_CORRUPT_BKPT; - } - }else{ - rc = fsAddOffset(pFS, pSeg, iOff, 3, &iOff); - if( rc==LSM_OK ){ - if( pPg->nCompress>pFS->nBuffer ){ - rc = LSM_CORRUPT_BKPT; - }else{ - rc = fsReadData(pFS, pSeg, iOff, pFS->aIBuffer, pPg->nCompress); - } - if( rc==LSM_OK ){ - int n = pFS->nPagesize; - rc = p->xUncompress(p->pCtx, - (char *)pPg->aData, &n, - (const char *)pFS->aIBuffer, pPg->nCompress - ); - if( rc==LSM_OK && n!=pPg->pFS->nPagesize ){ - rc = LSM_CORRUPT_BKPT; - } - } - } - } - } - return rc; -} - -/* -** Return a handle for a database page. -** -** If this file-system object is accessing a compressed database it may be -** that there is no page record at database file offset iPg. Instead, there -** may be a free space record. In this case, set *ppPg to NULL and *pnSpace -** to the total number of free bytes before returning. -** -** If no error occurs, LSM_OK is returned. Otherwise, an lsm error code. -*/ -static int fsPageGet( - FileSystem *pFS, /* File-system handle */ - Segment *pSeg, /* Block redirection to use (or NULL) */ - LsmPgno iPg, /* Page id */ - int noContent, /* True to not load content from disk */ - Page **ppPg, /* OUT: New page handle */ - int *pnSpace /* OUT: Bytes of free space */ -){ - Page *p; - int iHash; - int rc = LSM_OK; - - /* In most cases iReal is the same as iPg. Except, if pSeg->pRedirect is - ** not NULL, and the block containing iPg has been redirected, then iReal - ** is the page number after redirection. */ - LsmPgno iReal = lsmFsRedirectPage(pFS, (pSeg ? pSeg->pRedirect : 0), iPg); - - assert_lists_are_ok(pFS); - assert( iPg>=fsFirstPageOnBlock(pFS, 1) ); - assert( iReal>=fsFirstPageOnBlock(pFS, 1) ); - *ppPg = 0; - - /* Search the hash-table for the page */ - p = fsPageFindInHash(pFS, iReal, &iHash); - - if( p ){ - assert( p->flags & PAGE_FREE ); - if( p->nRef==0 ) fsPageRemoveFromLru(pFS, p); - }else{ - - if( fsMmapPage(pFS, iReal) ){ - i64 iEnd = (i64)iReal * pFS->nPagesize; - fsGrowMapping(pFS, iEnd, &rc); - if( rc!=LSM_OK ) return rc; - - if( pFS->pFree ){ - p = pFS->pFree; - pFS->pFree = p->pFreeNext; - assert( p->nRef==0 ); - }else{ - p = lsmMallocZeroRc(pFS->pEnv, sizeof(Page), &rc); - if( rc ) return rc; - p->pFS = pFS; - } - p->aData = &((u8 *)pFS->pMap)[pFS->nPagesize * (iReal-1)]; - p->iPg = iReal; - - /* This page now carries a pointer to the mapping. Link it in to - ** the FileSystem.pMapped list. 
*/ - assert( p->pMappedNext==0 ); - p->pMappedNext = pFS->pMapped; - pFS->pMapped = p; - - assert( pFS->pCompress==0 ); - assert( (p->flags & PAGE_FREE)==0 ); - }else{ - rc = fsPageBuffer(pFS, &p); - if( rc==LSM_OK ){ - int nSpace = 0; - p->iPg = iReal; - p->nRef = 0; - p->pFS = pFS; - assert( p->flags==0 || p->flags==PAGE_FREE ); - -#ifdef LSM_DEBUG - memset(p->aData, 0x56, pFS->nPagesize); -#endif - assert( p->pLruNext==0 && p->pLruPrev==0 ); - if( noContent==0 ){ - if( pFS->pCompress ){ - rc = fsReadPagedata(pFS, pSeg, p, &nSpace); - }else{ - int nByte = pFS->nPagesize; - i64 iOff = (i64)(iReal-1) * pFS->nPagesize; - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff, p->aData, nByte); - } - pFS->nRead++; - } - - /* If the xRead() call was successful (or not attempted), link the - ** page into the page-cache hash-table. Otherwise, if it failed, - ** free the buffer. */ - if( rc==LSM_OK && nSpace==0 ){ - p->pHashNext = pFS->apHash[iHash]; - pFS->apHash[iHash] = p; - }else{ - fsPageBufferFree(p); - p = 0; - if( pnSpace ) *pnSpace = nSpace; - } - } - } - - assert( (rc==LSM_OK && (p || (pnSpace && *pnSpace))) - || (rc!=LSM_OK && p==0) - ); - } - - if( rc==LSM_OK && p ){ - if( pFS->pCompress==0 && (fsIsLast(pFS, iReal) || fsIsFirst(pFS, iReal)) ){ - p->nData = pFS->nPagesize - 4; - if( fsIsFirst(pFS, iReal) && p->nRef==0 ){ - p->aData += 4; - p->flags |= PAGE_HASPREV; - } - }else{ - p->nData = pFS->nPagesize; - } - pFS->nOut += (p->nRef==0); - p->nRef++; - } - *ppPg = p; - return rc; -} - -/* -** Read the 64-bit checkpoint id of the checkpoint currently stored on meta -** page iMeta of the database file. If no error occurs, store the id value -** in *piVal and return LSM_OK. Otherwise, return an LSM error code and leave -** *piVal unmodified. -** -** If a checkpointer connection is currently updating meta-page iMeta, or an -** earlier checkpointer crashed while doing so, the value read into *piVal -** may be garbage. It is the callers responsibility to deal with this. -*/ -int lsmFsReadSyncedId(lsm_db *db, int iMeta, i64 *piVal){ - FileSystem *pFS = db->pFS; - int rc = LSM_OK; - - assert( iMeta==1 || iMeta==2 ); - if( pFS->nMapLimit>0 ){ - fsGrowMapping(pFS, iMeta*LSM_META_PAGE_SIZE, &rc); - if( rc==LSM_OK ){ - *piVal = (i64)lsmGetU64(&((u8 *)pFS->pMap)[(iMeta-1)*LSM_META_PAGE_SIZE]); - } - }else{ - MetaPage *pMeta = 0; - rc = lsmFsMetaPageGet(pFS, 0, iMeta, &pMeta); - if( rc==LSM_OK ){ - *piVal = (i64)lsmGetU64(pMeta->aData); - lsmFsMetaPageRelease(pMeta); - } - } - - return rc; -} - - -/* -** Return true if the first or last page of segment pRun falls between iFirst -** and iLast, inclusive, and pRun is not equal to pIgnore. -*/ -static int fsRunEndsBetween( - Segment *pRun, - Segment *pIgnore, - LsmPgno iFirst, - LsmPgno iLast -){ - return (pRun!=pIgnore && ( - (pRun->iFirst>=iFirst && pRun->iFirst<=iLast) - || (pRun->iLastPg>=iFirst && pRun->iLastPg<=iLast) - )); -} - -/* -** Return true if level pLevel contains a segment other than pIgnore for -** which the first or last page is between iFirst and iLast, inclusive. -*/ -static int fsLevelEndsBetween( - Level *pLevel, - Segment *pIgnore, - LsmPgno iFirst, - LsmPgno iLast -){ - int i; - - if( fsRunEndsBetween(&pLevel->lhs, pIgnore, iFirst, iLast) ){ - return 1; - } - for(i=0; inRight; i++){ - if( fsRunEndsBetween(&pLevel->aRhs[i], pIgnore, iFirst, iLast) ){ - return 1; - } - } - - return 0; -} - -/* -** Block iBlk is no longer in use by segment pIgnore. If it is not in use -** by any other segment, move it to the free block list. 
-*/ -static int fsFreeBlock( - FileSystem *pFS, /* File system object */ - Snapshot *pSnapshot, /* Worker snapshot */ - Segment *pIgnore, /* Ignore this run when searching */ - int iBlk /* Block number of block to free */ -){ - int rc = LSM_OK; /* Return code */ - LsmPgno iFirst; /* First page on block iBlk */ - LsmPgno iLast; /* Last page on block iBlk */ - Level *pLevel; /* Used to iterate through levels */ - - int iIn; /* Used to iterate through append points */ - int iOut = 0; /* Used to output append points */ - LsmPgno *aApp = pSnapshot->aiAppend; - - iFirst = fsFirstPageOnBlock(pFS, iBlk); - iLast = fsLastPageOnBlock(pFS, iBlk); - - /* Check if any other run in the snapshot has a start or end page - ** within this block. If there is such a run, return early. */ - for(pLevel=lsmDbSnapshotLevel(pSnapshot); pLevel; pLevel=pLevel->pNext){ - if( fsLevelEndsBetween(pLevel, pIgnore, iFirst, iLast) ){ - return LSM_OK; - } - } - - /* Remove any entries that lie on this block from the append-list. */ - for(iIn=0; iIniLast ){ - aApp[iOut++] = aApp[iIn]; - } - } - while( iOutpDb, iBlk); - } - return rc; -} - -/* -** Delete or otherwise recycle the blocks currently occupied by run pDel. -*/ -int lsmFsSortedDelete( - FileSystem *pFS, - Snapshot *pSnapshot, - int bZero, /* True to zero the Segment structure */ - Segment *pDel -){ - if( pDel->iFirst ){ - int rc = LSM_OK; - - int iBlk; - int iLastBlk; - - iBlk = fsPageToBlock(pFS, pDel->iFirst); - iLastBlk = fsPageToBlock(pFS, pDel->iLastPg); - - /* Mark all blocks currently used by this sorted run as free */ - while( iBlk && rc==LSM_OK ){ - int iNext = 0; - if( iBlk!=iLastBlk ){ - rc = fsBlockNext(pFS, pDel, iBlk, &iNext); - }else if( bZero==0 && pDel->iLastPg!=fsLastPageOnBlock(pFS, iLastBlk) ){ - break; - } - rc = fsFreeBlock(pFS, pSnapshot, pDel, iBlk); - iBlk = iNext; - } - - if( pDel->pRedirect ){ - assert( pDel->pRedirect==&pSnapshot->redirect ); - pSnapshot->redirect.n = 0; - } - - if( bZero ) memset(pDel, 0, sizeof(Segment)); - } - return LSM_OK; -} - -/* -** aPgno is an array containing nPgno page numbers. Return the smallest page -** number from the array that falls on block iBlk. Or, if none of the pages -** in aPgno[] fall on block iBlk, return 0. -*/ -static LsmPgno firstOnBlock( - FileSystem *pFS, - int iBlk, - LsmPgno *aPgno, - int nPgno -){ - LsmPgno iRet = 0; - int i; - for(i=0; ipRedirect, iPg)); -} - -/* -** Return true if the second argument is not NULL and any of the first -** last or root pages lie on a redirected block. -*/ -static int fsSegmentRedirects(FileSystem *pFS, Segment *p){ - return (p && ( - fsPageRedirects(pFS, p, p->iFirst) - || fsPageRedirects(pFS, p, p->iRoot) - || fsPageRedirects(pFS, p, p->iLastPg) - )); -} -#endif - -/* -** Argument aPgno is an array of nPgno page numbers. All pages belong to -** the segment pRun. This function gobbles from the start of the run to the -** first page that appears in aPgno[] (i.e. so that the aPgno[] entry is -** the new first page of the run). 
-*/ -void lsmFsGobble( - lsm_db *pDb, - Segment *pRun, - LsmPgno *aPgno, - int nPgno -){ - int rc = LSM_OK; - FileSystem *pFS = pDb->pFS; - Snapshot *pSnapshot = pDb->pWorker; - int iBlk; - - assert( pRun->nSize>0 ); - assert( 0==fsSegmentRedirects(pFS, pRun) ); - assert( nPgno>0 && 0==fsPageRedirects(pFS, pRun, aPgno[0]) ); - - iBlk = fsPageToBlock(pFS, pRun->iFirst); - pRun->nSize += (int)(pRun->iFirst - fsFirstPageOnBlock(pFS, iBlk)); - - while( rc==LSM_OK ){ - int iNext = 0; - LsmPgno iFirst = firstOnBlock(pFS, iBlk, aPgno, nPgno); - if( iFirst ){ - pRun->iFirst = iFirst; - break; - } - rc = fsBlockNext(pFS, pRun, iBlk, &iNext); - if( rc==LSM_OK ) rc = fsFreeBlock(pFS, pSnapshot, pRun, iBlk); - pRun->nSize -= (int)( - 1 + fsLastPageOnBlock(pFS, iBlk) - fsFirstPageOnBlock(pFS, iBlk) - ); - iBlk = iNext; - } - - pRun->nSize -= (int)(pRun->iFirst - fsFirstPageOnBlock(pFS, iBlk)); - assert( pRun->nSize>0 ); -} - -/* -** This function is only used in compressed database mode. -** -** Argument iPg is the page number (byte offset) of a page within segment -** pSeg. The page record, including all headers, is nByte bytes in size. -** Before returning, set *piNext to the page number of the next page in -** the segment, or to zero if iPg is the last. -** -** In other words, do: -** -** *piNext = iPg + nByte; -** -** But take block overflow and redirection into account. -*/ -static int fsNextPageOffset( - FileSystem *pFS, /* File system object */ - Segment *pSeg, /* Segment to move within */ - LsmPgno iPg, /* Offset of current page */ - int nByte, /* Size of current page including headers */ - LsmPgno *piNext /* OUT: Offset of next page. Or zero (EOF) */ -){ - LsmPgno iNext; - int rc; - - assert( pFS->pCompress ); - - rc = fsAddOffset(pFS, pSeg, iPg, nByte-1, &iNext); - if( pSeg && iNext==pSeg->iLastPg ){ - iNext = 0; - }else if( rc==LSM_OK ){ - rc = fsAddOffset(pFS, pSeg, iNext, 1, &iNext); - } - - *piNext = iNext; - return rc; -} - -/* -** This function is only used in compressed database mode. -** -** Argument iPg is the page number of a pagethat appears in segment pSeg. -** This function determines the page number of the previous page in the -** same run. *piPrev is set to the previous page number before returning. -** -** LSM_OK is returned if no error occurs. Otherwise, an lsm error code. -** If any value other than LSM_OK is returned, then the final value of -** *piPrev is undefined. -*/ -static int fsGetPageBefore( - FileSystem *pFS, - Segment *pSeg, - LsmPgno iPg, - LsmPgno *piPrev -){ - u8 aSz[3]; - int rc; - i64 iRead; - - assert( pFS->pCompress ); - - rc = fsSubtractOffset(pFS, pSeg, iPg, sizeof(aSz), &iRead); - if( rc==LSM_OK ) rc = fsReadData(pFS, pSeg, iRead, aSz, sizeof(aSz)); - - if( rc==LSM_OK ){ - int bFree; - int nSz; - if( aSz[2] & 0x80 ){ - nSz = getRecordSize(aSz, &bFree) + sizeof(aSz)*2; - }else{ - nSz = (int)(aSz[2] & 0x7F); - bFree = 1; - } - rc = fsSubtractOffset(pFS, pSeg, iPg, nSz, piPrev); - } - - return rc; -} - -/* -** The first argument to this function is a valid reference to a database -** file page that is part of a sorted run. If parameter eDir is -1, this -** function attempts to locate and load the previous page in the same run. -** Or, if eDir is +1, it attempts to find the next page in the same run. -** The results of passing an eDir value other than positive or negative one -** are undefined. -** -** If parameter pRun is not NULL then it must point to the run that page -** pPg belongs to. 
In this case, if pPg is the first or last page of the -** run, and the request is for the previous or next page, respectively, -** *ppNext is set to NULL before returning LSM_OK. If pRun is NULL, then it -** is assumed that the next or previous page, as requested, exists. -** -** If the previous/next page does exist and is successfully loaded, *ppNext -** is set to point to it and LSM_OK is returned. Otherwise, if an error -** occurs, *ppNext is set to NULL and and lsm error code returned. -** -** Page references returned by this function should be released by the -** caller using lsmFsPageRelease(). -*/ -int lsmFsDbPageNext(Segment *pRun, Page *pPg, int eDir, Page **ppNext){ - int rc = LSM_OK; - FileSystem *pFS = pPg->pFS; - LsmPgno iPg = pPg->iPg; - - assert( 0==fsSegmentRedirects(pFS, pRun) ); - if( pFS->pCompress ){ - int nSpace = pPg->nCompress + 2*3; - - do { - if( eDir>0 ){ - rc = fsNextPageOffset(pFS, pRun, iPg, nSpace, &iPg); - }else{ - if( iPg==pRun->iFirst ){ - iPg = 0; - }else{ - rc = fsGetPageBefore(pFS, pRun, iPg, &iPg); - } - } - - nSpace = 0; - if( iPg!=0 ){ - rc = fsPageGet(pFS, pRun, iPg, 0, ppNext, &nSpace); - assert( (*ppNext==0)==(rc!=LSM_OK || nSpace>0) ); - }else{ - *ppNext = 0; - } - }while( nSpace>0 && rc==LSM_OK ); - - }else{ - Redirect *pRedir = pRun ? pRun->pRedirect : 0; - assert( eDir==1 || eDir==-1 ); - if( eDir<0 ){ - if( pRun && iPg==pRun->iFirst ){ - *ppNext = 0; - return LSM_OK; - }else if( fsIsFirst(pFS, iPg) ){ - assert( pPg->flags & PAGE_HASPREV ); - iPg = fsLastPageOnBlock(pFS, lsmGetU32(&pPg->aData[-4])); - }else{ - iPg--; - } - }else{ - if( pRun ){ - if( iPg==pRun->iLastPg ){ - *ppNext = 0; - return LSM_OK; - } - } - - if( fsIsLast(pFS, iPg) ){ - int iBlk = fsRedirectBlock( - pRedir, lsmGetU32(&pPg->aData[pFS->nPagesize-4]) - ); - iPg = fsFirstPageOnBlock(pFS, iBlk); - }else{ - iPg++; - } - } - rc = fsPageGet(pFS, pRun, iPg, 0, ppNext, 0); - } - - return rc; -} - -/* -** This function is called when creating a new segment to determine if the -** first part of it can be written following an existing segment on an -** already allocated block. If it is possible, the page number of the first -** page to use for the new segment is returned. Otherwise zero. -** -** If argument pLvl is not NULL, then this function will not attempt to -** start the new segment immediately following any segment that is part -** of the right-hand-side of pLvl. -*/ -static LsmPgno findAppendPoint(FileSystem *pFS, Level *pLvl){ - int i; - LsmPgno *aiAppend = pFS->pDb->pWorker->aiAppend; - LsmPgno iRet = 0; - - for(i=LSM_APPLIST_SZ-1; iRet==0 && i>=0; i--){ - if( (iRet = aiAppend[i]) ){ - if( pLvl ){ - int iBlk = fsPageToBlock(pFS, iRet); - int j; - for(j=0; iRet && jnRight; j++){ - if( fsPageToBlock(pFS, pLvl->aRhs[j].iLastPg)==iBlk ){ - iRet = 0; - } - } - } - if( iRet ) aiAppend[i] = 0; - } - } - return iRet; -} - -/* -** Append a page to the left-hand-side of pLvl. Set the ref-count to 1 and -** return a pointer to it. The page is writable until either -** lsmFsPagePersist() is called on it or the ref-count drops to zero. -*/ -int lsmFsSortedAppend( - FileSystem *pFS, - Snapshot *pSnapshot, - Level *pLvl, - int bDefer, - Page **ppOut -){ - int rc = LSM_OK; - Page *pPg = 0; - LsmPgno iApp = 0; - LsmPgno iNext = 0; - Segment *p = &pLvl->lhs; - LsmPgno iPrev = p->iLastPg; - - *ppOut = 0; - assert( p->pRedirect==0 ); - - if( pFS->pCompress || bDefer ){ - /* In compressed database mode the page is not assigned a page number - ** or location in the database file at this point. 
This will be done - ** by the lsmFsPagePersist() call. */ - rc = fsPageBuffer(pFS, &pPg); - if( rc==LSM_OK ){ - pPg->pFS = pFS; - pPg->pSeg = p; - pPg->iPg = 0; - pPg->flags |= PAGE_DIRTY; - pPg->nData = pFS->nPagesize; - assert( pPg->aData ); - if( pFS->pCompress==0 ) pPg->nData -= 4; - - pPg->nRef = 1; - pFS->nOut++; - } - }else{ - if( iPrev==0 ){ - iApp = findAppendPoint(pFS, pLvl); - }else if( fsIsLast(pFS, iPrev) ){ - int iNext2; - rc = fsBlockNext(pFS, 0, fsPageToBlock(pFS, iPrev), &iNext2); - if( rc!=LSM_OK ) return rc; - iApp = fsFirstPageOnBlock(pFS, iNext2); - }else{ - iApp = iPrev + 1; - } - - /* If this is the first page allocated, or if the page allocated is the - ** last in the block, also allocate the next block here. */ - if( iApp==0 || fsIsLast(pFS, iApp) ){ - int iNew; /* New block number */ - - rc = lsmBlockAllocate(pFS->pDb, 0, &iNew); - if( rc!=LSM_OK ) return rc; - if( iApp==0 ){ - iApp = fsFirstPageOnBlock(pFS, iNew); - }else{ - iNext = fsFirstPageOnBlock(pFS, iNew); - } - } - - /* Grab the new page. */ - pPg = 0; - rc = fsPageGet(pFS, 0, iApp, 1, &pPg, 0); - assert( rc==LSM_OK || pPg==0 ); - - /* If this is the first or last page of a block, fill in the pointer - ** value at the end of the new page. */ - if( rc==LSM_OK ){ - p->nSize++; - p->iLastPg = iApp; - if( p->iFirst==0 ) p->iFirst = iApp; - pPg->flags |= PAGE_DIRTY; - - if( fsIsLast(pFS, iApp) ){ - lsmPutU32(&pPg->aData[pFS->nPagesize-4], fsPageToBlock(pFS, iNext)); - }else if( fsIsFirst(pFS, iApp) ){ - lsmPutU32(&pPg->aData[-4], fsPageToBlock(pFS, iPrev)); - } - } - } - - *ppOut = pPg; - return rc; -} - -/* -** Mark the segment passed as the second argument as finished. Once a segment -** is marked as finished it is not possible to append any further pages to -** it. -** -** Return LSM_OK if successful or an lsm error code if an error occurs. -*/ -int lsmFsSortedFinish(FileSystem *pFS, Segment *p){ - int rc = LSM_OK; - if( p && p->iLastPg ){ - assert( p->pRedirect==0 ); - - /* Check if the last page of this run happens to be the last of a block. - ** If it is, then an extra block has already been allocated for this run. - ** Shift this extra block back to the free-block list. - ** - ** Otherwise, add the first free page in the last block used by the run - ** to the lAppend list. - */ - if( fsLastPageOnPagesBlock(pFS, p->iLastPg)!=p->iLastPg ){ - int i; - LsmPgno *aiAppend = pFS->pDb->pWorker->aiAppend; - for(i=0; iiLastPg+1; - break; - } - } - }else if( pFS->pCompress==0 ){ - Page *pLast; - rc = fsPageGet(pFS, 0, p->iLastPg, 0, &pLast, 0); - if( rc==LSM_OK ){ - int iBlk = (int)lsmGetU32(&pLast->aData[pFS->nPagesize-4]); - lsmBlockRefree(pFS->pDb, iBlk); - lsmFsPageRelease(pLast); - } - }else{ - int iBlk = 0; - rc = fsBlockNext(pFS, p, fsPageToBlock(pFS, p->iLastPg), &iBlk); - if( rc==LSM_OK ){ - lsmBlockRefree(pFS->pDb, iBlk); - } - } - } - return rc; -} - -/* -** Obtain a reference to page number iPg. -** -** Return LSM_OK if successful, or an lsm error code if an error occurs. -*/ -int lsmFsDbPageGet(FileSystem *pFS, Segment *pSeg, LsmPgno iPg, Page **ppPg){ - return fsPageGet(pFS, pSeg, iPg, 0, ppPg, 0); -} - -/* -** Obtain a reference to the last page in the segment passed as the -** second argument. -** -** Return LSM_OK if successful, or an lsm error code if an error occurs. 
-*/ -int lsmFsDbPageLast(FileSystem *pFS, Segment *pSeg, Page **ppPg){ - int rc; - LsmPgno iPg = pSeg->iLastPg; - if( pFS->pCompress ){ - int nSpace; - iPg++; - do { - nSpace = 0; - rc = fsGetPageBefore(pFS, pSeg, iPg, &iPg); - if( rc==LSM_OK ){ - rc = fsPageGet(pFS, pSeg, iPg, 0, ppPg, &nSpace); - } - }while( rc==LSM_OK && nSpace>0 ); - - }else{ - rc = fsPageGet(pFS, pSeg, iPg, 0, ppPg, 0); - } - return rc; -} - -/* -** Return a reference to meta-page iPg. If successful, LSM_OK is returned -** and *ppPg populated with the new page reference. The reference should -** be released by the caller using lsmFsPageRelease(). -** -** Otherwise, if an error occurs, *ppPg is set to NULL and an LSM error -** code is returned. -*/ -int lsmFsMetaPageGet( - FileSystem *pFS, /* File-system connection */ - int bWrite, /* True for write access, false for read */ - int iPg, /* Either 1 or 2 */ - MetaPage **ppPg /* OUT: Pointer to MetaPage object */ -){ - int rc = LSM_OK; - MetaPage *pPg; - assert( iPg==1 || iPg==2 ); - - pPg = lsmMallocZeroRc(pFS->pEnv, sizeof(Page), &rc); - - if( pPg ){ - i64 iOff = (iPg-1) * pFS->nMetasize; - if( pFS->nMapLimit>0 ){ - fsGrowMapping(pFS, 2*pFS->nMetasize, &rc); - pPg->aData = (u8 *)(pFS->pMap) + iOff; - }else{ - pPg->aData = lsmMallocRc(pFS->pEnv, pFS->nMetasize, &rc); - if( rc==LSM_OK && bWrite==0 ){ - rc = lsmEnvRead( - pFS->pEnv, pFS->fdDb, iOff, pPg->aData, pFS->nMetaRwSize - ); - } -#ifndef NDEBUG - /* pPg->aData causes an uninitialized access via a downstreadm write(). - After discussion on this list, this memory should not, for performance - reasons, be memset. However, tracking down "real" misuse is more - difficult with this "false" positive, so it is set when NDEBUG. - */ - else if( rc==LSM_OK ){ - memset( pPg->aData, 0x77, pFS->nMetasize ); - } -#endif - } - - if( rc!=LSM_OK ){ - if( pFS->nMapLimit==0 ) lsmFree(pFS->pEnv, pPg->aData); - lsmFree(pFS->pEnv, pPg); - pPg = 0; - }else{ - pPg->iPg = iPg; - pPg->bWrite = bWrite; - pPg->pFS = pFS; - } - } - - *ppPg = pPg; - return rc; -} - -/* -** Release a meta-page reference obtained via a call to lsmFsMetaPageGet(). -*/ -int lsmFsMetaPageRelease(MetaPage *pPg){ - int rc = LSM_OK; - if( pPg ){ - FileSystem *pFS = pPg->pFS; - - if( pFS->nMapLimit==0 ){ - if( pPg->bWrite ){ - i64 iOff = (pPg->iPg==2 ? pFS->nMetasize : 0); - int nWrite = pFS->nMetaRwSize; - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iOff, pPg->aData, nWrite); - } - lsmFree(pFS->pEnv, pPg->aData); - } - - lsmFree(pFS->pEnv, pPg); - } - return rc; -} - -/* -** Return a pointer to a buffer containing the data associated with the -** meta-page passed as the first argument. If parameter pnData is not NULL, -** set *pnData to the size of the meta-page in bytes before returning. -*/ -u8 *lsmFsMetaPageData(MetaPage *pPg, int *pnData){ - if( pnData ) *pnData = pPg->pFS->nMetaRwSize; - return pPg->aData; -} - -/* -** Return true if page is currently writable. This is used in assert() -** statements only. -*/ -#ifndef NDEBUG -int lsmFsPageWritable(Page *pPg){ - return (pPg->flags & PAGE_DIRTY) ? 1 : 0; -} -#endif - -/* -** This is called when block iFrom is being redirected to iTo. If page -** number (*piPg) lies on block iFrom, then calculate the equivalent -** page on block iTo and set *piPg to this value before returning. 
-*/ -static void fsMovePage( - FileSystem *pFS, /* File system object */ - int iTo, /* Destination block */ - int iFrom, /* Source block */ - LsmPgno *piPg /* IN/OUT: Page number */ -){ - LsmPgno iPg = *piPg; - if( iFrom==fsPageToBlock(pFS, iPg) ){ - const int nPagePerBlock = ( - pFS->pCompress ? pFS ->nBlocksize : (pFS->nBlocksize / pFS->nPagesize) - ); - *piPg = iPg - (LsmPgno)(iFrom - iTo) * nPagePerBlock; - } -} - -/* -** Copy the contents of block iFrom to block iTo. -** -** It is safe to assume that there are no outstanding references to pages -** on block iTo. And that block iFrom is not currently being written. In -** other words, the data can be read and written directly. -*/ -int lsmFsMoveBlock(FileSystem *pFS, Segment *pSeg, int iTo, int iFrom){ - Snapshot *p = pFS->pDb->pWorker; - int rc = LSM_OK; - int i; - i64 nMap; - - i64 iFromOff = (i64)(iFrom-1) * pFS->nBlocksize; - i64 iToOff = (i64)(iTo-1) * pFS->nBlocksize; - - assert( iTo!=1 ); - assert( iFrom>iTo ); - - /* Grow the mapping as required. */ - nMap = LSM_MIN(pFS->nMapLimit, (i64)iFrom * pFS->nBlocksize); - fsGrowMapping(pFS, nMap, &rc); - - if( rc==LSM_OK ){ - const int nPagePerBlock = (pFS->nBlocksize / pFS->nPagesize); - int nSz = pFS->nPagesize; - u8 *aBuf = 0; - u8 *aData = 0; - - for(i=0; rc==LSM_OK && inMapLimit ){ - u8 *aMap = (u8 *)(pFS->pMap); - aData = &aMap[iOff]; - }else{ - if( aBuf==0 ){ - aBuf = (u8 *)lsmMallocRc(pFS->pEnv, nSz, &rc); - if( aBuf==0 ) break; - } - aData = aBuf; - rc = lsmEnvRead(pFS->pEnv, pFS->fdDb, iOff, aData, nSz); - } - - /* Copy aData to the to page */ - if( rc==LSM_OK ){ - iOff = iToOff + i*nSz; - if( (iOff+nSz)<=pFS->nMapLimit ){ - u8 *aMap = (u8 *)(pFS->pMap); - memcpy(&aMap[iOff], aData, nSz); - }else{ - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iOff, aData, nSz); - } - } - } - lsmFree(pFS->pEnv, aBuf); - lsmFsPurgeCache(pFS); - } - - /* Update append-point list if necessary */ - for(i=0; iaiAppend[i]); - } - - /* Update the Segment structure itself */ - fsMovePage(pFS, iTo, iFrom, &pSeg->iFirst); - fsMovePage(pFS, iTo, iFrom, &pSeg->iLastPg); - fsMovePage(pFS, iTo, iFrom, &pSeg->iRoot); - - return rc; -} - -/* -** Append raw data to a segment. Return the database file offset that the -** data is written to (this may be used as the page number if the data -** being appended is a new page record). -** -** This function is only used in compressed database mode. -*/ -static LsmPgno fsAppendData( - FileSystem *pFS, /* File-system handle */ - Segment *pSeg, /* Segment to append to */ - const u8 *aData, /* Buffer containing data to write */ - int nData, /* Size of buffer aData[] in bytes */ - int *pRc /* IN/OUT: Error code */ -){ - LsmPgno iRet = 0; - int rc = *pRc; - assert( pFS->pCompress ); - if( rc==LSM_OK ){ - int nRem = 0; - int nWrite = 0; - LsmPgno iLastOnBlock; - LsmPgno iApp = pSeg->iLastPg+1; - - /* If this is the first data written into the segment, find an append-point - ** or allocate a new block. */ - if( iApp==1 ){ - pSeg->iFirst = iApp = findAppendPoint(pFS, 0); - if( iApp==0 ){ - int iBlk; - rc = lsmBlockAllocate(pFS->pDb, 0, &iBlk); - pSeg->iFirst = iApp = fsFirstPageOnBlock(pFS, iBlk); - } - } - iRet = iApp; - - /* Write as much data as is possible at iApp (usually all of it). 
*/ - iLastOnBlock = fsLastPageOnPagesBlock(pFS, iApp); - if( rc==LSM_OK ){ - int nSpace = (int)(iLastOnBlock - iApp + 1); - nWrite = LSM_MIN(nData, nSpace); - nRem = nData - nWrite; - assert( nWrite>=0 ); - if( nWrite!=0 ){ - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iApp, aData, nWrite); - } - iApp += nWrite; - } - - /* If required, allocate a new block and write the rest of the data - ** into it. Set the next and previous block pointers to link the new - ** block to the old. */ - assert( nRem<=0 || (iApp-1)==iLastOnBlock ); - if( rc==LSM_OK && (iApp-1)==iLastOnBlock ){ - u8 aPtr[4]; /* Space to serialize a u32 */ - int iBlk; /* New block number */ - - if( nWrite>0 ){ - /* Allocate a new block. */ - rc = lsmBlockAllocate(pFS->pDb, 0, &iBlk); - - /* Set the "next" pointer on the old block */ - if( rc==LSM_OK ){ - assert( iApp==(fsPageToBlock(pFS, iApp)*pFS->nBlocksize)-4 ); - lsmPutU32(aPtr, iBlk); - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iApp, aPtr, sizeof(aPtr)); - } - - /* Set the "prev" pointer on the new block */ - if( rc==LSM_OK ){ - LsmPgno iWrite; - lsmPutU32(aPtr, fsPageToBlock(pFS, iApp)); - iWrite = fsFirstPageOnBlock(pFS, iBlk); - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iWrite-4, aPtr, sizeof(aPtr)); - if( nRem>0 ) iApp = iWrite; - } - }else{ - /* The next block is already allocated. */ - assert( nRem>0 ); - assert( pSeg->pRedirect==0 ); - rc = fsBlockNext(pFS, 0, fsPageToBlock(pFS, iApp), &iBlk); - iRet = iApp = fsFirstPageOnBlock(pFS, iBlk); - } - - /* Write the remaining data into the new block */ - if( rc==LSM_OK && nRem>0 ){ - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iApp, &aData[nWrite], nRem); - iApp += nRem; - } - } - - pSeg->iLastPg = iApp-1; - *pRc = rc; - } - - return iRet; -} - -/* -** This function is only called in compressed database mode. It -** compresses the contents of page pPg and writes the result to the -** buffer at pFS->aOBuffer. The size of the compressed data is stored in -** pPg->nCompress. -** -** If buffer pFS->aOBuffer[] has not been allocated then this function -** allocates it. If this fails, LSM_NOMEM is returned. Otherwise, LSM_OK. -*/ -static int fsCompressIntoBuffer(FileSystem *pFS, Page *pPg){ - lsm_compress *p = pFS->pCompress; - - if( fsAllocateBuffer(pFS, 1) ) return LSM_NOMEM; - assert( pPg->nData==pFS->nPagesize ); - - pPg->nCompress = pFS->nBuffer; - return p->xCompress(p->pCtx, - (char *)pFS->aOBuffer, &pPg->nCompress, - (const char *)pPg->aData, pPg->nData - ); -} - -/* -** Append a new page to segment pSeg. Set output variable *piNew to the -** page number of the new page before returning. -** -** If the new page is the last on its block, then the 'next' block that -** will be used by the segment is allocated here too. In this case output -** variable *piNext is set to the block number of the next block. -** -** If the new page is the first on its block but not the first in the -** entire segment, set output variable *piPrev to the block number of -** the previous block in the segment. -** -** LSM_OK is returned if successful, or an lsm error code otherwise. If -** any value other than LSM_OK is returned, then the final value of all -** output variables is undefined. -*/ -static int fsAppendPage( - FileSystem *pFS, - Segment *pSeg, - LsmPgno *piNew, - int *piPrev, - int *piNext -){ - LsmPgno iPrev = pSeg->iLastPg; - int rc; - assert( iPrev!=0 ); - - *piPrev = 0; - *piNext = 0; - - if( fsIsLast(pFS, iPrev) ){ - /* Grab the first page on the next block (which has already be - ** allocated). 
In this case set *piPrev to tell the caller to set - ** the "previous block" pointer in the first 4 bytes of the page. - */ - int iNext; - int iBlk = fsPageToBlock(pFS, iPrev); - assert( pSeg->pRedirect==0 ); - rc = fsBlockNext(pFS, 0, iBlk, &iNext); - if( rc!=LSM_OK ) return rc; - *piNew = fsFirstPageOnBlock(pFS, iNext); - *piPrev = iBlk; - }else{ - *piNew = iPrev+1; - if( fsIsLast(pFS, *piNew) ){ - /* Allocate the next block here. */ - int iBlk; - rc = lsmBlockAllocate(pFS->pDb, 0, &iBlk); - if( rc!=LSM_OK ) return rc; - *piNext = iBlk; - } - } - - pSeg->nSize++; - pSeg->iLastPg = *piNew; - return LSM_OK; -} - -/* -** Flush all pages in the FileSystem.pWaiting list to disk. -*/ -void lsmFsFlushWaiting(FileSystem *pFS, int *pRc){ - int rc = *pRc; - Page *pPg; - - pPg = pFS->pWaiting; - pFS->pWaiting = 0; - - while( pPg ){ - Page *pNext = pPg->pWaitingNext; - if( rc==LSM_OK ) rc = lsmFsPagePersist(pPg); - assert( pPg->nRef==1 ); - lsmFsPageRelease(pPg); - pPg = pNext; - } - *pRc = rc; -} - -/* -** If there exists a hash-table entry associated with page iPg, remove it. -*/ -static void fsRemoveHashEntry(FileSystem *pFS, LsmPgno iPg){ - Page *p; - int iHash = fsHashKey(pFS->nHash, iPg); - - for(p=pFS->apHash[iHash]; p && p->iPg!=iPg; p=p->pHashNext); - - if( p ){ - assert( p->nRef==0 || (p->flags & PAGE_FREE)==0 ); - fsPageRemoveFromHash(pFS, p); - p->iPg = 0; - iHash = fsHashKey(pFS->nHash, 0); - p->pHashNext = pFS->apHash[iHash]; - pFS->apHash[iHash] = p; - } -} - -/* -** If the page passed as an argument is dirty, update the database file -** (or mapping of the database file) with its current contents and mark -** the page as clean. -** -** Return LSM_OK if the operation is a success, or an LSM error code -** otherwise. -*/ -int lsmFsPagePersist(Page *pPg){ - int rc = LSM_OK; - if( pPg && (pPg->flags & PAGE_DIRTY) ){ - FileSystem *pFS = pPg->pFS; - - if( pFS->pCompress ){ - int iHash; /* Hash key of assigned page number */ - u8 aSz[3]; /* pPg->nCompress as a 24-bit big-endian */ - assert( pPg->pSeg && pPg->iPg==0 && pPg->nCompress==0 ); - - /* Compress the page image. */ - rc = fsCompressIntoBuffer(pFS, pPg); - - /* Serialize the compressed size into buffer aSz[] */ - putRecordSize(aSz, pPg->nCompress, 0); - - /* Write the serialized page record into the database file. */ - pPg->iPg = fsAppendData(pFS, pPg->pSeg, aSz, sizeof(aSz), &rc); - fsAppendData(pFS, pPg->pSeg, pFS->aOBuffer, pPg->nCompress, &rc); - fsAppendData(pFS, pPg->pSeg, aSz, sizeof(aSz), &rc); - - /* Now that it has a page number, insert the page into the hash table */ - iHash = fsHashKey(pFS->nHash, pPg->iPg); - pPg->pHashNext = pFS->apHash[iHash]; - pFS->apHash[iHash] = pPg; - - pPg->pSeg->nSize += (sizeof(aSz) * 2) + pPg->nCompress; - - pPg->flags &= ~PAGE_DIRTY; - pFS->nWrite++; - }else{ - - if( pPg->iPg==0 ){ - /* No page number has been assigned yet. This occurs with pages used - ** in the b-tree hierarchy. They were not assigned page numbers when - ** they were created as doing so would cause this call to - ** lsmFsPagePersist() to write an out-of-order page. Instead a page - ** number is assigned here so that the page data will be appended - ** to the current segment. 
- */ - Page **pp; - int iPrev = 0; - int iNext = 0; - int iHash; - - assert( pPg->pSeg->iFirst ); - assert( pPg->flags & PAGE_FREE ); - assert( (pPg->flags & PAGE_HASPREV)==0 ); - assert( pPg->nData==pFS->nPagesize-4 ); - - rc = fsAppendPage(pFS, pPg->pSeg, &pPg->iPg, &iPrev, &iNext); - if( rc!=LSM_OK ) return rc; - - assert( pPg->flags & PAGE_FREE ); - iHash = fsHashKey(pFS->nHash, pPg->iPg); - fsRemoveHashEntry(pFS, pPg->iPg); - pPg->pHashNext = pFS->apHash[iHash]; - pFS->apHash[iHash] = pPg; - assert( pPg->pHashNext==0 || pPg->pHashNext->iPg!=pPg->iPg ); - - if( iPrev ){ - assert( iNext==0 ); - memmove(&pPg->aData[4], pPg->aData, pPg->nData); - lsmPutU32(pPg->aData, iPrev); - pPg->flags |= PAGE_HASPREV; - pPg->aData += 4; - }else if( iNext ){ - assert( iPrev==0 ); - lsmPutU32(&pPg->aData[pPg->nData], iNext); - }else{ - int nData = pPg->nData; - pPg->nData += 4; - lsmSortedExpandBtreePage(pPg, nData); - } - - pPg->nRef++; - for(pp=&pFS->pWaiting; *pp; pp=&(*pp)->pWaitingNext); - *pp = pPg; - assert( pPg->pWaitingNext==0 ); - - }else{ - i64 iOff; /* Offset to write within database file */ - - iOff = (i64)pFS->nPagesize * (i64)(pPg->iPg-1); - if( fsMmapPage(pFS, pPg->iPg)==0 ){ - u8 *aData = pPg->aData - (pPg->flags & PAGE_HASPREV); - rc = lsmEnvWrite(pFS->pEnv, pFS->fdDb, iOff, aData, pFS->nPagesize); - }else if( pPg->flags & PAGE_FREE ){ - fsGrowMapping(pFS, iOff + pFS->nPagesize, &rc); - if( rc==LSM_OK ){ - u8 *aTo = &((u8 *)(pFS->pMap))[iOff]; - u8 *aFrom = pPg->aData - (pPg->flags & PAGE_HASPREV); - memcpy(aTo, aFrom, pFS->nPagesize); - lsmFree(pFS->pEnv, aFrom); - pFS->nCacheAlloc--; - pPg->aData = aTo + (pPg->flags & PAGE_HASPREV); - pPg->flags &= ~PAGE_FREE; - fsPageRemoveFromHash(pFS, pPg); - pPg->pMappedNext = pFS->pMapped; - pFS->pMapped = pPg; - } - } - - lsmFsFlushWaiting(pFS, &rc); - pPg->flags &= ~PAGE_DIRTY; - pFS->nWrite++; - } - } - } - - return rc; -} - -/* -** For non-compressed databases, this function is a no-op. For compressed -** databases, it adds a padding record to the segment passed as the third -** argument. -** -** The size of the padding records is selected so that the last byte -** written is the last byte of a disk sector. This means that if a -** snapshot is taken and checkpointed, subsequent worker processes will -** not write to any sector that contains checkpointed data. 
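The sector-alignment rule described above amounts to padding the segment out to the last byte of the sector containing its current end, so that the next writer starts on a fresh sector. A small arithmetic sketch of that calculation follows; it assumes a 512-byte sector (the real value is FileSystem.szSector), ignores the 4-byte adjustment for a block pointer, and is not part of the original file.

/* Illustrative sketch; not part of the deleted lsm_file.c. */
#include <stdio.h>
#include <stdint.h>

/* Bytes of padding needed after byte offset iLast so that the segment
** ends on the last byte of its sector, as in lsmFsSortedPadding(). */
static int paddingFor(int64_t iLast, int szSector){
  int64_t iLast2 = (1 + iLast/szSector) * szSector - 1;
  return (int)(iLast2 - iLast);
}

int main(void){
  /* A segment ending at offset 10000 needs 239 bytes of padding: a 3-byte
  ** header, 233 zero bytes and a 3-byte trailer, so its final byte lands
  ** on offset 10239, the last byte of that 512-byte sector. */
  printf("%d\n", paddingFor(10000, 512));
  return 0;
}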
-*/ -int lsmFsSortedPadding( - FileSystem *pFS, - Snapshot *pSnapshot, - Segment *pSeg -){ - int rc = LSM_OK; - if( pFS->pCompress && pSeg->iFirst ){ - LsmPgno iLast2; - LsmPgno iLast = pSeg->iLastPg; /* Current last page of segment */ - int nPad; /* Bytes of padding required */ - u8 aSz[3]; - - iLast2 = (1 + iLast/pFS->szSector) * pFS->szSector - 1; - assert( fsPageToBlock(pFS, iLast)==fsPageToBlock(pFS, iLast2) ); - nPad = (int)(iLast2 - iLast); - - if( iLast2>fsLastPageOnPagesBlock(pFS, iLast) ){ - nPad -= 4; - } - assert( nPad>=0 ); - - if( nPad>=6 ){ - pSeg->nSize += nPad; - nPad -= 6; - putRecordSize(aSz, nPad, 1); - fsAppendData(pFS, pSeg, aSz, sizeof(aSz), &rc); - memset(pFS->aOBuffer, 0, nPad); - fsAppendData(pFS, pSeg, pFS->aOBuffer, nPad, &rc); - fsAppendData(pFS, pSeg, aSz, sizeof(aSz), &rc); - }else if( nPad>0 ){ - u8 aBuf[5] = {0,0,0,0,0}; - aBuf[0] = (u8)nPad; - aBuf[nPad-1] = (u8)nPad; - fsAppendData(pFS, pSeg, aBuf, nPad, &rc); - } - - assert( rc!=LSM_OK - || pSeg->iLastPg==fsLastPageOnPagesBlock(pFS, pSeg->iLastPg) - || ((pSeg->iLastPg + 1) % pFS->szSector)==0 - ); - } - - return rc; -} - - -/* -** Increment the reference count on the page object passed as the first -** argument. -*/ -void lsmFsPageRef(Page *pPg){ - if( pPg ){ - pPg->nRef++; - } -} - -/* -** Release a page-reference obtained using fsPageGet(). -*/ -int lsmFsPageRelease(Page *pPg){ - int rc = LSM_OK; - if( pPg ){ - assert( pPg->nRef>0 ); - pPg->nRef--; - if( pPg->nRef==0 ){ - FileSystem *pFS = pPg->pFS; - rc = lsmFsPagePersist(pPg); - pFS->nOut--; - - assert( pPg->pFS->pCompress - || fsIsFirst(pPg->pFS, pPg->iPg)==0 - || (pPg->flags & PAGE_HASPREV) - ); - pPg->aData -= (pPg->flags & PAGE_HASPREV); - pPg->flags &= ~PAGE_HASPREV; - - if( (pPg->flags & PAGE_FREE)==0 ){ - /* Removed from mapped list */ - Page **pp; - for(pp=&pFS->pMapped; (*pp)!=pPg; pp=&(*pp)->pMappedNext); - *pp = pPg->pMappedNext; - pPg->pMappedNext = 0; - - /* Add to free list */ - pPg->pFreeNext = pFS->pFree; - pFS->pFree = pPg; - }else{ - fsPageAddToLru(pFS, pPg); - } - } - } - - return rc; -} - -/* -** Return the total number of pages read from the database file. -*/ -int lsmFsNRead(FileSystem *pFS){ return pFS->nRead; } - -/* -** Return the total number of pages written to the database file. -*/ -int lsmFsNWrite(FileSystem *pFS){ return pFS->nWrite; } - -/* -** Return a copy of the environment pointer used by the file-system object. -*/ -lsm_env *lsmFsEnv(FileSystem *pFS){ - return pFS->pEnv; -} - -/* -** Return a copy of the environment pointer used by the file-system object -** to which this page belongs. -*/ -lsm_env *lsmPageEnv(Page *pPg) { - return pPg->pFS->pEnv; -} - -/* -** Return a pointer to the file-system object associated with the Page -** passed as the only argument. -*/ -FileSystem *lsmPageFS(Page *pPg){ - return pPg->pFS; -} - -/* -** Return the sector-size as reported by the log file handle. -*/ -int lsmFsSectorSize(FileSystem *pFS){ - return pFS->szSector; -} - -/* -** Helper function for lsmInfoArrayStructure(). -*/ -static Segment *startsWith(Segment *pRun, LsmPgno iFirst){ - return (iFirst==pRun->iFirst) ? pRun : 0; -} - -/* -** Return the segment that starts with page iFirst, if any. If no such segment -** can be found, return NULL. 
-*/ -static Segment *findSegment(Snapshot *pWorker, LsmPgno iFirst){ - Level *pLvl; /* Used to iterate through db levels */ - Segment *pSeg = 0; /* Pointer to segment to return */ - - for(pLvl=lsmDbSnapshotLevel(pWorker); pLvl && pSeg==0; pLvl=pLvl->pNext){ - if( 0==(pSeg = startsWith(&pLvl->lhs, iFirst)) ){ - int i; - for(i=0; inRight; i++){ - if( (pSeg = startsWith(&pLvl->aRhs[i], iFirst)) ) break; - } - } - } - - return pSeg; -} - -/* -** This function implements the lsm_info(LSM_INFO_ARRAY_STRUCTURE) request. -** If successful, *pzOut is set to point to a nul-terminated string -** containing the array structure and LSM_OK is returned. The caller should -** eventually free the string using lsmFree(). -** -** If an error occurs, *pzOut is set to NULL and an LSM error code returned. -*/ -int lsmInfoArrayStructure( - lsm_db *pDb, - int bBlock, /* True for block numbers only */ - LsmPgno iFirst, - char **pzOut -){ - int rc = LSM_OK; - Snapshot *pWorker; /* Worker snapshot */ - Segment *pArray = 0; /* Array to report on */ - int bUnlock = 0; - - *pzOut = 0; - if( iFirst==0 ) return LSM_ERROR; - - /* Obtain the worker snapshot */ - pWorker = pDb->pWorker; - if( !pWorker ){ - rc = lsmBeginWork(pDb); - if( rc!=LSM_OK ) return rc; - pWorker = pDb->pWorker; - bUnlock = 1; - } - - /* Search for the array that starts on page iFirst */ - pArray = findSegment(pWorker, iFirst); - - if( pArray==0 ){ - /* Could not find the requested array. This is an error. */ - rc = LSM_ERROR; - }else{ - FileSystem *pFS = pDb->pFS; - LsmString str; - int iBlk; - int iLastBlk; - - iBlk = fsPageToBlock(pFS, pArray->iFirst); - iLastBlk = fsPageToBlock(pFS, pArray->iLastPg); - - lsmStringInit(&str, pDb->pEnv); - if( bBlock ){ - lsmStringAppendf(&str, "%d", iBlk); - while( iBlk!=iLastBlk ){ - fsBlockNext(pFS, pArray, iBlk, &iBlk); - lsmStringAppendf(&str, " %d", iBlk); - } - }else{ - lsmStringAppendf(&str, "%d", pArray->iFirst); - while( iBlk!=iLastBlk ){ - lsmStringAppendf(&str, " %d", fsLastPageOnBlock(pFS, iBlk)); - fsBlockNext(pFS, pArray, iBlk, &iBlk); - lsmStringAppendf(&str, " %d", fsFirstPageOnBlock(pFS, iBlk)); - } - lsmStringAppendf(&str, " %d", pArray->iLastPg); - } - - *pzOut = str.z; - } - - if( bUnlock ){ - int rcwork = LSM_BUSY; - lsmFinishWork(pDb, 0, &rcwork); - } - return rc; -} - -int lsmFsSegmentContainsPg( - FileSystem *pFS, - Segment *pSeg, - LsmPgno iPg, - int *pbRes -){ - Redirect *pRedir = pSeg->pRedirect; - int rc = LSM_OK; - int iBlk; - int iLastBlk; - int iPgBlock; /* Block containing page iPg */ - - iPgBlock = fsPageToBlock(pFS, pSeg->iFirst); - iBlk = fsRedirectBlock(pRedir, fsPageToBlock(pFS, pSeg->iFirst)); - iLastBlk = fsRedirectBlock(pRedir, fsPageToBlock(pFS, pSeg->iLastPg)); - - while( iBlk!=iLastBlk && iBlk!=iPgBlock && rc==LSM_OK ){ - rc = fsBlockNext(pFS, pSeg, iBlk, &iBlk); - } - - *pbRes = (iBlk==iPgBlock); - return rc; -} - -/* -** This function implements the lsm_info(LSM_INFO_ARRAY_PAGES) request. -** If successful, *pzOut is set to point to a nul-terminated string -** containing the array structure and LSM_OK is returned. The caller should -** eventually free the string using lsmFree(). -** -** If an error occurs, *pzOut is set to NULL and an LSM error code returned. 
-*/ -int lsmInfoArrayPages(lsm_db *pDb, LsmPgno iFirst, char **pzOut){ - int rc = LSM_OK; - Snapshot *pWorker; /* Worker snapshot */ - Segment *pSeg = 0; /* Array to report on */ - int bUnlock = 0; - - *pzOut = 0; - if( iFirst==0 ) return LSM_ERROR; - - /* Obtain the worker snapshot */ - pWorker = pDb->pWorker; - if( !pWorker ){ - rc = lsmBeginWork(pDb); - if( rc!=LSM_OK ) return rc; - pWorker = pDb->pWorker; - bUnlock = 1; - } - - /* Search for the array that starts on page iFirst */ - pSeg = findSegment(pWorker, iFirst); - - if( pSeg==0 ){ - /* Could not find the requested array. This is an error. */ - rc = LSM_ERROR; - }else{ - Page *pPg = 0; - FileSystem *pFS = pDb->pFS; - LsmString str; - - lsmStringInit(&str, pDb->pEnv); - rc = lsmFsDbPageGet(pFS, pSeg, iFirst, &pPg); - while( rc==LSM_OK && pPg ){ - Page *pNext = 0; - lsmStringAppendf(&str, " %lld", lsmFsPageNumber(pPg)); - rc = lsmFsDbPageNext(pSeg, pPg, 1, &pNext); - lsmFsPageRelease(pPg); - pPg = pNext; - } - - if( rc!=LSM_OK ){ - lsmFree(pDb->pEnv, str.z); - }else{ - *pzOut = str.z; - } - } - - if( bUnlock ){ - int rcwork = LSM_BUSY; - lsmFinishWork(pDb, 0, &rcwork); - } - return rc; -} - -/* -** The following macros are used by the integrity-check code. Associated with -** each block in the database is an 8-bit bit mask (the entry in the aUsed[] -** array). As the integrity-check meanders through the database, it sets the -** following bits to indicate how each block is used. -** -** INTEGRITY_CHECK_FIRST_PG: -** First page of block is in use by sorted run. -** -** INTEGRITY_CHECK_LAST_PG: -** Last page of block is in use by sorted run. -** -** INTEGRITY_CHECK_USED: -** At least one page of the block is in use by a sorted run. -** -** INTEGRITY_CHECK_FREE: -** The free block list contains an entry corresponding to this block. -*/ -#define INTEGRITY_CHECK_FIRST_PG 0x01 -#define INTEGRITY_CHECK_LAST_PG 0x02 -#define INTEGRITY_CHECK_USED 0x04 -#define INTEGRITY_CHECK_FREE 0x08 - -/* -** Helper function for lsmFsIntegrityCheck() -*/ -static void checkBlocks( - FileSystem *pFS, - Segment *pSeg, - int bExtra, /* If true, count the "next" block if any */ - int nUsed, - u8 *aUsed -){ - if( pSeg ){ - if( pSeg && pSeg->nSize>0 ){ - int rc; - int iBlk; /* Current block (during iteration) */ - int iLastBlk; /* Last block of segment */ - int iFirstBlk; /* First block of segment */ - int bLastIsLastOnBlock; /* True iLast is the last on its block */ - - assert( 0==fsSegmentRedirects(pFS, pSeg) ); - iBlk = iFirstBlk = fsPageToBlock(pFS, pSeg->iFirst); - iLastBlk = fsPageToBlock(pFS, pSeg->iLastPg); - - bLastIsLastOnBlock = (fsLastPageOnBlock(pFS, iLastBlk)==pSeg->iLastPg); - assert( iBlk>0 ); - - do { - /* iBlk is a part of this sorted run. */ - aUsed[iBlk-1] |= INTEGRITY_CHECK_USED; - - /* If the first page of this block is also part of the segment, - ** set the flag to indicate that the first page of iBlk is in use. - */ - if( fsFirstPageOnBlock(pFS, iBlk)==pSeg->iFirst || iBlk!=iFirstBlk ){ - assert( (aUsed[iBlk-1] & INTEGRITY_CHECK_FIRST_PG)==0 ); - aUsed[iBlk-1] |= INTEGRITY_CHECK_FIRST_PG; - } - - /* Unless the sorted run finishes before the last page on this block, - ** the last page of this block is also in use. */ - if( iBlk!=iLastBlk || bLastIsLastOnBlock ){ - assert( (aUsed[iBlk-1] & INTEGRITY_CHECK_LAST_PG)==0 ); - aUsed[iBlk-1] |= INTEGRITY_CHECK_LAST_PG; - } - - /* Special case. The sorted run being scanned is the output run of - ** a level currently undergoing an incremental merge. 
The sorted - ** run ends on the last page of iBlk, but the next block has already - ** been allocated. So mark it as in use as well. */ - if( iBlk==iLastBlk && bLastIsLastOnBlock && bExtra ){ - int iExtra = 0; - rc = fsBlockNext(pFS, pSeg, iBlk, &iExtra); - assert( rc==LSM_OK ); - - assert( aUsed[iExtra-1]==0 ); - aUsed[iExtra-1] |= INTEGRITY_CHECK_USED; - aUsed[iExtra-1] |= INTEGRITY_CHECK_FIRST_PG; - aUsed[iExtra-1] |= INTEGRITY_CHECK_LAST_PG; - } - - /* Move on to the next block in the sorted run. Or set iBlk to zero - ** in order to break out of the loop if this was the last block in - ** the run. */ - if( iBlk==iLastBlk ){ - iBlk = 0; - }else{ - rc = fsBlockNext(pFS, pSeg, iBlk, &iBlk); - assert( rc==LSM_OK ); - } - }while( iBlk ); - } - } -} - -typedef struct CheckFreelistCtx CheckFreelistCtx; -struct CheckFreelistCtx { - u8 *aUsed; - int nBlock; -}; -static int checkFreelistCb(void *pCtx, int iBlk, i64 iSnapshot){ - CheckFreelistCtx *p = (CheckFreelistCtx *)pCtx; - - assert( iBlk>=1 ); - assert( iBlk<=p->nBlock ); - assert( p->aUsed[iBlk-1]==0 ); - p->aUsed[iBlk-1] = INTEGRITY_CHECK_FREE; - return 0; -} - -/* -** This function checks that all blocks in the database file are accounted -** for. For each block, exactly one of the following must be true: -** -** + the block is part of a sorted run, or -** + the block is on the free-block list -** -** This function also checks that there are no references to blocks with -** out-of-range block numbers. -** -** If no errors are found, non-zero is returned. If an error is found, an -** assert() fails. -*/ -int lsmFsIntegrityCheck(lsm_db *pDb){ - CheckFreelistCtx ctx; - FileSystem *pFS = pDb->pFS; - int i; - int rc; - Freelist freelist = {0, 0, 0}; - u8 *aUsed; - Level *pLevel; - Snapshot *pWorker = pDb->pWorker; - int nBlock = pWorker->nBlock; - -#if 0 - static int nCall = 0; - nCall++; - printf("%d calls\n", nCall); -#endif - - aUsed = lsmMallocZero(pDb->pEnv, nBlock); - if( aUsed==0 ){ - /* Malloc has failed. Since this function is only called within debug - ** builds, this probably means the user is running an OOM injection test. - ** Regardless, it will not be possible to run the integrity-check at this - ** time, so assume the database is Ok and return non-zero. */ - return 1; - } - - for(pLevel=pWorker->pLevel; pLevel; pLevel=pLevel->pNext){ - int j; - checkBlocks(pFS, &pLevel->lhs, (pLevel->nRight!=0), nBlock, aUsed); - for(j=0; jnRight; j++){ - checkBlocks(pFS, &pLevel->aRhs[j], 0, nBlock, aUsed); - } - } - - /* Mark all blocks in the free-list as used */ - ctx.aUsed = aUsed; - ctx.nBlock = nBlock; - rc = lsmWalkFreelist(pDb, 0, checkFreelistCb, (void *)&ctx); - - if( rc==LSM_OK ){ - for(i=0; ipEnv, aUsed); - lsmFree(pDb->pEnv, freelist.aEntry); - - return 1; -} - -#ifndef NDEBUG -/* -** Return true if pPg happens to be the last page in segment pSeg. Or false -** otherwise. This function is only invoked as part of assert() conditions. -*/ -int lsmFsDbPageIsLast(Segment *pSeg, Page *pPg){ - if( pPg->pFS->pCompress ){ - LsmPgno iNext = 0; - int rc; - rc = fsNextPageOffset(pPg->pFS, pSeg, pPg->iPg, pPg->nCompress+6, &iNext); - return (rc!=LSM_OK || iNext==0); - } - return (pPg->iPg==pSeg->iLastPg); -} -#endif diff --git a/ext/lsm1/lsm_log.c b/ext/lsm1/lsm_log.c deleted file mode 100644 index a66e40bccd..0000000000 --- a/ext/lsm1/lsm_log.c +++ /dev/null @@ -1,1156 +0,0 @@ -/* -** 2011-08-13 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** This file contains the implementation of LSM database logging. Logging -** has one purpose in LSM - to make transactions durable. -** -** When data is written to an LSM database, it is initially stored in an -** in-memory tree structure. Since this structure is in volatile memory, -** if a power failure or application crash occurs it may be lost. To -** prevent loss of data in this case, each time a record is written to the -** in-memory tree an equivalent record is appended to the log on disk. -** If a power failure or application crash does occur, data can be recovered -** by reading the log. -** -** A log file consists of the following types of records representing data -** written into the database: -** -** LOG_WRITE: A key-value pair written to the database. -** LOG_DELETE: A delete key issued to the database. -** LOG_COMMIT: A transaction commit. -** -** And the following types of records for ancillary purposes.. -** -** LOG_EOF: A record indicating the end of a log file. -** LOG_PAD1: A single byte padding record. -** LOG_PAD2: An N byte padding record (N>1). -** LOG_JUMP: A pointer to another offset within the log file. -** -** Each transaction written to the log contains one or more LOG_WRITE and/or -** LOG_DELETE records, followed by a LOG_COMMIT record. The LOG_COMMIT record -** contains an 8-byte checksum based on all previous data written to the -** log file. -** -** LOG CHECKSUMS & RECOVERY -** -** Checksums are found in two types of log records: LOG_COMMIT and -** LOG_CKSUM records. In order to recover content from a log, a client -** reads each record from the start of the log, calculating a checksum as -** it does. Each time a LOG_COMMIT or LOG_CKSUM is encountered, the -** recovery process verifies that the checksum stored in the log -** matches the calculated checksum. If it does not, the recovery process -** can stop reading the log. -** -** If a recovery process reads records (other than COMMIT or CKSUM) -** consisting of at least LSM_CKSUM_MAXDATA bytes, then the next record in -** the log must be either a LOG_CKSUM or LOG_COMMIT record. If it is -** not, the recovery process also stops reading the log. -** -** To recover the log file, it must be read twice. The first time to -** determine the location of the last valid commit record. And the second -** time to load data into the in-memory tree. -** -** Todo: Surely there is a better way... -** -** LOG WRAPPING -** -** If the log file were never deleted or wrapped, it would be possible to -** read it from start to end each time is required recovery (i.e each time -** the number of database clients changes from 0 to 1). Effectively reading -** the entire history of the database each time. This would quickly become -** inefficient. Additionally, since the log file would grow without bound, -** it wastes storage space. -** -** Instead, part of each checkpoint written into the database file contains -** a log offset (and other information required to read the log starting at -** at this offset) at which to begin recovery. Offset $O. -** -** Once a checkpoint has been written and synced into the database file, it -** is guaranteed that no recovery process will need to read any data before -** offset $O of the log file. 
It is therefore safe to begin overwriting -** any data that occurs before offset $O. -** -** This implementation separates the log into three regions mapped into -** the log file - regions 0, 1 and 2. During recovery, regions are read -** in ascending order (i.e. 0, then 1, then 2). Each region is zero or -** more bytes in size. -** -** |---1---|..|--0--|.|--2--|.... -** -** New records are always appended to the end of region 2. -** -** Initially (when it is empty), all three regions are zero bytes in size. -** Each of them are located at the beginning of the file. As records are -** added to the log, region 2 grows, so that the log consists of a zero -** byte region 1, followed by a zero byte region 0, followed by an N byte -** region 2. After one or more checkpoints have been written to disk, -** the start point of region 2 is moved to $O. For example: -** -** A) ||.........|--2--|.... -** -** (both regions 0 and 1 are 0 bytes in size at offset 0). -** -** Eventually, the log wraps around to write new records into the start. -** At this point, region 2 is renamed to region 0. Region 0 is renamed -** to region 2. After appending a few records to the new region 2, the -** log file looks like this: -** -** B) ||--2--|...|--0--|.... -** -** (region 1 is still 0 bytes in size, located at offset 0). -** -** Any checkpoints made at this point may reduce the size of region 0. -** However, if they do not, and region 2 expands so that it is about to -** overwrite the start of region 0, then region 2 is renamed to region 1, -** and a new region 2 created at the end of the file following the existing -** region 0. -** -** C) |---1---|..|--0--|.|-2-| -** -** In this state records are appended to region 2 until checkpoints have -** contracted regions 0 AND 1 UNTil they are both zero bytes in size. They -** are then shifted to the start of the log file, leaving the system in -** the equivalent of state A above. -** -** Alternatively, state B may transition directly to state A if the size -** of region 0 is reduced to zero bytes before region 2 threatens to -** encroach upon it. -** -** LOG_PAD1 & LOG_PAD2 RECORDS -** -** PAD1 and PAD2 records may appear in a log file at any point. They allow -** a process writing the log file align the beginning of transactions with -** the beginning of disk sectors, which increases robustness. -** -** RECORD FORMATS: -** -** LOG_EOF: * A single 0x00 byte. -** -** LOG_PAD1: * A single 0x01 byte. -** -** LOG_PAD2: * A single 0x02 byte, followed by -** * The number of unused bytes (N) as a varint, -** * An N byte block of unused space. -** -** LOG_COMMIT: * A single 0x03 byte. -** * An 8-byte checksum. -** -** LOG_JUMP: * A single 0x04 byte. -** * Absolute file offset to jump to, encoded as a varint. -** -** LOG_WRITE: * A single 0x06 or 0x07 byte, -** * The number of bytes in the key, encoded as a varint, -** * The number of bytes in the value, encoded as a varint, -** * If the first byte was 0x07, an 8 byte checksum. -** * The key data, -** * The value data. -** -** LOG_DELETE: * A single 0x08 or 0x09 byte, -** * The number of bytes in the key, encoded as a varint, -** * If the first byte was 0x09, an 8 byte checksum. -** * The key data. -** -** Varints are as described in lsm_varint.c (SQLite 4 format). -** -** CHECKSUMS: -** -** The checksum is calculated using two 32-bit unsigned integers, s0 and -** s1. The initial value for both is 42. 
It is updated each time a record -** is written into the log file by treating the encoded (binary) record as -** an array of 32-bit little-endian integers. Then, if x[] is the integer -** array, updating the checksum accumulators as follows: -** -** for i from 0 to n-1 step 2: -** s0 += x[i] + s1; -** s1 += x[i+1] + s0; -** endfor -** -** If the record is not an even multiple of 8-bytes in size it is padded -** with zeroes to make it so before the checksum is updated. -** -** The checksum stored in a COMMIT, WRITE or DELETE is based on all bytes -** up to the start of the 8-byte checksum itself, including the COMMIT, -** WRITE or DELETE fields that appear before the checksum in the record. -** -** VARINT FORMAT -** -** See lsm_varint.c. -*/ - -#ifndef _LSM_INT_H -# include "lsmInt.h" -#endif - -/* Log record types */ -#define LSM_LOG_EOF 0x00 -#define LSM_LOG_PAD1 0x01 -#define LSM_LOG_PAD2 0x02 -#define LSM_LOG_COMMIT 0x03 -#define LSM_LOG_JUMP 0x04 - -#define LSM_LOG_WRITE 0x06 -#define LSM_LOG_WRITE_CKSUM 0x07 - -#define LSM_LOG_DELETE 0x08 -#define LSM_LOG_DELETE_CKSUM 0x09 - -#define LSM_LOG_DRANGE 0x0A -#define LSM_LOG_DRANGE_CKSUM 0x0B - -/* Require a checksum every 32KB. */ -#define LSM_CKSUM_MAXDATA (32*1024) - -/* Do not wrap a log file smaller than this in bytes. */ -#define LSM_MIN_LOGWRAP (128*1024) - -/* -** szSector: -** Commit records must be aligned to end on szSector boundaries. If -** the safety-mode is set to NORMAL or OFF, this value is 1. Otherwise, -** if the safety-mode is set to FULL, it is the size of the file-system -** sectors as reported by lsmFsSectorSize(). -*/ -struct LogWriter { - u32 cksum0; /* Checksum 0 at offset iOff */ - u32 cksum1; /* Checksum 1 at offset iOff */ - int iCksumBuf; /* Bytes of buf that have been checksummed */ - i64 iOff; /* Offset at start of buffer buf */ - int szSector; /* Sector size for this transaction */ - LogRegion jump; /* Avoid writing to this region */ - i64 iRegion1End; /* End of first region written by trans */ - i64 iRegion2Start; /* Start of second regions written by trans */ - LsmString buf; /* Buffer containing data not yet written */ -}; - -/* -** Return the result of interpreting the first 4 bytes in buffer aIn as -** a 32-bit unsigned little-endian integer. -*/ -static u32 getU32le(u8 *aIn){ - return ((u32)aIn[3] << 24) - + ((u32)aIn[2] << 16) - + ((u32)aIn[1] << 8) - + ((u32)aIn[0]); -} - - -/* -** This function is the same as logCksum(), except that pointer "a" need -** not be aligned to an 8-byte boundary or padded with zero bytes. This -** version is slower, but sometimes more convenient to use. -*/ -static void logCksumUnaligned( - char *z, /* Input buffer */ - int n, /* Size of input buffer in bytes */ - u32 *pCksum0, /* IN/OUT: Checksum value 1 */ - u32 *pCksum1 /* IN/OUT: Checksum value 2 */ -){ - u8 *a = (u8 *)z; - u32 cksum0 = *pCksum0; - u32 cksum1 = *pCksum1; - int nIn = (n/8) * 8; - int i; - - assert( n>0 ); - for(i=0; inIn ); - memcpy(aBuf, &a[nIn], n-nIn); - cksum0 += getU32le(aBuf) + cksum1; - cksum1 += getU32le(&aBuf[4]) + cksum0; - } - - *pCksum0 = cksum0; - *pCksum1 = cksum1; -} - -/* -** Update pLog->cksum0 and pLog->cksum1 so that the first nBuf bytes in the -** write buffer (pLog->buf) are included in the checksum. 
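A minimal sketch of the running log checksum described above, under the stated rules (two 32-bit accumulators starting at 42, data treated as little-endian 32-bit words, tail zero-padded to a multiple of 8 bytes); the names demoGetU32le and demoLogCksum are illustrative only, the real implementation being logCksumUnaligned() shown above.

#include <stdint.h>
#include <string.h>

/* Read 4 bytes as a 32-bit little-endian integer. */
static uint32_t demoGetU32le(const uint8_t *a){
  return ((uint32_t)a[3]<<24) | ((uint32_t)a[2]<<16)
       | ((uint32_t)a[1]<<8)  | ((uint32_t)a[0]);
}

/* Fold nData bytes of record data into checksum accumulators *pS0 and
** *pS1. Both accumulators start at 42 for a newly created log. Any
** partial trailing 8-byte group is zero-padded before being folded in,
** as the log format requires. */
static void demoLogCksum(
  const uint8_t *aData, int nData,
  uint32_t *pS0, uint32_t *pS1
){
  uint32_t s0 = *pS0, s1 = *pS1;
  int i;
  for(i=0; i+8<=nData; i+=8){
    s0 += demoGetU32le(&aData[i]) + s1;
    s1 += demoGetU32le(&aData[i+4]) + s0;
  }
  if( i<nData ){
    uint8_t aPad[8] = {0,0,0,0,0,0,0,0};
    memcpy(aPad, &aData[i], nData-i);
    s0 += demoGetU32le(aPad) + s1;
    s1 += demoGetU32le(&aPad[4]) + s0;
  }
  *pS0 = s0;
  *pS1 = s1;
}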
-*/ -static void logUpdateCksum(LogWriter *pLog, int nBuf){ - assert( (pLog->iCksumBuf % 8)==0 ); - assert( pLog->iCksumBuf<=nBuf ); - assert( (nBuf % 8)==0 || nBuf==pLog->buf.n ); - if( nBuf>pLog->iCksumBuf ){ - logCksumUnaligned( - &pLog->buf.z[pLog->iCksumBuf], nBuf-pLog->iCksumBuf, - &pLog->cksum0, &pLog->cksum1 - ); - } - pLog->iCksumBuf = nBuf; -} - -static i64 firstByteOnSector(LogWriter *pLog, i64 iOff){ - return (iOff / pLog->szSector) * pLog->szSector; -} -static i64 lastByteOnSector(LogWriter *pLog, i64 iOff){ - return firstByteOnSector(pLog, iOff) + pLog->szSector - 1; -} - -/* -** If possible, reclaim log file space. Log file space is reclaimed after -** a snapshot that points to the same data in the database file is synced -** into the db header. -*/ -static int logReclaimSpace(lsm_db *pDb){ - int rc; - int iMeta; - int bRotrans; /* True if there exists some ro-trans */ - - /* Test if there exists some other connection with a read-only transaction - ** open. If there does, then log file space may not be reclaimed. */ - rc = lsmDetectRoTrans(pDb, &bRotrans); - if( rc!=LSM_OK || bRotrans ) return rc; - - iMeta = (int)pDb->pShmhdr->iMetaPage; - if( iMeta==1 || iMeta==2 ){ - DbLog *pLog = &pDb->treehdr.log; - i64 iSyncedId; - - /* Read the snapshot-id of the snapshot stored on meta-page iMeta. Note - ** that in theory, the value read is untrustworthy (due to a race - ** condition - see comments above lsmFsReadSyncedId()). So it is only - ** ever used to conclude that no log space can be reclaimed. If it seems - ** to indicate that it may be possible to reclaim log space, a - ** second call to lsmCheckpointSynced() (which does return trustworthy - ** values) is made below to confirm. */ - rc = lsmFsReadSyncedId(pDb, iMeta, &iSyncedId); - - if( rc==LSM_OK && pLog->iSnapshotId!=iSyncedId ){ - i64 iSnapshotId = 0; - i64 iOff = 0; - rc = lsmCheckpointSynced(pDb, &iSnapshotId, &iOff, 0); - if( rc==LSM_OK && pLog->iSnapshotIdaRegion[iRegion]; - if( iOff>=p->iStart && iOff<=p->iEnd ) break; - p->iStart = 0; - p->iEnd = 0; - } - assert( iRegion<3 ); - pLog->aRegion[iRegion].iStart = iOff; - pLog->iSnapshotId = iSnapshotId; - } - } - } - return rc; -} - -/* -** This function is called when a write-transaction is first opened. It -** is assumed that the caller is holding the client-mutex when it is -** called. -** -** Before returning, this function allocates the LogWriter object that -** will be used to write to the log file during the write transaction. -** LSM_OK is returned if no error occurs, otherwise an LSM error code. -*/ -int lsmLogBegin(lsm_db *pDb){ - int rc = LSM_OK; - LogWriter *pNew; - LogRegion *aReg; - - if( pDb->bUseLog==0 ) return LSM_OK; - - /* If the log file has not yet been opened, open it now. Also allocate - ** the LogWriter structure, if it has not already been allocated. */ - rc = lsmFsOpenLog(pDb, 0); - if( pDb->pLogWriter==0 ){ - pNew = lsmMallocZeroRc(pDb->pEnv, sizeof(LogWriter), &rc); - if( pNew ){ - lsmStringInit(&pNew->buf, pDb->pEnv); - rc = lsmStringExtend(&pNew->buf, 2); - } - pDb->pLogWriter = pNew; - }else{ - pNew = pDb->pLogWriter; - assert( (u8 *)(&pNew[1])==(u8 *)(&((&pNew->buf)[1])) ); - memset(pNew, 0, ((u8 *)&pNew->buf) - (u8 *)pNew); - pNew->buf.n = 0; - } - - if( rc==LSM_OK ){ - /* The following call detects whether or not a new snapshot has been - ** synced into the database file. If so, it updates the contents of - ** the pDb->treehdr.log structure to reclaim any space in the log - ** file that is no longer required. 
- ** - ** TODO: Calling this every transaction is overkill. And since the - ** call has to read and checksum a snapshot from the database file, - ** it is expensive. It would be better to figure out a way so that - ** this is only called occasionally - say for every 32KB written to - ** the log file. - */ - rc = logReclaimSpace(pDb); - } - if( rc!=LSM_OK ){ - lsmLogClose(pDb); - return rc; - } - - /* Set the effective sector-size for this transaction. Sectors are assumed - ** to be one byte in size if the safety-mode is OFF or NORMAL, or as - ** reported by lsmFsSectorSize if it is FULL. */ - if( pDb->eSafety==LSM_SAFETY_FULL ){ - pNew->szSector = lsmFsSectorSize(pDb->pFS); - assert( pNew->szSector>0 ); - }else{ - pNew->szSector = 1; - } - - /* There are now three scenarios: - ** - ** 1) Regions 0 and 1 are both zero bytes in size and region 2 begins - ** at a file offset greater than LSM_MIN_LOGWRAP. In this case, wrap - ** around to the start and write data into the start of the log file. - ** - ** 2) Region 1 is zero bytes in size and region 2 occurs earlier in the - ** file than region 0. In this case, append data to region 2, but - ** remember to jump over region 1 if required. - ** - ** 3) Region 2 is the last in the file. Append to it. - */ - aReg = &pDb->treehdr.log.aRegion[0]; - - assert( aReg[0].iEnd==0 || aReg[0].iEnd>aReg[0].iStart ); - assert( aReg[1].iEnd==0 || aReg[1].iEnd>aReg[1].iStart ); - - pNew->cksum0 = pDb->treehdr.log.cksum0; - pNew->cksum1 = pDb->treehdr.log.cksum1; - - if( aReg[0].iEnd==0 && aReg[1].iEnd==0 && aReg[2].iStart>=LSM_MIN_LOGWRAP ){ - /* Case 1. Wrap around to the start of the file. Write an LSM_LOG_JUMP - ** into the log file in this case. Pad it out to 8 bytes using a PAD2 - ** record so that the checksums can be updated immediately. */ - u8 aJump[] = { - LSM_LOG_PAD2, 0x04, 0x00, 0x00, 0x00, 0x00, LSM_LOG_JUMP, 0x00 - }; - - lsmStringBinAppend(&pNew->buf, aJump, sizeof(aJump)); - logUpdateCksum(pNew, pNew->buf.n); - rc = lsmFsWriteLog(pDb->pFS, aReg[2].iEnd, &pNew->buf); - pNew->iCksumBuf = pNew->buf.n = 0; - - aReg[2].iEnd += 8; - pNew->jump = aReg[0] = aReg[2]; - aReg[2].iStart = aReg[2].iEnd = 0; - }else if( aReg[1].iEnd==0 && aReg[2].iEndiOff = aReg[2].iEnd; - pNew->jump = aReg[0]; - }else{ - /* Case 3. */ - assert( aReg[2].iStart>=aReg[0].iEnd && aReg[2].iStart>=aReg[1].iEnd ); - pNew->iOff = aReg[2].iEnd; - } - - if( pNew->jump.iStart ){ - i64 iRound; - assert( pNew->jump.iStart>pNew->iOff ); - - iRound = firstByteOnSector(pNew, pNew->jump.iStart); - if( iRound>pNew->iOff ) pNew->jump.iStart = iRound; - pNew->jump.iEnd = lastByteOnSector(pNew, pNew->jump.iEnd); - } - - assert( pDb->pLogWriter==pNew ); - return rc; -} - -/* -** This function is called when a write-transaction is being closed. -** Parameter bCommit is true if the transaction is being committed, -** or false otherwise. The caller must hold the client-mutex to call -** this function. -** -** A call to this function deletes the LogWriter object allocated by -** lsmLogBegin(). If the transaction is being committed, the shared state -** in *pLog is updated before returning. -*/ -void lsmLogEnd(lsm_db *pDb, int bCommit){ - DbLog *pLog; - LogWriter *p; - p = pDb->pLogWriter; - - if( p==0 ) return; - pLog = &pDb->treehdr.log; - - if( bCommit ){ - pLog->aRegion[2].iEnd = p->iOff; - pLog->cksum0 = p->cksum0; - pLog->cksum1 = p->cksum1; - if( p->iRegion1End ){ - /* This happens when the transaction had to jump over some other - ** part of the log. 
*/ - assert( pLog->aRegion[1].iEnd==0 ); - assert( pLog->aRegion[2].iStartiRegion1End ); - pLog->aRegion[1].iStart = pLog->aRegion[2].iStart; - pLog->aRegion[1].iEnd = p->iRegion1End; - pLog->aRegion[2].iStart = p->iRegion2Start; - } - } -} - -static int jumpIfRequired( - lsm_db *pDb, - LogWriter *pLog, - int nReq, - int *pbJump -){ - /* Determine if it is necessary to add an LSM_LOG_JUMP to jump over the - ** jump region before writing the LSM_LOG_WRITE or DELETE record. This - ** is necessary if there is insufficient room between the current offset - ** and the jump region to fit the new WRITE/DELETE record and the largest - ** possible JUMP record with up to 7 bytes of padding (a total of 17 - ** bytes). */ - if( (pLog->jump.iStart > (pLog->iOff + pLog->buf.n)) - && (pLog->jump.iStart < (pLog->iOff + pLog->buf.n + (nReq + 17))) - ){ - int rc; /* Return code */ - i64 iJump; /* Offset to jump to */ - u8 aJump[10]; /* Encoded jump record */ - int nJump; /* Valid bytes in aJump[] */ - int nPad; /* Bytes of padding required */ - - /* Serialize the JUMP record */ - iJump = pLog->jump.iEnd+1; - aJump[0] = LSM_LOG_JUMP; - nJump = 1 + lsmVarintPut64(&aJump[1], iJump); - - /* Adding padding to the contents of the buffer so that it will be a - ** multiple of 8 bytes in size after the JUMP record is appended. This - ** is not strictly required, it just makes the keeping the running - ** checksum up to date in this file a little simpler. */ - nPad = (pLog->buf.n + nJump) % 8; - if( nPad ){ - u8 aPad[7] = {0,0,0,0,0,0,0}; - nPad = 8-nPad; - if( nPad==1 ){ - aPad[0] = LSM_LOG_PAD1; - }else{ - aPad[0] = LSM_LOG_PAD2; - aPad[1] = (u8)(nPad-2); - } - rc = lsmStringBinAppend(&pLog->buf, aPad, nPad); - if( rc!=LSM_OK ) return rc; - } - - /* Append the JUMP record to the buffer. Then flush the buffer to disk - ** and update the checksums. The next write to the log file (assuming - ** there is no transaction rollback) will be to offset iJump (just past - ** the jump region). */ - rc = lsmStringBinAppend(&pLog->buf, aJump, nJump); - if( rc!=LSM_OK ) return rc; - assert( (pLog->buf.n % 8)==0 ); - rc = lsmFsWriteLog(pDb->pFS, pLog->iOff, &pLog->buf); - if( rc!=LSM_OK ) return rc; - logUpdateCksum(pLog, pLog->buf.n); - pLog->iRegion1End = (pLog->iOff + pLog->buf.n); - pLog->iRegion2Start = iJump; - pLog->iOff = iJump; - pLog->iCksumBuf = pLog->buf.n = 0; - if( pbJump ) *pbJump = 1; - } - - return LSM_OK; -} - -static int logCksumAndFlush(lsm_db *pDb){ - int rc; /* Return code */ - LogWriter *pLog = pDb->pLogWriter; - - /* Calculate the checksum value. Append it to the buffer. */ - logUpdateCksum(pLog, pLog->buf.n); - lsmPutU32((u8 *)&pLog->buf.z[pLog->buf.n], pLog->cksum0); - pLog->buf.n += 4; - lsmPutU32((u8 *)&pLog->buf.z[pLog->buf.n], pLog->cksum1); - pLog->buf.n += 4; - - /* Write the contents of the buffer to disk. */ - rc = lsmFsWriteLog(pDb->pFS, pLog->iOff, &pLog->buf); - pLog->iOff += pLog->buf.n; - pLog->iCksumBuf = pLog->buf.n = 0; - - return rc; -} - -/* -** Write the contents of the log-buffer to disk. Then write either a CKSUM -** or COMMIT record, depending on the value of parameter eType. -*/ -static int logFlush(lsm_db *pDb, int eType){ - int rc; - int nReq; - LogWriter *pLog = pDb->pLogWriter; - - assert( eType==LSM_LOG_COMMIT ); - assert( pLog ); - - /* Commit record is always 9 bytes in size. 
*/ - nReq = 9; - if( eType==LSM_LOG_COMMIT && pLog->szSector>1 ) nReq += pLog->szSector + 17; - rc = jumpIfRequired(pDb, pLog, nReq, 0); - - /* If this is a COMMIT, add padding to the log so that the COMMIT record - ** is aligned against the end of a disk sector. In other words, add padding - ** so that the first byte following the COMMIT record lies on a different - ** sector. */ - if( eType==LSM_LOG_COMMIT && pLog->szSector>1 ){ - int nPad; /* Bytes of padding to add */ - - /* Determine the value of nPad. */ - nPad = ((pLog->iOff + pLog->buf.n + 9) % pLog->szSector); - if( nPad ) nPad = pLog->szSector - nPad; - rc = lsmStringExtend(&pLog->buf, nPad); - if( rc!=LSM_OK ) return rc; - - while( nPad ){ - if( nPad==1 ){ - pLog->buf.z[pLog->buf.n++] = LSM_LOG_PAD1; - nPad = 0; - }else{ - int n = LSM_MIN(200, nPad-2); - pLog->buf.z[pLog->buf.n++] = LSM_LOG_PAD2; - pLog->buf.z[pLog->buf.n++] = (char)n; - nPad -= 2; - memset(&pLog->buf.z[pLog->buf.n], 0x2B, n); - pLog->buf.n += n; - nPad -= n; - } - } - } - - /* Make sure there is room in the log-buffer to add the CKSUM or COMMIT - ** record. Then add the first byte of it. */ - rc = lsmStringExtend(&pLog->buf, 9); - if( rc!=LSM_OK ) return rc; - pLog->buf.z[pLog->buf.n++] = (char)eType; - memset(&pLog->buf.z[pLog->buf.n], 0, 8); - - rc = logCksumAndFlush(pDb); - - /* If this is a commit and synchronous=full, sync the log to disk. */ - if( rc==LSM_OK && eType==LSM_LOG_COMMIT && pDb->eSafety==LSM_SAFETY_FULL ){ - rc = lsmFsSyncLog(pDb->pFS); - } - return rc; -} - -/* -** Append an LSM_LOG_WRITE (if nVal>=0) or LSM_LOG_DELETE (if nVal<0) -** record to the database log. -*/ -int lsmLogWrite( - lsm_db *pDb, /* Database handle */ - int eType, - void *pKey, int nKey, /* Database key to write to log */ - void *pVal, int nVal /* Database value (or nVal<0) to write */ -){ - int rc = LSM_OK; - LogWriter *pLog; /* Log object to write to */ - int nReq; /* Bytes of space required in log */ - int bCksum = 0; /* True to embed a checksum in this record */ - - assert( eType==LSM_WRITE || eType==LSM_DELETE || eType==LSM_DRANGE ); - assert( LSM_LOG_WRITE==LSM_WRITE ); - assert( LSM_LOG_DELETE==LSM_DELETE ); - assert( LSM_LOG_DRANGE==LSM_DRANGE ); - assert( (eType==LSM_LOG_DELETE)==(nVal<0) ); - - if( pDb->bUseLog==0 ) return LSM_OK; - pLog = pDb->pLogWriter; - - /* Determine how many bytes of space are required, assuming that a checksum - ** will be embedded in this record (even though it may not be). */ - nReq = 1 + lsmVarintLen32(nKey) + 8 + nKey; - if( eType!=LSM_LOG_DELETE ) nReq += lsmVarintLen32(nVal) + nVal; - - /* Jump over the jump region if required. Set bCksum to true to tell the - ** code below to include a checksum in the record if either (a) writing - ** this record would mean that more than LSM_CKSUM_MAXDATA bytes of data - ** have been written to the log since the last checksum, or (b) the jump - ** is taken. */ - rc = jumpIfRequired(pDb, pLog, nReq, &bCksum); - if( (pLog->buf.n+nReq) > LSM_CKSUM_MAXDATA ) bCksum = 1; - - if( rc==LSM_OK ){ - rc = lsmStringExtend(&pLog->buf, nReq); - } - if( rc==LSM_OK ){ - u8 *a = (u8 *)&pLog->buf.z[pLog->buf.n]; - - /* Write the record header - the type byte followed by either 1 (for - ** DELETE) or 2 (for WRITE) varints. 
*/ - assert( LSM_LOG_WRITE_CKSUM == (LSM_LOG_WRITE | 0x0001) ); - assert( LSM_LOG_DELETE_CKSUM == (LSM_LOG_DELETE | 0x0001) ); - assert( LSM_LOG_DRANGE_CKSUM == (LSM_LOG_DRANGE | 0x0001) ); - *(a++) = (u8)eType | (u8)bCksum; - a += lsmVarintPut32(a, nKey); - if( eType!=LSM_LOG_DELETE ) a += lsmVarintPut32(a, nVal); - - if( bCksum ){ - pLog->buf.n = (a - (u8 *)pLog->buf.z); - rc = logCksumAndFlush(pDb); - a = (u8 *)&pLog->buf.z[pLog->buf.n]; - } - - memcpy(a, pKey, nKey); - a += nKey; - if( eType!=LSM_LOG_DELETE ){ - memcpy(a, pVal, nVal); - a += nVal; - } - pLog->buf.n = a - (u8 *)pLog->buf.z; - assert( pLog->buf.n<=pLog->buf.nAlloc ); - } - - return rc; -} - -/* -** Append an LSM_LOG_COMMIT record to the database log. -*/ -int lsmLogCommit(lsm_db *pDb){ - if( pDb->bUseLog==0 ) return LSM_OK; - return logFlush(pDb, LSM_LOG_COMMIT); -} - -/* -** Store the current offset and other checksum related information in the -** structure *pMark. Later, *pMark can be passed to lsmLogSeek() to "rewind" -** the LogWriter object to the current log file offset. This is used when -** rolling back savepoint transactions. -*/ -void lsmLogTell( - lsm_db *pDb, /* Database handle */ - LogMark *pMark /* Populate this object with current offset */ -){ - LogWriter *pLog; - int nCksum; - - if( pDb->bUseLog==0 ) return; - pLog = pDb->pLogWriter; - nCksum = pLog->buf.n & 0xFFFFFFF8; - logUpdateCksum(pLog, nCksum); - assert( pLog->iCksumBuf==nCksum ); - pMark->nBuf = pLog->buf.n - nCksum; - memcpy(pMark->aBuf, &pLog->buf.z[nCksum], pMark->nBuf); - - pMark->iOff = pLog->iOff + pLog->buf.n; - pMark->cksum0 = pLog->cksum0; - pMark->cksum1 = pLog->cksum1; -} - -/* -** Seek (rewind) back to the log file offset stored by an ealier call to -** lsmLogTell() in *pMark. -*/ -void lsmLogSeek( - lsm_db *pDb, /* Database handle */ - LogMark *pMark /* Object containing log offset to seek to */ -){ - LogWriter *pLog; - - if( pDb->bUseLog==0 ) return; - pLog = pDb->pLogWriter; - - assert( pMark->iOff<=pLog->iOff+pLog->buf.n ); - if( (pMark->iOff & 0xFFFFFFF8)>=pLog->iOff ){ - pLog->buf.n = (int)(pMark->iOff - pLog->iOff); - pLog->iCksumBuf = (pLog->buf.n & 0xFFFFFFF8); - }else{ - pLog->buf.n = pMark->nBuf; - memcpy(pLog->buf.z, pMark->aBuf, pMark->nBuf); - pLog->iCksumBuf = 0; - pLog->iOff = pMark->iOff - pMark->nBuf; - } - pLog->cksum0 = pMark->cksum0; - pLog->cksum1 = pMark->cksum1; - - if( pMark->iOff > pLog->iRegion1End ) pLog->iRegion1End = 0; - if( pMark->iOff > pLog->iRegion2Start ) pLog->iRegion2Start = 0; -} - -/* -** This function does the work for an lsm_info(LOG_STRUCTURE) request. -*/ -int lsmInfoLogStructure(lsm_db *pDb, char **pzVal){ - int rc = LSM_OK; - char *zVal = 0; - - /* If there is no read or write transaction open, read the latest - ** tree-header from shared-memory to report on. If necessary, update - ** it based on the contents of the database header. - ** - ** No locks are taken here - these are passive read operations only. 
- */ - if( pDb->pCsr==0 && pDb->nTransOpen==0 ){ - rc = lsmTreeLoadHeader(pDb, 0); - if( rc==LSM_OK ) rc = logReclaimSpace(pDb); - } - - if( rc==LSM_OK ){ - DbLog *pLog = &pDb->treehdr.log; - zVal = lsmMallocPrintf(pDb->pEnv, - "%d %d %d %d %d %d", - (int)pLog->aRegion[0].iStart, (int)pLog->aRegion[0].iEnd, - (int)pLog->aRegion[1].iStart, (int)pLog->aRegion[1].iEnd, - (int)pLog->aRegion[2].iStart, (int)pLog->aRegion[2].iEnd - ); - if( !zVal ) rc = LSM_NOMEM_BKPT; - } - - *pzVal = zVal; - return rc; -} - -/************************************************************************* -** Begin code for log recovery. -*/ - -typedef struct LogReader LogReader; -struct LogReader { - FileSystem *pFS; /* File system to read from */ - i64 iOff; /* File offset at end of buf content */ - int iBuf; /* Current read offset in buf */ - LsmString buf; /* Buffer containing file content */ - - int iCksumBuf; /* Offset in buf corresponding to cksum[01] */ - u32 cksum0; /* Checksum 0 at offset iCksumBuf */ - u32 cksum1; /* Checksum 1 at offset iCksumBuf */ -}; - -static void logReaderBlob( - LogReader *p, /* Log reader object */ - LsmString *pBuf, /* Dynamic storage, if required */ - int nBlob, /* Number of bytes to read */ - u8 **ppBlob, /* OUT: Pointer to blob read */ - int *pRc /* IN/OUT: Error code */ -){ - static const int LOG_READ_SIZE = 512; - int rc = *pRc; /* Return code */ - int nReq = nBlob; /* Bytes required */ - - while( rc==LSM_OK && nReq>0 ){ - int nAvail; /* Bytes of data available in p->buf */ - if( p->buf.n==p->iBuf ){ - int nCksum; /* Total bytes requiring checksum */ - int nCarry = 0; /* Total bytes requiring checksum */ - - nCksum = p->iBuf - p->iCksumBuf; - if( nCksum>0 ){ - nCarry = nCksum % 8; - nCksum = ((nCksum / 8) * 8); - if( nCksum>0 ){ - logCksumUnaligned( - &p->buf.z[p->iCksumBuf], nCksum, &p->cksum0, &p->cksum1 - ); - } - } - if( nCarry>0 ) memcpy(p->buf.z, &p->buf.z[p->iBuf-nCarry], nCarry); - p->buf.n = nCarry; - p->iBuf = nCarry; - - rc = lsmFsReadLog(p->pFS, p->iOff, LOG_READ_SIZE, &p->buf); - if( rc!=LSM_OK ) break; - p->iCksumBuf = 0; - p->iOff += LOG_READ_SIZE; - } - - nAvail = p->buf.n - p->iBuf; - if( ppBlob && nReq==nBlob && nBlob<=nAvail ){ - *ppBlob = (u8 *)&p->buf.z[p->iBuf]; - p->iBuf += nBlob; - nReq = 0; - }else{ - int nCopy = LSM_MIN(nAvail, nReq); - if( nBlob==nReq ){ - pBuf->n = 0; - } - rc = lsmStringBinAppend(pBuf, (u8 *)&p->buf.z[p->iBuf], nCopy); - nReq -= nCopy; - p->iBuf += nCopy; - if( nReq==0 && ppBlob ){ - *ppBlob = (u8*)pBuf->z; - } - } - } - - *pRc = rc; -} - -static void logReaderVarint( - LogReader *p, - LsmString *pBuf, - int *piVal, /* OUT: Value read from log */ - int *pRc /* IN/OUT: Error code */ -){ - if( *pRc==LSM_OK ){ - u8 *aVarint; - if( p->buf.n==p->iBuf ){ - logReaderBlob(p, 0, 10, &aVarint, pRc); - if( LSM_OK==*pRc ) p->iBuf -= (10 - lsmVarintGet32(aVarint, piVal)); - }else{ - logReaderBlob(p, pBuf, lsmVarintSize(p->buf.z[p->iBuf]), &aVarint, pRc); - if( LSM_OK==*pRc ) lsmVarintGet32(aVarint, piVal); - } - } -} - -static void logReaderByte(LogReader *p, u8 *pByte, int *pRc){ - u8 *pPtr = 0; - logReaderBlob(p, 0, 1, &pPtr, pRc); - if( pPtr ) *pByte = *pPtr; -} - -static void logReaderCksum(LogReader *p, LsmString *pBuf, int *pbEof, int *pRc){ - if( *pRc==LSM_OK ){ - u8 *pPtr = 0; - u32 cksum0, cksum1; - int nCksum = p->iBuf - p->iCksumBuf; - - /* Update in-memory (expected) checksums */ - assert( nCksum>=0 ); - logCksumUnaligned(&p->buf.z[p->iCksumBuf], nCksum, &p->cksum0, &p->cksum1); - p->iCksumBuf = p->iBuf + 8; - logReaderBlob(p, 
pBuf, 8, &pPtr, pRc); - assert( pPtr || *pRc ); - - /* Read the checksums from the log file. Set *pbEof if they do not match. */ - if( pPtr ){ - cksum0 = lsmGetU32(pPtr); - cksum1 = lsmGetU32(&pPtr[4]); - *pbEof = (cksum0!=p->cksum0 || cksum1!=p->cksum1); - p->iCksumBuf = p->iBuf; - } - } -} - -static void logReaderInit( - lsm_db *pDb, /* Database handle */ - DbLog *pLog, /* Log object associated with pDb */ - int bInitBuf, /* True if p->buf is uninitialized */ - LogReader *p /* Initialize this LogReader object */ -){ - p->pFS = pDb->pFS; - p->iOff = pLog->aRegion[2].iStart; - p->cksum0 = pLog->cksum0; - p->cksum1 = pLog->cksum1; - if( bInitBuf ){ lsmStringInit(&p->buf, pDb->pEnv); } - p->buf.n = 0; - p->iCksumBuf = 0; - p->iBuf = 0; -} - -/* -** This function is called after reading the header of a LOG_DELETE or -** LOG_WRITE record. Parameter nByte is the total size of the key and -** value that follow the header just read. Return true if the size and -** position of the record indicate that it should contain a checksum. -*/ -static int logRequireCksum(LogReader *p, int nByte){ - return ((p->iBuf + nByte - p->iCksumBuf) > LSM_CKSUM_MAXDATA); -} - -/* -** Recover the contents of the log file. -*/ -int lsmLogRecover(lsm_db *pDb){ - LsmString buf1; /* Key buffer */ - LsmString buf2; /* Value buffer */ - LogReader reader; /* Log reader object */ - int rc = LSM_OK; /* Return code */ - int nCommit = 0; /* Number of transactions to recover */ - int iPass; - int nJump = 0; /* Number of LSM_LOG_JUMP records in pass 0 */ - DbLog *pLog; - int bOpen; - - rc = lsmFsOpenLog(pDb, &bOpen); - if( rc!=LSM_OK ) return rc; - - rc = lsmTreeInit(pDb); - if( rc!=LSM_OK ) return rc; - - pLog = &pDb->treehdr.log; - lsmCheckpointLogoffset(pDb->pShmhdr->aSnap2, pLog); - - logReaderInit(pDb, pLog, 1, &reader); - lsmStringInit(&buf1, pDb->pEnv); - lsmStringInit(&buf2, pDb->pEnv); - - /* The outer for() loop runs at most twice. The first iteration is to - ** count the number of committed transactions in the log. The second - ** iterates through those transactions and updates the in-memory tree - ** structure with their contents. 
*/ - if( bOpen ){ - for(iPass=0; iPass<2 && rc==LSM_OK; iPass++){ - int bEof = 0; - - while( rc==LSM_OK && !bEof ){ - u8 eType = 0; - logReaderByte(&reader, &eType, &rc); - - switch( eType ){ - case LSM_LOG_PAD1: - break; - - case LSM_LOG_PAD2: { - int nPad; - logReaderVarint(&reader, &buf1, &nPad, &rc); - logReaderBlob(&reader, &buf1, nPad, 0, &rc); - break; - } - - case LSM_LOG_DRANGE: - case LSM_LOG_DRANGE_CKSUM: - case LSM_LOG_WRITE: - case LSM_LOG_WRITE_CKSUM: { - int nKey; - int nVal; - u8 *aVal; - logReaderVarint(&reader, &buf1, &nKey, &rc); - logReaderVarint(&reader, &buf2, &nVal, &rc); - - if( eType==LSM_LOG_WRITE_CKSUM || eType==LSM_LOG_DRANGE_CKSUM ){ - logReaderCksum(&reader, &buf1, &bEof, &rc); - }else{ - bEof = logRequireCksum(&reader, nKey+nVal); - } - if( bEof ) break; - - logReaderBlob(&reader, &buf1, nKey, 0, &rc); - logReaderBlob(&reader, &buf2, nVal, &aVal, &rc); - if( iPass==1 && rc==LSM_OK ){ - if( eType==LSM_LOG_WRITE || eType==LSM_LOG_WRITE_CKSUM ){ - rc = lsmTreeInsert(pDb, (u8 *)buf1.z, nKey, aVal, nVal); - }else{ - rc = lsmTreeDelete(pDb, (u8 *)buf1.z, nKey, aVal, nVal); - } - } - break; - } - - case LSM_LOG_DELETE: - case LSM_LOG_DELETE_CKSUM: { - int nKey; u8 *aKey; - logReaderVarint(&reader, &buf1, &nKey, &rc); - - if( eType==LSM_LOG_DELETE_CKSUM ){ - logReaderCksum(&reader, &buf1, &bEof, &rc); - }else{ - bEof = logRequireCksum(&reader, nKey); - } - if( bEof ) break; - - logReaderBlob(&reader, &buf1, nKey, &aKey, &rc); - if( iPass==1 && rc==LSM_OK ){ - rc = lsmTreeInsert(pDb, aKey, nKey, NULL, -1); - } - break; - } - - case LSM_LOG_COMMIT: - logReaderCksum(&reader, &buf1, &bEof, &rc); - if( bEof==0 ){ - nCommit++; - assert( nCommit>0 || iPass==1 ); - if( nCommit==0 ) bEof = 1; - } - break; - - case LSM_LOG_JUMP: { - int iOff = 0; - logReaderVarint(&reader, &buf1, &iOff, &rc); - if( rc==LSM_OK ){ - if( iPass==1 ){ - if( pLog->aRegion[2].iStart==0 ){ - assert( pLog->aRegion[1].iStart==0 ); - pLog->aRegion[1].iEnd = reader.iOff; - }else{ - assert( pLog->aRegion[0].iStart==0 ); - pLog->aRegion[0].iStart = pLog->aRegion[2].iStart; - pLog->aRegion[0].iEnd = reader.iOff-reader.buf.n+reader.iBuf; - } - pLog->aRegion[2].iStart = iOff; - }else{ - if( (nJump++)==2 ){ - bEof = 1; - } - } - - reader.iOff = iOff; - reader.buf.n = reader.iBuf; - } - break; - } - - default: - /* Including LSM_LOG_EOF */ - bEof = 1; - break; - } - } - - if( rc==LSM_OK && iPass==0 ){ - if( nCommit==0 ){ - if( pLog->aRegion[2].iStart==0 ){ - iPass = 1; - }else{ - pLog->aRegion[2].iStart = 0; - iPass = -1; - lsmCheckpointZeroLogoffset(pDb); - } - } - logReaderInit(pDb, pLog, 0, &reader); - nCommit = nCommit * -1; - } - } - } - - /* Initialize DbLog object */ - if( rc==LSM_OK ){ - pLog->aRegion[2].iEnd = reader.iOff - reader.buf.n + reader.iBuf; - pLog->cksum0 = reader.cksum0; - pLog->cksum1 = reader.cksum1; - } - - if( rc==LSM_OK ){ - rc = lsmFinishRecovery(pDb); - }else{ - lsmFinishRecovery(pDb); - } - - if( pDb->bRoTrans ){ - lsmFsCloseLog(pDb); - } - - lsmStringClear(&buf1); - lsmStringClear(&buf2); - lsmStringClear(&reader.buf); - return rc; -} - -void lsmLogClose(lsm_db *db){ - if( db->pLogWriter ){ - lsmFree(db->pEnv, db->pLogWriter->buf.z); - lsmFree(db->pEnv, db->pLogWriter); - db->pLogWriter = 0; - } -} diff --git a/ext/lsm1/lsm_main.c b/ext/lsm1/lsm_main.c deleted file mode 100644 index a9c48e004e..0000000000 --- a/ext/lsm1/lsm_main.c +++ /dev/null @@ -1,1008 +0,0 @@ -/* -** 2011-08-18 -** -** The author disclaims copyright to this source code. 
In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** The main interface to the LSM module. -*/ -#include "lsmInt.h" - - -#ifdef LSM_DEBUG -/* -** This function returns a copy of its only argument. -** -** When the library is built with LSM_DEBUG defined, this function is called -** whenever an error code is generated (not propagated - generated). So -** if the library is mysteriously returning (say) LSM_IOERR, a breakpoint -** may be set in this function to determine why. -*/ -int lsmErrorBkpt(int rc){ - /* Set breakpoint here! */ - return rc; -} - -/* -** This function contains various assert() statements that test that the -** lsm_db structure passed as an argument is internally consistent. -*/ -static void assert_db_state(lsm_db *pDb){ - - /* If there is at least one cursor or a write transaction open, the database - ** handle must be holding a pointer to a client snapshot. And the reverse - ** - if there are no open cursors and no write transactions then there must - ** not be a client snapshot. */ - - assert( (pDb->pCsr!=0||pDb->nTransOpen>0)==(pDb->iReader>=0||pDb->bRoTrans) ); - - assert( (pDb->iReader<0 && pDb->bRoTrans==0) || pDb->pClient!=0 ); - - assert( pDb->nTransOpen>=0 ); -} -#else -# define assert_db_state(x) -#endif - -/* -** The default key-compare function. -*/ -static int xCmp(void *p1, int n1, void *p2, int n2){ - int res; - res = memcmp(p1, p2, LSM_MIN(n1, n2)); - if( res==0 ) res = (n1-n2); - return res; -} - -static void xLog(void *pCtx, int rc, const char *z){ - (void)(rc); - (void)(pCtx); - fprintf(stderr, "%s\n", z); - fflush(stderr); -} - -/* -** Allocate a new db handle. -*/ -int lsm_new(lsm_env *pEnv, lsm_db **ppDb){ - lsm_db *pDb; - - /* If the user did not provide an environment, use the default. */ - if( pEnv==0 ) pEnv = lsm_default_env(); - assert( pEnv ); - - /* Allocate the new database handle */ - *ppDb = pDb = (lsm_db *)lsmMallocZero(pEnv, sizeof(lsm_db)); - if( pDb==0 ) return LSM_NOMEM_BKPT; - - /* Initialize the new object */ - pDb->pEnv = pEnv; - pDb->nTreeLimit = LSM_DFLT_AUTOFLUSH; - pDb->nAutockpt = LSM_DFLT_AUTOCHECKPOINT; - pDb->bAutowork = LSM_DFLT_AUTOWORK; - pDb->eSafety = LSM_DFLT_SAFETY; - pDb->xCmp = xCmp; - pDb->nDfltPgsz = LSM_DFLT_PAGE_SIZE; - pDb->nDfltBlksz = LSM_DFLT_BLOCK_SIZE; - pDb->nMerge = LSM_DFLT_AUTOMERGE; - pDb->nMaxFreelist = LSM_MAX_FREELIST_ENTRIES; - pDb->bUseLog = LSM_DFLT_USE_LOG; - pDb->iReader = -1; - pDb->iRwclient = -1; - pDb->bMultiProc = LSM_DFLT_MULTIPLE_PROCESSES; - pDb->iMmap = LSM_DFLT_MMAP; - pDb->xLog = xLog; - pDb->compress.iId = LSM_COMPRESSION_NONE; - return LSM_OK; -} - -lsm_env *lsm_get_env(lsm_db *pDb){ - assert( pDb->pEnv ); - return pDb->pEnv; -} - -/* -** If database handle pDb is currently holding a client snapshot, but does -** not have any open cursors or write transactions, release it. 
-*/ -static void dbReleaseClientSnapshot(lsm_db *pDb){ - if( pDb->nTransOpen==0 && pDb->pCsr==0 ){ - lsmFinishReadTrans(pDb); - } -} - -static int getFullpathname( - lsm_env *pEnv, - const char *zRel, - char **pzAbs -){ - int nAlloc = 0; - char *zAlloc = 0; - int nReq = 0; - int rc; - - do{ - nAlloc = nReq; - rc = pEnv->xFullpath(pEnv, zRel, zAlloc, &nReq); - if( nReq>nAlloc ){ - zAlloc = lsmReallocOrFreeRc(pEnv, zAlloc, nReq, &rc); - } - }while( nReq>nAlloc && rc==LSM_OK ); - - if( rc!=LSM_OK ){ - lsmFree(pEnv, zAlloc); - zAlloc = 0; - } - *pzAbs = zAlloc; - return rc; -} - -/* -** Check that the bits in the db->mLock mask are consistent with the -** value stored in db->iRwclient. An assert shall fail otherwise. -*/ -static void assertRwclientLockValue(lsm_db *db){ -#ifndef NDEBUG - u64 msk; /* Mask of mLock bits for RWCLIENT locks */ - u64 rwclient = 0; /* Bit corresponding to db->iRwclient */ - - if( db->iRwclient>=0 ){ - rwclient = ((u64)1 << (LSM_LOCK_RWCLIENT(db->iRwclient)-1)); - } - msk = ((u64)1 << (LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT)-1)) - 1; - msk -= (((u64)1 << (LSM_LOCK_RWCLIENT(0)-1)) - 1); - - assert( (db->mLock & msk)==rwclient ); -#endif -} - -/* -** Open a new connection to database zFilename. -*/ -int lsm_open(lsm_db *pDb, const char *zFilename){ - int rc; - - if( pDb->pDatabase ){ - rc = LSM_MISUSE; - }else{ - char *zFull; - - /* Translate the possibly relative pathname supplied by the user into - ** an absolute pathname. This is required because the supplied path - ** is used (either directly or with "-log" appended to it) for more - ** than one purpose - to open both the database and log files, and - ** perhaps to unlink the log file during disconnection. An absolute - ** path is required to ensure that the correct files are operated - ** on even if the application changes the cwd. */ - rc = getFullpathname(pDb->pEnv, zFilename, &zFull); - assert( rc==LSM_OK || zFull==0 ); - - /* Connect to the database. */ - if( rc==LSM_OK ){ - rc = lsmDbDatabaseConnect(pDb, zFull); - } - - if( pDb->bReadonly==0 ){ - /* Configure the file-system connection with the page-size and block-size - ** of this database. Even if the database file is zero bytes in size - ** on disk, these values have been set in shared-memory by now, and so - ** are guaranteed not to change during the lifetime of this connection. - */ - if( rc==LSM_OK && LSM_OK==(rc = lsmCheckpointLoad(pDb, 0)) ){ - lsmFsSetPageSize(pDb->pFS, lsmCheckpointPgsz(pDb->aSnapshot)); - lsmFsSetBlockSize(pDb->pFS, lsmCheckpointBlksz(pDb->aSnapshot)); - } - } - - lsmFree(pDb->pEnv, zFull); - assertRwclientLockValue(pDb); - } - - assert( pDb->bReadonly==0 || pDb->bReadonly==1 ); - assert( rc!=LSM_OK || (pDb->pShmhdr==0)==(pDb->bReadonly==1) ); - - return rc; -} - -int lsm_close(lsm_db *pDb){ - int rc = LSM_OK; - if( pDb ){ - assert_db_state(pDb); - if( pDb->pCsr || pDb->nTransOpen ){ - rc = LSM_MISUSE_BKPT; - }else{ - lsmMCursorFreeCache(pDb); - lsmFreeSnapshot(pDb->pEnv, pDb->pClient); - pDb->pClient = 0; - - assertRwclientLockValue(pDb); - - lsmDbDatabaseRelease(pDb); - lsmLogClose(pDb); - lsmFsClose(pDb->pFS); - /* assert( pDb->mLock==0 ); */ - - /* Invoke any destructors registered for the compression or - ** compression factory callbacks. 
*/ - if( pDb->factory.xFree ) pDb->factory.xFree(pDb->factory.pCtx); - if( pDb->compress.xFree ) pDb->compress.xFree(pDb->compress.pCtx); - - lsmFree(pDb->pEnv, pDb->rollback.aArray); - lsmFree(pDb->pEnv, pDb->aTrans); - lsmFree(pDb->pEnv, pDb->apShm); - lsmFree(pDb->pEnv, pDb); - } - } - return rc; -} - -int lsm_config(lsm_db *pDb, int eParam, ...){ - int rc = LSM_OK; - va_list ap; - va_start(ap, eParam); - - switch( eParam ){ - case LSM_CONFIG_AUTOFLUSH: { - /* This parameter is read and written in KB. But all internal - ** processing is done in bytes. */ - int *piVal = va_arg(ap, int *); - int iVal = *piVal; - if( iVal>=0 && iVal<=(1024*1024) ){ - pDb->nTreeLimit = iVal*1024; - } - *piVal = (pDb->nTreeLimit / 1024); - break; - } - - case LSM_CONFIG_AUTOWORK: { - int *piVal = va_arg(ap, int *); - if( *piVal>=0 ){ - pDb->bAutowork = *piVal; - } - *piVal = pDb->bAutowork; - break; - } - - case LSM_CONFIG_AUTOCHECKPOINT: { - /* This parameter is read and written in KB. But all internal processing - ** (including the lsm_db.nAutockpt variable) is done in bytes. */ - int *piVal = va_arg(ap, int *); - if( *piVal>=0 ){ - int iVal = *piVal; - pDb->nAutockpt = (i64)iVal * 1024; - } - *piVal = (int)(pDb->nAutockpt / 1024); - break; - } - - case LSM_CONFIG_PAGE_SIZE: { - int *piVal = va_arg(ap, int *); - if( pDb->pDatabase ){ - /* If lsm_open() has been called, this is a read-only parameter. - ** Set the output variable to the page-size according to the - ** FileSystem object. */ - *piVal = lsmFsPageSize(pDb->pFS); - }else{ - if( *piVal>=256 && *piVal<=65536 && ((*piVal-1) & *piVal)==0 ){ - pDb->nDfltPgsz = *piVal; - }else{ - *piVal = pDb->nDfltPgsz; - } - } - break; - } - - case LSM_CONFIG_BLOCK_SIZE: { - /* This parameter is read and written in KB. But all internal - ** processing is done in bytes. */ - int *piVal = va_arg(ap, int *); - if( pDb->pDatabase ){ - /* If lsm_open() has been called, this is a read-only parameter. - ** Set the output variable to the block-size in KB according to the - ** FileSystem object. */ - *piVal = lsmFsBlockSize(pDb->pFS) / 1024; - }else{ - int iVal = *piVal; - if( iVal>=64 && iVal<=65536 && ((iVal-1) & iVal)==0 ){ - pDb->nDfltBlksz = iVal * 1024; - }else{ - *piVal = pDb->nDfltBlksz / 1024; - } - } - break; - } - - case LSM_CONFIG_SAFETY: { - int *piVal = va_arg(ap, int *); - if( *piVal>=0 && *piVal<=2 ){ - pDb->eSafety = *piVal; - } - *piVal = pDb->eSafety; - break; - } - - case LSM_CONFIG_MMAP: { - int *piVal = va_arg(ap, int *); - if( pDb->iReader<0 && *piVal>=0 ){ - pDb->iMmap = *piVal; - rc = lsmFsConfigure(pDb); - } - *piVal = pDb->iMmap; - break; - } - - case LSM_CONFIG_USE_LOG: { - int *piVal = va_arg(ap, int *); - if( pDb->nTransOpen==0 && (*piVal==0 || *piVal==1) ){ - pDb->bUseLog = *piVal; - } - *piVal = pDb->bUseLog; - break; - } - - case LSM_CONFIG_AUTOMERGE: { - int *piVal = va_arg(ap, int *); - if( *piVal>1 ) pDb->nMerge = *piVal; - *piVal = pDb->nMerge; - break; - } - - case LSM_CONFIG_MAX_FREELIST: { - int *piVal = va_arg(ap, int *); - if( *piVal>=2 && *piVal<=LSM_MAX_FREELIST_ENTRIES ){ - pDb->nMaxFreelist = *piVal; - } - *piVal = pDb->nMaxFreelist; - break; - } - - case LSM_CONFIG_MULTIPLE_PROCESSES: { - int *piVal = va_arg(ap, int *); - if( pDb->pDatabase ){ - /* If lsm_open() has been called, this is a read-only parameter. - ** Set the output variable to true if this connection is currently - ** in multi-process mode. 
*/ - *piVal = lsmDbMultiProc(pDb); - }else{ - pDb->bMultiProc = *piVal = (*piVal!=0); - } - break; - } - - case LSM_CONFIG_READONLY: { - int *piVal = va_arg(ap, int *); - /* If lsm_open() has been called, this is a read-only parameter. */ - if( pDb->pDatabase==0 && *piVal>=0 ){ - pDb->bReadonly = *piVal = (*piVal!=0); - } - *piVal = pDb->bReadonly; - break; - } - - case LSM_CONFIG_SET_COMPRESSION: { - lsm_compress *p = va_arg(ap, lsm_compress *); - if( pDb->iReader>=0 && pDb->bInFactory==0 ){ - /* May not change compression schemes with an open transaction */ - rc = LSM_MISUSE_BKPT; - }else{ - if( pDb->compress.xFree ){ - /* Invoke any destructor belonging to the current compression. */ - pDb->compress.xFree(pDb->compress.pCtx); - } - if( p->xBound==0 ){ - memset(&pDb->compress, 0, sizeof(lsm_compress)); - pDb->compress.iId = LSM_COMPRESSION_NONE; - }else{ - memcpy(&pDb->compress, p, sizeof(lsm_compress)); - } - rc = lsmFsConfigure(pDb); - } - break; - } - - case LSM_CONFIG_SET_COMPRESSION_FACTORY: { - lsm_compress_factory *p = va_arg(ap, lsm_compress_factory *); - if( pDb->factory.xFree ){ - /* Invoke any destructor belonging to the current factory. */ - pDb->factory.xFree(pDb->factory.pCtx); - } - memcpy(&pDb->factory, p, sizeof(lsm_compress_factory)); - break; - } - - case LSM_CONFIG_GET_COMPRESSION: { - lsm_compress *p = va_arg(ap, lsm_compress *); - memcpy(p, &pDb->compress, sizeof(lsm_compress)); - break; - } - - default: - rc = LSM_MISUSE; - break; - } - - va_end(ap); - return rc; -} - -void lsmAppendSegmentList(LsmString *pStr, char *zPre, Segment *pSeg){ - lsmStringAppendf(pStr, "%s{%d %d %d %d}", zPre, - pSeg->iFirst, pSeg->iLastPg, pSeg->iRoot, pSeg->nSize - ); -} - -static int infoGetWorker(lsm_db *pDb, Snapshot **pp, int *pbUnlock){ - int rc = LSM_OK; - - assert( *pbUnlock==0 ); - if( !pDb->pWorker ){ - rc = lsmBeginWork(pDb); - if( rc!=LSM_OK ) return rc; - *pbUnlock = 1; - } - if( pp ) *pp = pDb->pWorker; - return rc; -} - -static void infoFreeWorker(lsm_db *pDb, int bUnlock){ - if( bUnlock ){ - int rcdummy = LSM_BUSY; - lsmFinishWork(pDb, 0, &rcdummy); - } -} - -int lsmStructList( - lsm_db *pDb, /* Database handle */ - char **pzOut /* OUT: Nul-terminated string (tcl list) */ -){ - Level *pTopLevel = 0; /* Top level of snapshot to report on */ - int rc = LSM_OK; - Level *p; - LsmString s; - Snapshot *pWorker; /* Worker snapshot */ - int bUnlock = 0; - - /* Obtain the worker snapshot */ - rc = infoGetWorker(pDb, &pWorker, &bUnlock); - if( rc!=LSM_OK ) return rc; - - /* Format the contents of the snapshot as text */ - pTopLevel = lsmDbSnapshotLevel(pWorker); - lsmStringInit(&s, pDb->pEnv); - for(p=pTopLevel; rc==LSM_OK && p; p=p->pNext){ - int i; - lsmStringAppendf(&s, "%s{%d", (s.n ? " " : ""), (int)p->iAge); - lsmAppendSegmentList(&s, " ", &p->lhs); - for(i=0; rc==LSM_OK && inRight; i++){ - lsmAppendSegmentList(&s, " ", &p->aRhs[i]); - } - lsmStringAppend(&s, "}", 1); - } - rc = s.n>=0 ? LSM_OK : LSM_NOMEM; - - /* Release the snapshot and return */ - infoFreeWorker(pDb, bUnlock); - *pzOut = s.z; - return rc; -} - -static int infoFreelistCb(void *pCtx, int iBlk, i64 iSnapshot){ - LsmString *pStr = (LsmString *)pCtx; - lsmStringAppendf(pStr, "%s{%d %lld}", (pStr->n?" 
":""), iBlk, iSnapshot); - return 0; -} - -int lsmInfoFreelist(lsm_db *pDb, char **pzOut){ - Snapshot *pWorker; /* Worker snapshot */ - int bUnlock = 0; - LsmString s; - int rc; - - /* Obtain the worker snapshot */ - rc = infoGetWorker(pDb, &pWorker, &bUnlock); - if( rc!=LSM_OK ) return rc; - - lsmStringInit(&s, pDb->pEnv); - rc = lsmWalkFreelist(pDb, 0, infoFreelistCb, &s); - if( rc!=LSM_OK ){ - lsmFree(pDb->pEnv, s.z); - }else{ - *pzOut = s.z; - } - - /* Release the snapshot and return */ - infoFreeWorker(pDb, bUnlock); - return rc; -} - -static int infoTreeSize(lsm_db *db, int *pnOldKB, int *pnNewKB){ - ShmHeader *pShm = db->pShmhdr; - TreeHeader *p = &pShm->hdr1; - - /* The following code suffers from two race conditions, as it accesses and - ** trusts the contents of shared memory without verifying checksums: - ** - ** * The two values read - TreeHeader.root.nByte and oldroot.nByte - are - ** 32-bit fields. It is assumed that reading from one of these - ** is atomic - that it is not possible to read a partially written - ** garbage value. However the two values may be mutually inconsistent. - ** - ** * TreeHeader.iLogOff is a 64-bit value. And lsmCheckpointLogOffset() - ** reads a 64-bit value from a snapshot stored in shared memory. It - ** is assumed that in each case it is possible to read a partially - ** written garbage value. If this occurs, then the value returned - ** for the size of the "old" tree may reflect the size of an "old" - ** tree that was recently flushed to disk. - ** - ** Given the context in which this function is called (as a result of an - ** lsm_info(LSM_INFO_TREE_SIZE) request), neither of these are considered to - ** be problems. - */ - *pnNewKB = ((int)p->root.nByte + 1023) / 1024; - if( p->iOldShmid ){ - if( p->iOldLog==lsmCheckpointLogOffset(pShm->aSnap1) ){ - *pnOldKB = 0; - }else{ - *pnOldKB = ((int)p->oldroot.nByte + 1023) / 1024; - } - }else{ - *pnOldKB = 0; - } - - return LSM_OK; -} - -int lsm_info(lsm_db *pDb, int eParam, ...){ - int rc = LSM_OK; - va_list ap; - va_start(ap, eParam); - - switch( eParam ){ - case LSM_INFO_NWRITE: { - int *piVal = va_arg(ap, int *); - *piVal = lsmFsNWrite(pDb->pFS); - break; - } - - case LSM_INFO_NREAD: { - int *piVal = va_arg(ap, int *); - *piVal = lsmFsNRead(pDb->pFS); - break; - } - - case LSM_INFO_DB_STRUCTURE: { - char **pzVal = va_arg(ap, char **); - rc = lsmStructList(pDb, pzVal); - break; - } - - case LSM_INFO_ARRAY_STRUCTURE: { - LsmPgno pgno = va_arg(ap, LsmPgno); - char **pzVal = va_arg(ap, char **); - rc = lsmInfoArrayStructure(pDb, 0, pgno, pzVal); - break; - } - - case LSM_INFO_ARRAY_PAGES: { - LsmPgno pgno = va_arg(ap, LsmPgno); - char **pzVal = va_arg(ap, char **); - rc = lsmInfoArrayPages(pDb, pgno, pzVal); - break; - } - - case LSM_INFO_PAGE_HEX_DUMP: - case LSM_INFO_PAGE_ASCII_DUMP: { - LsmPgno pgno = va_arg(ap, LsmPgno); - char **pzVal = va_arg(ap, char **); - int bUnlock = 0; - rc = infoGetWorker(pDb, 0, &bUnlock); - if( rc==LSM_OK ){ - int bHex = (eParam==LSM_INFO_PAGE_HEX_DUMP); - rc = lsmInfoPageDump(pDb, pgno, bHex, pzVal); - } - infoFreeWorker(pDb, bUnlock); - break; - } - - case LSM_INFO_LOG_STRUCTURE: { - char **pzVal = va_arg(ap, char **); - rc = lsmInfoLogStructure(pDb, pzVal); - break; - } - - case LSM_INFO_FREELIST: { - char **pzVal = va_arg(ap, char **); - rc = lsmInfoFreelist(pDb, pzVal); - break; - } - - case LSM_INFO_CHECKPOINT_SIZE: { - int *pnKB = va_arg(ap, int *); - rc = lsmCheckpointSize(pDb, pnKB); - break; - } - - case LSM_INFO_TREE_SIZE: { - int *pnOld = va_arg(ap, int *); - 
int *pnNew = va_arg(ap, int *); - rc = infoTreeSize(pDb, pnOld, pnNew); - break; - } - - case LSM_INFO_COMPRESSION_ID: { - unsigned int *piOut = va_arg(ap, unsigned int *); - if( pDb->pClient ){ - *piOut = pDb->pClient->iCmpId; - }else{ - rc = lsmInfoCompressionId(pDb, piOut); - } - break; - } - - default: - rc = LSM_MISUSE; - break; - } - - va_end(ap); - return rc; -} - -static int doWriteOp( - lsm_db *pDb, - int bDeleteRange, - const void *pKey, int nKey, /* Key to write or delete */ - const void *pVal, int nVal /* Value to write. Or nVal==-1 for a delete */ -){ - int rc = LSM_OK; /* Return code */ - int bCommit = 0; /* True to commit before returning */ - - if( pDb->nTransOpen==0 ){ - bCommit = 1; - rc = lsm_begin(pDb, 1); - } - - if( rc==LSM_OK ){ - int eType = (bDeleteRange ? LSM_DRANGE : (nVal>=0?LSM_WRITE:LSM_DELETE)); - rc = lsmLogWrite(pDb, eType, (void *)pKey, nKey, (void *)pVal, nVal); - } - - lsmSortedSaveTreeCursors(pDb); - - if( rc==LSM_OK ){ - int pgsz = lsmFsPageSize(pDb->pFS); - int nQuant = LSM_AUTOWORK_QUANT * pgsz; - int nBefore; - int nAfter; - int nDiff; - - if( nQuant>pDb->nTreeLimit ){ - nQuant = LSM_MAX(pDb->nTreeLimit, pgsz); - } - - nBefore = lsmTreeSize(pDb); - if( bDeleteRange ){ - rc = lsmTreeDelete(pDb, (void *)pKey, nKey, (void *)pVal, nVal); - }else{ - rc = lsmTreeInsert(pDb, (void *)pKey, nKey, (void *)pVal, nVal); - } - - nAfter = lsmTreeSize(pDb); - nDiff = (nAfter/nQuant) - (nBefore/nQuant); - if( rc==LSM_OK && pDb->bAutowork && nDiff!=0 ){ - rc = lsmSortedAutoWork(pDb, nDiff * LSM_AUTOWORK_QUANT); - } - } - - /* If a transaction was opened at the start of this function, commit it. - ** Or, if an error has occurred, roll it back. */ - if( bCommit ){ - if( rc==LSM_OK ){ - rc = lsm_commit(pDb, 0); - }else{ - lsm_rollback(pDb, 0); - } - } - - return rc; -} - -/* -** Write a new value into the database. -*/ -int lsm_insert( - lsm_db *db, /* Database connection */ - const void *pKey, int nKey, /* Key to write or delete */ - const void *pVal, int nVal /* Value to write. Or nVal==-1 for a delete */ -){ - return doWriteOp(db, 0, pKey, nKey, pVal, nVal); -} - -/* -** Delete a value from the database. -*/ -int lsm_delete(lsm_db *db, const void *pKey, int nKey){ - return doWriteOp(db, 0, pKey, nKey, 0, -1); -} - -/* -** Delete a range of database keys. -*/ -int lsm_delete_range( - lsm_db *db, /* Database handle */ - const void *pKey1, int nKey1, /* Lower bound of range to delete */ - const void *pKey2, int nKey2 /* Upper bound of range to delete */ -){ - int rc = LSM_OK; - if( db->xCmp((void *)pKey1, nKey1, (void *)pKey2, nKey2)<0 ){ - rc = doWriteOp(db, 1, pKey1, nKey1, pKey2, nKey2); - } - return rc; -} - -/* -** Open a new cursor handle. -** -** If there are currently no other open cursor handles, and no open write -** transaction, open a read transaction here. -*/ -int lsm_csr_open(lsm_db *pDb, lsm_cursor **ppCsr){ - int rc = LSM_OK; /* Return code */ - MultiCursor *pCsr = 0; /* New cursor object */ - - /* Open a read transaction if one is not already open. */ - assert_db_state(pDb); - - if( pDb->pShmhdr==0 ){ - assert( pDb->bReadonly ); - rc = lsmBeginRoTrans(pDb); - }else if( pDb->iReader<0 ){ - rc = lsmBeginReadTrans(pDb); - } - - /* Allocate the multi-cursor. */ - if( rc==LSM_OK ){ - rc = lsmMCursorNew(pDb, &pCsr); - } - - /* If an error has occured, set the output to NULL and delete any partially - ** allocated cursor. If this means there are no open cursors, release the - ** client snapshot. 
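doWriteOp() above schedules automatic background work only when auto-work is enabled and the in-memory tree size crosses a multiple of nQuant bytes; a non-zero crossing count causes it to call lsmSortedAutoWork(). A minimal standalone sketch of that crossing arithmetic follows; the 32-page quantum and 4KB page size are illustrative values, not necessarily the library defaults.

#include <stdio.h>

/* Number of nQuant-byte boundaries crossed when the tree grows from
** nBefore to nAfter bytes. */
static int quantaCrossed(int nBefore, int nAfter, int nQuant){
  return (nAfter / nQuant) - (nBefore / nQuant);
}

int main(void){
  int nQuant = 32 * 4096;                                  /* illustrative quantum   */
  printf("%d\n", quantaCrossed(100000, 120000, nQuant));   /* 0: no work scheduled   */
  printf("%d\n", quantaCrossed(120000, 140000, nQuant));   /* 1: one unit of work    */
  return 0;
}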
*/ - if( rc!=LSM_OK ){ - lsmMCursorClose(pCsr, 0); - dbReleaseClientSnapshot(pDb); - } - - assert_db_state(pDb); - *ppCsr = (lsm_cursor *)pCsr; - return rc; -} - -/* -** Close a cursor opened using lsm_csr_open(). -*/ -int lsm_csr_close(lsm_cursor *p){ - if( p ){ - lsm_db *pDb = lsmMCursorDb((MultiCursor *)p); - assert_db_state(pDb); - lsmMCursorClose((MultiCursor *)p, 1); - dbReleaseClientSnapshot(pDb); - assert_db_state(pDb); - } - return LSM_OK; -} - -/* -** Attempt to seek the cursor to the database entry specified by pKey/nKey. -** If an error occurs (e.g. an OOM or IO error), return an LSM error code. -** Otherwise, return LSM_OK. -*/ -int lsm_csr_seek(lsm_cursor *pCsr, const void *pKey, int nKey, int eSeek){ - return lsmMCursorSeek((MultiCursor *)pCsr, 0, (void *)pKey, nKey, eSeek); -} - -int lsm_csr_next(lsm_cursor *pCsr){ - return lsmMCursorNext((MultiCursor *)pCsr); -} - -int lsm_csr_prev(lsm_cursor *pCsr){ - return lsmMCursorPrev((MultiCursor *)pCsr); -} - -int lsm_csr_first(lsm_cursor *pCsr){ - return lsmMCursorFirst((MultiCursor *)pCsr); -} - -int lsm_csr_last(lsm_cursor *pCsr){ - return lsmMCursorLast((MultiCursor *)pCsr); -} - -int lsm_csr_valid(lsm_cursor *pCsr){ - return lsmMCursorValid((MultiCursor *)pCsr); -} - -int lsm_csr_key(lsm_cursor *pCsr, const void **ppKey, int *pnKey){ - return lsmMCursorKey((MultiCursor *)pCsr, (void **)ppKey, pnKey); -} - -int lsm_csr_value(lsm_cursor *pCsr, const void **ppVal, int *pnVal){ - return lsmMCursorValue((MultiCursor *)pCsr, (void **)ppVal, pnVal); -} - -void lsm_config_log( - lsm_db *pDb, - void (*xLog)(void *, int, const char *), - void *pCtx -){ - pDb->xLog = xLog; - pDb->pLogCtx = pCtx; -} - -void lsm_config_work_hook( - lsm_db *pDb, - void (*xWork)(lsm_db *, void *), - void *pCtx -){ - pDb->xWork = xWork; - pDb->pWorkCtx = pCtx; -} - -void lsmLogMessage(lsm_db *pDb, int rc, const char *zFormat, ...){ - if( pDb->xLog ){ - LsmString s; - va_list ap, ap2; - lsmStringInit(&s, pDb->pEnv); - va_start(ap, zFormat); - va_start(ap2, zFormat); - lsmStringVAppendf(&s, zFormat, ap, ap2); - va_end(ap); - va_end(ap2); - pDb->xLog(pDb->pLogCtx, rc, s.z); - lsmStringClear(&s); - } -} - -int lsm_begin(lsm_db *pDb, int iLevel){ - int rc; - - assert_db_state( pDb ); - rc = (pDb->bReadonly ? LSM_READONLY : LSM_OK); - - /* A value less than zero means open one more transaction. */ - if( iLevel<0 ) iLevel = pDb->nTransOpen + 1; - if( iLevel>pDb->nTransOpen ){ - int i; - - /* Extend the pDb->aTrans[] array if required. */ - if( rc==LSM_OK && pDb->nTransAllocpEnv, pDb->aTrans, nByte); - if( !aNew ){ - rc = LSM_NOMEM; - }else{ - nByte = sizeof(TransMark) * (iLevel+1 - pDb->nTransAlloc); - memset(&aNew[pDb->nTransAlloc], 0, nByte); - pDb->nTransAlloc = iLevel+1; - pDb->aTrans = aNew; - } - } - - if( rc==LSM_OK && pDb->nTransOpen==0 ){ - rc = lsmBeginWriteTrans(pDb); - } - - if( rc==LSM_OK ){ - for(i=pDb->nTransOpen; iaTrans[i].tree); - lsmLogTell(pDb, &pDb->aTrans[i].log); - } - pDb->nTransOpen = iLevel; - } - } - - return rc; -} - -int lsm_commit(lsm_db *pDb, int iLevel){ - int rc = LSM_OK; - - assert_db_state( pDb ); - - /* A value less than zero means close the innermost nested transaction. */ - if( iLevel<0 ) iLevel = LSM_MAX(0, pDb->nTransOpen - 1); - - if( iLevelnTransOpen ){ - if( iLevel==0 ){ - int rc2; - /* Commit the transaction to disk. 
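A usage sketch of the nested-transaction levels implemented by lsm_begin() and lsm_commit() above and by lsm_rollback(), which follows below. It assumes the public lsm.h header and an already-opened handle; error handling is reduced to rc checks.

#include "lsm.h"

int nestedTxnExample(lsm_db *db){
  int rc = lsm_begin(db, 1);                               /* outer transaction          */
  if( rc==LSM_OK ) rc = lsm_insert(db, "a", 1, "one", 3);
  if( rc==LSM_OK ) rc = lsm_begin(db, 2);                  /* nested level 2             */
  if( rc==LSM_OK ) rc = lsm_insert(db, "b", 1, "two", 3);
  if( rc==LSM_OK ) rc = lsm_rollback(db, 2);               /* undo "b"; level 2 stays open */
  if( rc==LSM_OK ) rc = lsm_commit(db, 0);                 /* commit "a" to disk         */
  return rc;
}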
*/ - if( rc==LSM_OK ) rc = lsmLogCommit(pDb); - if( rc==LSM_OK && pDb->eSafety==LSM_SAFETY_FULL ){ - rc = lsmFsSyncLog(pDb->pFS); - } - rc2 = lsmFinishWriteTrans(pDb, (rc==LSM_OK)); - if( rc==LSM_OK ) rc = rc2; - } - pDb->nTransOpen = iLevel; - } - dbReleaseClientSnapshot(pDb); - return rc; -} - -int lsm_rollback(lsm_db *pDb, int iLevel){ - int rc = LSM_OK; - assert_db_state( pDb ); - - if( pDb->nTransOpen ){ - /* A value less than zero means close the innermost nested transaction. */ - if( iLevel<0 ) iLevel = LSM_MAX(0, pDb->nTransOpen - 1); - - if( iLevel<=pDb->nTransOpen ){ - TransMark *pMark = &pDb->aTrans[(iLevel==0 ? 0 : iLevel-1)]; - lsmTreeRollback(pDb, &pMark->tree); - if( iLevel ) lsmLogSeek(pDb, &pMark->log); - pDb->nTransOpen = iLevel; - } - - if( pDb->nTransOpen==0 ){ - lsmFinishWriteTrans(pDb, 0); - } - dbReleaseClientSnapshot(pDb); - } - - return rc; -} - -int lsm_get_user_version(lsm_db *pDb, unsigned int *piUsr){ - int rc = LSM_OK; /* Return code */ - - /* Open a read transaction if one is not already open. */ - assert_db_state(pDb); - if( pDb->pShmhdr==0 ){ - assert( pDb->bReadonly ); - rc = lsmBeginRoTrans(pDb); - }else if( pDb->iReader<0 ){ - rc = lsmBeginReadTrans(pDb); - } - - /* Allocate the multi-cursor. */ - if( rc==LSM_OK ){ - *piUsr = pDb->treehdr.iUsrVersion; - } - - dbReleaseClientSnapshot(pDb); - assert_db_state(pDb); - return rc; -} - -int lsm_set_user_version(lsm_db *pDb, unsigned int iUsr){ - int rc = LSM_OK; /* Return code */ - int bCommit = 0; /* True to commit before returning */ - - if( pDb->nTransOpen==0 ){ - bCommit = 1; - rc = lsm_begin(pDb, 1); - } - - if( rc==LSM_OK ){ - pDb->treehdr.iUsrVersion = iUsr; - } - - /* If a transaction was opened at the start of this function, commit it. - ** Or, if an error has occurred, roll it back. */ - if( bCommit ){ - if( rc==LSM_OK ){ - rc = lsm_commit(pDb, 0); - }else{ - lsm_rollback(pDb, 0); - } - } - - return rc; -} diff --git a/ext/lsm1/lsm_mem.c b/ext/lsm1/lsm_mem.c deleted file mode 100644 index 13dd9fe312..0000000000 --- a/ext/lsm1/lsm_mem.c +++ /dev/null @@ -1,104 +0,0 @@ -/* -** 2011-08-18 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** Helper routines for memory allocation. -*/ -#include "lsmInt.h" - -/* -** The following routines are called internally by LSM sub-routines. In -** this case a valid environment pointer must be supplied. -*/ -void *lsmMalloc(lsm_env *pEnv, size_t N){ - assert( pEnv ); - return pEnv->xMalloc(pEnv, N); -} -void lsmFree(lsm_env *pEnv, void *p){ - assert( pEnv ); - pEnv->xFree(pEnv, p); -} -void *lsmRealloc(lsm_env *pEnv, void *p, size_t N){ - assert( pEnv ); - return pEnv->xRealloc(pEnv, p, N); -} - -/* -** Core memory allocation routines for LSM. -*/ -void *lsm_malloc(lsm_env *pEnv, size_t N){ - return lsmMalloc(pEnv ? pEnv : lsm_default_env(), N); -} -void lsm_free(lsm_env *pEnv, void *p){ - lsmFree(pEnv ? pEnv : lsm_default_env(), p); -} -void *lsm_realloc(lsm_env *pEnv, void *p, size_t N){ - return lsmRealloc(pEnv ? 
pEnv : lsm_default_env(), p, N); -} - -void *lsmMallocZero(lsm_env *pEnv, size_t N){ - void *pRet; - assert( pEnv ); - pRet = lsmMalloc(pEnv, N); - if( pRet ) memset(pRet, 0, N); - return pRet; -} - -void *lsmMallocRc(lsm_env *pEnv, size_t N, int *pRc){ - void *pRet = 0; - if( *pRc==LSM_OK ){ - pRet = lsmMalloc(pEnv, N); - if( pRet==0 ){ - *pRc = LSM_NOMEM_BKPT; - } - } - return pRet; -} - -void *lsmMallocZeroRc(lsm_env *pEnv, size_t N, int *pRc){ - void *pRet = 0; - if( *pRc==LSM_OK ){ - pRet = lsmMallocZero(pEnv, N); - if( pRet==0 ){ - *pRc = LSM_NOMEM_BKPT; - } - } - return pRet; -} - -void *lsmReallocOrFree(lsm_env *pEnv, void *p, size_t N){ - void *pNew; - pNew = lsm_realloc(pEnv, p, N); - if( !pNew ) lsm_free(pEnv, p); - return pNew; -} - -void *lsmReallocOrFreeRc(lsm_env *pEnv, void *p, size_t N, int *pRc){ - void *pRet = 0; - if( *pRc ){ - lsmFree(pEnv, p); - }else{ - pRet = lsmReallocOrFree(pEnv, p, N); - if( !pRet ) *pRc = LSM_NOMEM_BKPT; - } - return pRet; -} - -char *lsmMallocStrdup(lsm_env *pEnv, const char *zIn){ - int nByte; - char *zRet; - nByte = strlen(zIn); - zRet = lsmMalloc(pEnv, nByte+1); - if( zRet ){ - memcpy(zRet, zIn, nByte+1); - } - return zRet; -} diff --git a/ext/lsm1/lsm_mutex.c b/ext/lsm1/lsm_mutex.c deleted file mode 100644 index cb99b2a61e..0000000000 --- a/ext/lsm1/lsm_mutex.c +++ /dev/null @@ -1,88 +0,0 @@ -/* -** 2012-01-30 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** Mutex functions for LSM. -*/ -#include "lsmInt.h" - -/* -** Allocate a new mutex. -*/ -int lsmMutexNew(lsm_env *pEnv, lsm_mutex **ppNew){ - return pEnv->xMutexNew(pEnv, ppNew); -} - -/* -** Return a handle for one of the static mutexes. -*/ -int lsmMutexStatic(lsm_env *pEnv, int iMutex, lsm_mutex **ppStatic){ - return pEnv->xMutexStatic(pEnv, iMutex, ppStatic); -} - -/* -** Free a mutex allocated by lsmMutexNew(). -*/ -void lsmMutexDel(lsm_env *pEnv, lsm_mutex *pMutex){ - if( pMutex ) pEnv->xMutexDel(pMutex); -} - -/* -** Enter a mutex. -*/ -void lsmMutexEnter(lsm_env *pEnv, lsm_mutex *pMutex){ - pEnv->xMutexEnter(pMutex); -} - -/* -** Attempt to enter a mutex, but do not block. If successful, return zero. -** Otherwise, if the mutex is already held by some other thread and is not -** entered, return non zero. -** -** Each successful call to this function must be matched by a call to -** lsmMutexLeave(). -*/ -int lsmMutexTry(lsm_env *pEnv, lsm_mutex *pMutex){ - return pEnv->xMutexTry(pMutex); -} - -/* -** Leave a mutex. -*/ -void lsmMutexLeave(lsm_env *pEnv, lsm_mutex *pMutex){ - pEnv->xMutexLeave(pMutex); -} - -#ifndef NDEBUG -/* -** Return non-zero if the mutex passed as the second argument is held -** by the calling thread, or zero otherwise. If the implementation is not -** able to tell if the mutex is held by the caller, it should return -** non-zero. -** -** This function is only used as part of assert() statements. -*/ -int lsmMutexHeld(lsm_env *pEnv, lsm_mutex *pMutex){ - return pEnv->xMutexHeld ? pEnv->xMutexHeld(pMutex) : 1; -} - -/* -** Return non-zero if the mutex passed as the second argument is not -** held by the calling thread, or zero otherwise. If the implementation -** is not able to tell if the mutex is held by the caller, it should -** return non-zero. 
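The *Rc allocation helpers above thread an error code through a sequence of allocations: once *pRc is non-zero, later calls become no-ops, so the caller needs only one check at the end. A generic sketch of the same pattern, using plain malloc() and made-up error codes rather than the LSM ones:

#include <stdlib.h>

#define SKETCH_OK     0
#define SKETCH_NOMEM  1   /* stands in for LSM_NOMEM_BKPT */

static void *mallocRcSketch(size_t n, int *pRc){
  void *p = 0;
  if( *pRc==SKETCH_OK ){
    p = malloc(n);
    if( p==0 ) *pRc = SKETCH_NOMEM;
  }
  return p;
}

int buildBuffers(char **pa, char **pb){
  int rc = SKETCH_OK;
  *pa = (char *)mallocRcSketch(64, &rc);
  *pb = (char *)mallocRcSketch(128, &rc);   /* skipped if the first call failed */
  if( rc!=SKETCH_OK ){
    free(*pa); free(*pb);
    *pa = *pb = 0;
  }
  return rc;                                /* single error check for the caller */
}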
-** -** This function is only used as part of assert() statements. -*/ -int lsmMutexNotHeld(lsm_env *pEnv, lsm_mutex *pMutex){ - return pEnv->xMutexNotHeld ? pEnv->xMutexNotHeld(pMutex) : 1; -} -#endif diff --git a/ext/lsm1/lsm_shared.c b/ext/lsm1/lsm_shared.c deleted file mode 100644 index 2fdacf1eca..0000000000 --- a/ext/lsm1/lsm_shared.c +++ /dev/null @@ -1,1976 +0,0 @@ -/* -** 2012-01-23 -** -** The author disclaims copyright to this source code. In place of -** a legal notice, here is a blessing: -** -** May you do good and not evil. -** May you find forgiveness for yourself and forgive others. -** May you share freely, never taking more than you give. -** -************************************************************************* -** -** Utilities used to help multiple LSM clients to coexist within the -** same process space. -*/ -#include "lsmInt.h" - -/* -** Global data. All global variables used by code in this file are grouped -** into the following structure instance. -** -** pDatabase: -** Linked list of all Database objects allocated within this process. -** This list may not be traversed without holding the global mutex (see -** functions enterGlobalMutex() and leaveGlobalMutex()). -*/ -static struct SharedData { - Database *pDatabase; /* Linked list of all Database objects */ -} gShared; - -/* -** Database structure. There is one such structure for each distinct -** database accessed by this process. They are stored in the singly linked -** list starting at global variable gShared.pDatabase. Database objects are -** reference counted. Once the number of connections to the associated -** database drops to zero, they are removed from the linked list and deleted. -** -** pFile: -** In multi-process mode, this file descriptor is used to obtain locks -** and to access shared-memory. In single process mode, its only job is -** to hold the exclusive lock on the file. -** -*/ -struct Database { - /* Protected by the global mutex (enterGlobalMutex/leaveGlobalMutex): */ - char *zName; /* Canonical path to database file */ - int nName; /* strlen(zName) */ - int nDbRef; /* Number of associated lsm_db handles */ - Database *pDbNext; /* Next Database structure in global list */ - - /* Protected by the local mutex (pClientMutex) */ - int bReadonly; /* True if Database.pFile is read-only */ - int bMultiProc; /* True if running in multi-process mode */ - lsm_file *pFile; /* Used for locks/shm in multi-proc mode */ - LsmFile *pLsmFile; /* List of deferred closes */ - lsm_mutex *pClientMutex; /* Protects the apShmChunk[] and pConn */ - int nShmChunk; /* Number of entries in apShmChunk[] array */ - void **apShmChunk; /* Array of "shared" memory regions */ - lsm_db *pConn; /* List of connections to this db. */ -}; - -/* -** Functions to enter and leave the global mutex. This mutex is used -** to protect the global linked-list headed at gShared.pDatabase. 
-*/ -static int enterGlobalMutex(lsm_env *pEnv){ - lsm_mutex *p; - int rc = lsmMutexStatic(pEnv, LSM_MUTEX_GLOBAL, &p); - if( rc==LSM_OK ) lsmMutexEnter(pEnv, p); - return rc; -} -static void leaveGlobalMutex(lsm_env *pEnv){ - lsm_mutex *p; - lsmMutexStatic(pEnv, LSM_MUTEX_GLOBAL, &p); - lsmMutexLeave(pEnv, p); -} - -#ifdef LSM_DEBUG -static int holdingGlobalMutex(lsm_env *pEnv){ - lsm_mutex *p; - lsmMutexStatic(pEnv, LSM_MUTEX_GLOBAL, &p); - return lsmMutexHeld(pEnv, p); -} -#endif - -#if 0 -static void assertNotInFreelist(Freelist *p, int iBlk){ - int i; - for(i=0; inEntry; i++){ - assert( p->aEntry[i].iBlk!=iBlk ); - } -} -#else -# define assertNotInFreelist(x,y) -#endif - -/* -** Append an entry to the free-list. If (iId==-1), this is a delete. -*/ -int freelistAppend(lsm_db *db, u32 iBlk, i64 iId){ - lsm_env *pEnv = db->pEnv; - Freelist *p; - int i; - - assert( iId==-1 || iId>=0 ); - p = db->bUseFreelist ? db->pFreelist : &db->pWorker->freelist; - - /* Extend the space allocated for the freelist, if required */ - assert( p->nAlloc>=p->nEntry ); - if( p->nAlloc==p->nEntry ){ - int nNew; - int nByte; - FreelistEntry *aNew; - - nNew = (p->nAlloc==0 ? 4 : p->nAlloc*2); - nByte = sizeof(FreelistEntry) * nNew; - aNew = (FreelistEntry *)lsmRealloc(pEnv, p->aEntry, nByte); - if( !aNew ) return LSM_NOMEM_BKPT; - p->nAlloc = nNew; - p->aEntry = aNew; - } - - for(i=0; inEntry; i++){ - assert( i==0 || p->aEntry[i].iBlk > p->aEntry[i-1].iBlk ); - if( p->aEntry[i].iBlk>=iBlk ) break; - } - - if( inEntry && p->aEntry[i].iBlk==iBlk ){ - /* Clobber an existing entry */ - p->aEntry[i].iId = iId; - }else{ - /* Insert a new entry into the list */ - int nByte = sizeof(FreelistEntry)*(p->nEntry-i); - memmove(&p->aEntry[i+1], &p->aEntry[i], nByte); - p->aEntry[i].iBlk = iBlk; - p->aEntry[i].iId = iId; - p->nEntry++; - } - - return LSM_OK; -} - -/* -** This function frees all resources held by the Database structure passed -** as the only argument. -*/ -static void freeDatabase(lsm_env *pEnv, Database *p){ - assert( holdingGlobalMutex(pEnv) ); - if( p ){ - /* Free the mutexes */ - lsmMutexDel(pEnv, p->pClientMutex); - - if( p->pFile ){ - lsmEnvClose(pEnv, p->pFile); - } - - /* Free the array of shm pointers */ - lsmFree(pEnv, p->apShmChunk); - - /* Free the memory allocated for the Database struct itself */ - lsmFree(pEnv, p); - } -} - -typedef struct DbTruncateCtx DbTruncateCtx; -struct DbTruncateCtx { - int nBlock; - i64 iInUse; -}; - -static int dbTruncateCb(void *pCtx, int iBlk, i64 iSnapshot){ - DbTruncateCtx *p = (DbTruncateCtx *)pCtx; - if( iBlk!=p->nBlock || (p->iInUse>=0 && iSnapshot>=p->iInUse) ) return 1; - p->nBlock--; - return 0; -} - -static int dbTruncate(lsm_db *pDb, i64 iInUse){ - int rc = LSM_OK; -#if 0 - int i; - DbTruncateCtx ctx; - - assert( pDb->pWorker ); - ctx.nBlock = pDb->pWorker->nBlock; - ctx.iInUse = iInUse; - - rc = lsmWalkFreelist(pDb, 1, dbTruncateCb, (void *)&ctx); - for(i=ctx.nBlock+1; rc==LSM_OK && i<=pDb->pWorker->nBlock; i++){ - rc = freelistAppend(pDb, i, -1); - } - - if( rc==LSM_OK ){ -#ifdef LSM_LOG_FREELIST - if( ctx.nBlock!=pDb->pWorker->nBlock ){ - lsmLogMessage(pDb, 0, - "dbTruncate(): truncated db to %d blocks",ctx.nBlock - ); - } -#endif - pDb->pWorker->nBlock = ctx.nBlock; - } -#endif - return rc; -} - - -/* -** This function is called during database shutdown (when the number of -** connections drops from one to zero). It truncates the database file -** to as small a size as possible without truncating away any blocks that -** contain data. 
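freelistAppend() above keeps the free-list sorted by block number, overwrites an existing entry for the same block in place, and doubles the array (4, 8, 16, ...) when it fills. A simplified standalone sketch of that policy; the Entry/List types here are illustrative stand-ins for FreelistEntry/Freelist:

#include <stdlib.h>
#include <string.h>

typedef struct { unsigned iBlk; long long iId; } Entry;
typedef struct { Entry *a; int n; int nAlloc; } List;

int listAppend(List *p, unsigned iBlk, long long iId){
  int i;
  if( p->n==p->nAlloc ){
    int nNew = p->nAlloc ? p->nAlloc*2 : 4;             /* doubling growth          */
    Entry *aNew = (Entry *)realloc(p->a, nNew*sizeof(Entry));
    if( aNew==0 ) return -1;                            /* stands in for LSM_NOMEM  */
    p->a = aNew;
    p->nAlloc = nNew;
  }
  for(i=0; i<p->n && p->a[i].iBlk<iBlk; i++);
  if( i<p->n && p->a[i].iBlk==iBlk ){
    p->a[i].iId = iId;                                  /* clobber existing entry   */
  }else{
    memmove(&p->a[i+1], &p->a[i], (p->n-i)*sizeof(Entry));
    p->a[i].iBlk = iBlk;
    p->a[i].iId = iId;
    p->n++;
  }
  return 0;
}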
-*/ -static int dbTruncateFile(lsm_db *pDb){ - int rc; - - assert( pDb->pWorker==0 ); - assert( lsmShmAssertLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_EXCL) ); - rc = lsmCheckpointLoadWorker(pDb); - - if( rc==LSM_OK ){ - DbTruncateCtx ctx; - - /* Walk the database free-block-list in reverse order. Set ctx.nBlock - ** to the block number of the last block in the database that actually - ** contains data. */ - ctx.nBlock = pDb->pWorker->nBlock; - ctx.iInUse = -1; - rc = lsmWalkFreelist(pDb, 1, dbTruncateCb, (void *)&ctx); - - /* If the last block that contains data is not already the last block in - ** the database file, truncate the database file so that it is. */ - if( rc==LSM_OK ){ - rc = lsmFsTruncateDb( - pDb->pFS, (i64)ctx.nBlock*lsmFsBlockSize(pDb->pFS) - ); - } - } - - lsmFreeSnapshot(pDb->pEnv, pDb->pWorker); - pDb->pWorker = 0; - return rc; -} - -static void doDbDisconnect(lsm_db *pDb){ - int rc; - - if( pDb->bReadonly ){ - lsmShmLock(pDb, LSM_LOCK_DMS3, LSM_LOCK_UNLOCK, 0); - }else{ - /* Block for an exclusive lock on DMS1. This lock serializes all calls - ** to doDbConnect() and doDbDisconnect() across all processes. */ - rc = lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_EXCL, 1); - if( rc==LSM_OK ){ - - lsmShmLock(pDb, LSM_LOCK_DMS2, LSM_LOCK_UNLOCK, 0); - - /* Try an exclusive lock on DMS2. If successful, this is the last - ** connection to the database. In this case flush the contents of the - ** in-memory tree to disk and write a checkpoint. */ - rc = lsmShmTestLock(pDb, LSM_LOCK_DMS2, 1, LSM_LOCK_EXCL); - if( rc==LSM_OK ){ - rc = lsmShmTestLock(pDb, LSM_LOCK_CHECKPOINTER, 1, LSM_LOCK_EXCL); - } - if( rc==LSM_OK ){ - int bReadonly = 0; /* True if there exist read-only conns. */ - - /* Flush the in-memory tree, if required. If there is data to flush, - ** this will create a new client snapshot in Database.pClient. The - ** checkpoint (serialization) of this snapshot may be written to disk - ** by the following block. - ** - ** There is no need to take a WRITER lock here. That there are no - ** other locks on DMS2 guarantees that there are no other read-write - ** connections at this time (and the lock on DMS1 guarantees that - ** no new ones may appear). - */ - rc = lsmTreeLoadHeader(pDb, 0); - if( rc==LSM_OK && (lsmTreeHasOld(pDb) || lsmTreeSize(pDb)>0) ){ - rc = lsmFlushTreeToDisk(pDb); - } - - /* Now check if there are any read-only connections. If there are, - ** then do not truncate the db file or unlink the shared-memory - ** region. */ - if( rc==LSM_OK ){ - rc = lsmShmTestLock(pDb, LSM_LOCK_DMS3, 1, LSM_LOCK_EXCL); - if( rc==LSM_BUSY ){ - bReadonly = 1; - rc = LSM_OK; - } - } - - /* Write a checkpoint to disk. */ - if( rc==LSM_OK ){ - rc = lsmCheckpointWrite(pDb, 0); - } - - /* If the checkpoint was written successfully, delete the log file - ** and, if possible, truncate the database file. */ - if( rc==LSM_OK ){ - int bRotrans = 0; - Database *p = pDb->pDatabase; - - /* The log file may only be deleted if there are no clients - ** read-only clients running rotrans transactions. */ - rc = lsmDetectRoTrans(pDb, &bRotrans); - if( rc==LSM_OK && bRotrans==0 ){ - lsmFsCloseAndDeleteLog(pDb->pFS); - } - - /* The database may only be truncated if there exist no read-only - ** clients - either connected or running rotrans transactions. 
*/ - if( bReadonly==0 && bRotrans==0 ){ - lsmFsUnmap(pDb->pFS); - dbTruncateFile(pDb); - if( p->pFile && p->bMultiProc ){ - lsmEnvShmUnmap(pDb->pEnv, p->pFile, 1); - } - } - } - } - } - - if( pDb->iRwclient>=0 ){ - lsmShmLock(pDb, LSM_LOCK_RWCLIENT(pDb->iRwclient), LSM_LOCK_UNLOCK, 0); - pDb->iRwclient = -1; - } - - lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); - } - pDb->pShmhdr = 0; -} - -static int doDbConnect(lsm_db *pDb){ - const int nUsMax = 100000; /* Max value for nUs */ - int nUs = 1000; /* us to wait between DMS1 attempts */ - int rc; - - /* Obtain a pointer to the shared-memory header */ - assert( pDb->pShmhdr==0 ); - assert( pDb->bReadonly==0 ); - - /* Block for an exclusive lock on DMS1. This lock serializes all calls - ** to doDbConnect() and doDbDisconnect() across all processes. */ - while( 1 ){ - rc = lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_EXCL, 1); - if( rc!=LSM_BUSY ) break; - lsmEnvSleep(pDb->pEnv, nUs); - nUs = nUs * 2; - if( nUs>nUsMax ) nUs = nUsMax; - } - if( rc==LSM_OK ){ - rc = lsmShmCacheChunks(pDb, 1); - } - if( rc!=LSM_OK ) return rc; - pDb->pShmhdr = (ShmHeader *)pDb->apShm[0]; - - /* Try an exclusive lock on DMS2/DMS3. If successful, this is the first - ** and only connection to the database. In this case initialize the - ** shared-memory and run log file recovery. */ - assert( LSM_LOCK_DMS3==1+LSM_LOCK_DMS2 ); - rc = lsmShmTestLock(pDb, LSM_LOCK_DMS2, 2, LSM_LOCK_EXCL); - if( rc==LSM_OK ){ - memset(pDb->pShmhdr, 0, sizeof(ShmHeader)); - rc = lsmCheckpointRecover(pDb); - if( rc==LSM_OK ){ - rc = lsmLogRecover(pDb); - } - if( rc==LSM_OK ){ - ShmHeader *pShm = pDb->pShmhdr; - pShm->aReader[0].iLsmId = lsmCheckpointId(pShm->aSnap1, 0); - pShm->aReader[0].iTreeId = pDb->treehdr.iUsedShmid; - } - }else if( rc==LSM_BUSY ){ - rc = LSM_OK; - } - - /* Take a shared lock on DMS2. In multi-process mode this lock "cannot" - ** fail, as connections may only hold an exclusive lock on DMS2 if they - ** first hold an exclusive lock on DMS1. And this connection is currently - ** holding the exclusive lock on DSM1. - ** - ** However, if some other connection has the database open in single-process - ** mode, this operation will fail. In this case, return the error to the - ** caller - the attempt to connect to the db has failed. - */ - if( rc==LSM_OK ){ - rc = lsmShmLock(pDb, LSM_LOCK_DMS2, LSM_LOCK_SHARED, 0); - } - - /* If anything went wrong, unlock DMS2. Otherwise, try to take an exclusive - ** lock on one of the LSM_LOCK_RWCLIENT() locks. Unlock DMS1 in any case. */ - if( rc!=LSM_OK ){ - pDb->pShmhdr = 0; - }else{ - int i; - for(i=0; iiRwclient = i; - if( rc2!=LSM_BUSY ){ - rc = rc2; - break; - } - } - } - lsmShmLock(pDb, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); - - return rc; -} - -static int dbOpenSharedFd(lsm_env *pEnv, Database *p, int bRoOk){ - int rc; - - rc = lsmEnvOpen(pEnv, p->zName, 0, &p->pFile); - if( rc==LSM_IOERR && bRoOk ){ - rc = lsmEnvOpen(pEnv, p->zName, LSM_OPEN_READONLY, &p->pFile); - p->bReadonly = 1; - } - - return rc; -} - -/* -** Return a reference to the shared Database handle for the database -** identified by canonical path zName. If this is the first connection to -** the named database, a new Database object is allocated. Otherwise, a -** pointer to an existing object is returned. -** -** If successful, *ppDatabase is set to point to the shared Database -** structure and LSM_OK returned. Otherwise, *ppDatabase is set to NULL -** and and LSM error code returned. 
-** -** Each successful call to this function should be (eventually) matched -** by a call to lsmDbDatabaseRelease(). -*/ -int lsmDbDatabaseConnect( - lsm_db *pDb, /* Database handle */ - const char *zName /* Full-path to db file */ -){ - lsm_env *pEnv = pDb->pEnv; - int rc; /* Return code */ - Database *p = 0; /* Pointer returned via *ppDatabase */ - int nName = lsmStrlen(zName); - - assert( pDb->pDatabase==0 ); - rc = enterGlobalMutex(pEnv); - if( rc==LSM_OK ){ - - /* Search the global list for an existing object. TODO: Need something - ** better than the memcmp() below to figure out if a given Database - ** object represents the requested file. */ - for(p=gShared.pDatabase; p; p=p->pDbNext){ - if( nName==p->nName && 0==memcmp(zName, p->zName, nName) ) break; - } - - /* If no suitable Database object was found, allocate a new one. */ - if( p==0 ){ - p = (Database *)lsmMallocZeroRc(pEnv, sizeof(Database)+nName+1, &rc); - - /* If the allocation was successful, fill in other fields and - ** allocate the client mutex. */ - if( rc==LSM_OK ){ - p->bMultiProc = pDb->bMultiProc; - p->zName = (char *)&p[1]; - p->nName = nName; - memcpy((void *)p->zName, zName, nName+1); - rc = lsmMutexNew(pEnv, &p->pClientMutex); - } - - /* If nothing has gone wrong so far, open the shared fd. And if that - ** succeeds and this connection requested single-process mode, - ** attempt to take the exclusive lock on DMS2. */ - if( rc==LSM_OK ){ - int bReadonly = (pDb->bReadonly && pDb->bMultiProc); - rc = dbOpenSharedFd(pDb->pEnv, p, bReadonly); - } - - if( rc==LSM_OK && p->bMultiProc==0 ){ - /* Hold an exclusive lock DMS1 while grabbing DMS2. This ensures - ** that any ongoing call to doDbDisconnect() (even one in another - ** process) is finished before proceeding. */ - assert( p->bReadonly==0 ); - rc = lsmEnvLock(pDb->pEnv, p->pFile, LSM_LOCK_DMS1, LSM_LOCK_EXCL); - if( rc==LSM_OK ){ - rc = lsmEnvLock(pDb->pEnv, p->pFile, LSM_LOCK_DMS2, LSM_LOCK_EXCL); - lsmEnvLock(pDb->pEnv, p->pFile, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK); - } - } - - if( rc==LSM_OK ){ - p->pDbNext = gShared.pDatabase; - gShared.pDatabase = p; - }else{ - freeDatabase(pEnv, p); - p = 0; - } - } - - if( p ){ - p->nDbRef++; - } - leaveGlobalMutex(pEnv); - - if( p ){ - lsmMutexEnter(pDb->pEnv, p->pClientMutex); - pDb->pNext = p->pConn; - p->pConn = pDb; - lsmMutexLeave(pDb->pEnv, p->pClientMutex); - } - } - - pDb->pDatabase = p; - if( rc==LSM_OK ){ - assert( p ); - rc = lsmFsOpen(pDb, zName, p->bReadonly); - } - - /* If the db handle is read-write, then connect to the system now. Run - ** recovery as necessary. Or, if this is a read-only database handle, - ** defer attempting to connect to the system until a read-transaction - ** is opened. */ - if( rc==LSM_OK ){ - rc = lsmFsConfigure(pDb); - } - if( rc==LSM_OK && pDb->bReadonly==0 ){ - rc = doDbConnect(pDb); - } - - return rc; -} - -static void dbDeferClose(lsm_db *pDb){ - if( pDb->pFS ){ - LsmFile *pLsmFile; - Database *p = pDb->pDatabase; - pLsmFile = lsmFsDeferClose(pDb->pFS); - pLsmFile->pNext = p->pLsmFile; - p->pLsmFile = pLsmFile; - } -} - -LsmFile *lsmDbRecycleFd(lsm_db *db){ - LsmFile *pRet; - Database *p = db->pDatabase; - lsmMutexEnter(db->pEnv, p->pClientMutex); - if( (pRet = p->pLsmFile)!=0 ){ - p->pLsmFile = pRet->pNext; - } - lsmMutexLeave(db->pEnv, p->pClientMutex); - return pRet; -} - -/* -** Release a reference to a Database object obtained from -** lsmDbDatabaseConnect(). There should be exactly one call to this function -** for each successful call to Find(). 
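lsmDbDatabaseConnect() above finds or creates a process-wide Database object keyed by canonical path and bumps its reference count; lsmDbDatabaseRelease() below frees it when the count drops to zero. A much-reduced sketch of that lifetime pattern, with the global mutex and all file/lock handling omitted:

#include <stdlib.h>
#include <string.h>

typedef struct Shared Shared;
struct Shared { char *zName; int nRef; Shared *pNext; };
static Shared *gList = 0;                      /* stands in for gShared.pDatabase */

Shared *sharedConnect(const char *zName){
  Shared *p;
  for(p=gList; p; p=p->pNext){
    if( strcmp(p->zName, zName)==0 ) break;    /* existing object                 */
  }
  if( p==0 ){                                  /* first connection: allocate      */
    p = (Shared *)calloc(1, sizeof(*p));
    if( p==0 ) return 0;
    p->zName = (char *)malloc(strlen(zName)+1);
    if( p->zName==0 ){ free(p); return 0; }
    strcpy(p->zName, zName);
    p->pNext = gList;
    gList = p;
  }
  p->nRef++;
  return p;
}

void sharedRelease(Shared *p){
  if( p && --p->nRef==0 ){                     /* last connection: unlink and free */
    Shared **pp;
    for(pp=&gList; *pp!=p; pp=&(*pp)->pNext);
    *pp = p->pNext;
    free(p->zName);
    free(p);
  }
}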
-*/ -void lsmDbDatabaseRelease(lsm_db *pDb){ - Database *p = pDb->pDatabase; - if( p ){ - lsm_db **ppDb; - - if( pDb->pShmhdr ){ - doDbDisconnect(pDb); - } - - lsmFsUnmap(pDb->pFS); - lsmMutexEnter(pDb->pEnv, p->pClientMutex); - for(ppDb=&p->pConn; *ppDb!=pDb; ppDb=&((*ppDb)->pNext)); - *ppDb = pDb->pNext; - dbDeferClose(pDb); - lsmMutexLeave(pDb->pEnv, p->pClientMutex); - - enterGlobalMutex(pDb->pEnv); - p->nDbRef--; - if( p->nDbRef==0 ){ - LsmFile *pIter; - LsmFile *pNext; - Database **pp; - - /* Remove the Database structure from the linked list. */ - for(pp=&gShared.pDatabase; *pp!=p; pp=&((*pp)->pDbNext)); - *pp = p->pDbNext; - - /* If they were allocated from the heap, free the shared memory chunks */ - if( p->bMultiProc==0 ){ - int i; - for(i=0; inShmChunk; i++){ - lsmFree(pDb->pEnv, p->apShmChunk[i]); - } - } - - /* Close any outstanding file descriptors */ - for(pIter=p->pLsmFile; pIter; pIter=pNext){ - pNext = pIter->pNext; - lsmEnvClose(pDb->pEnv, pIter->pFile); - lsmFree(pDb->pEnv, pIter); - } - freeDatabase(pDb->pEnv, p); - } - leaveGlobalMutex(pDb->pEnv); - } -} - -Level *lsmDbSnapshotLevel(Snapshot *pSnapshot){ - return pSnapshot->pLevel; -} - -void lsmDbSnapshotSetLevel(Snapshot *pSnap, Level *pLevel){ - pSnap->pLevel = pLevel; -} - -/* TODO: Shuffle things around to get rid of this */ -static int firstSnapshotInUse(lsm_db *, i64 *); - -/* -** Context object used by the lsmWalkFreelist() utility. -*/ -typedef struct WalkFreelistCtx WalkFreelistCtx; -struct WalkFreelistCtx { - lsm_db *pDb; - int bReverse; - Freelist *pFreelist; - int iFree; - int (*xUsr)(void *, int, i64); /* User callback function */ - void *pUsrctx; /* User callback context */ - int bDone; /* Set to true after xUsr() returns true */ -}; - -/* -** Callback used by lsmWalkFreelist(). -*/ -static int walkFreelistCb(void *pCtx, int iBlk, i64 iSnapshot){ - WalkFreelistCtx *p = (WalkFreelistCtx *)pCtx; - const int iDir = (p->bReverse ? -1 : 1); - Freelist *pFree = p->pFreelist; - - assert( p->bDone==0 ); - assert( iBlk>=0 ); - if( pFree ){ - while( (p->iFree < pFree->nEntry) && p->iFree>=0 ){ - FreelistEntry *pEntry = &pFree->aEntry[p->iFree]; - if( (p->bReverse==0 && pEntry->iBlk>(u32)iBlk) - || (p->bReverse!=0 && pEntry->iBlk<(u32)iBlk) - ){ - break; - }else{ - p->iFree += iDir; - if( pEntry->iId>=0 - && p->xUsr(p->pUsrctx, pEntry->iBlk, pEntry->iId) - ){ - p->bDone = 1; - return 1; - } - if( pEntry->iBlk==(u32)iBlk ) return 0; - } - } - } - - if( p->xUsr(p->pUsrctx, iBlk, iSnapshot) ){ - p->bDone = 1; - return 1; - } - return 0; -} - -/* -** The database handle passed as the first argument must be the worker -** connection. This function iterates through the contents of the current -** free block list, invoking the supplied callback once for each list -** element. -** -** The difference between this function and lsmSortedWalkFreelist() is -** that lsmSortedWalkFreelist() only considers those free-list elements -** stored within the LSM. This function also merges in any in-memory -** elements. -*/ -int lsmWalkFreelist( - lsm_db *pDb, /* Database handle (must be worker) */ - int bReverse, /* True to iterate from largest to smallest */ - int (*x)(void *, int, i64), /* Callback function */ - void *pCtx /* First argument to pass to callback */ -){ - const int iDir = (bReverse ? 
-1 : 1); - int rc; - int iCtx; - - WalkFreelistCtx ctx[2]; - - ctx[0].pDb = pDb; - ctx[0].bReverse = bReverse; - ctx[0].pFreelist = &pDb->pWorker->freelist; - if( ctx[0].pFreelist && bReverse ){ - ctx[0].iFree = ctx[0].pFreelist->nEntry-1; - }else{ - ctx[0].iFree = 0; - } - ctx[0].xUsr = walkFreelistCb; - ctx[0].pUsrctx = (void *)&ctx[1]; - ctx[0].bDone = 0; - - ctx[1].pDb = pDb; - ctx[1].bReverse = bReverse; - ctx[1].pFreelist = pDb->pFreelist; - if( ctx[1].pFreelist && bReverse ){ - ctx[1].iFree = ctx[1].pFreelist->nEntry-1; - }else{ - ctx[1].iFree = 0; - } - ctx[1].xUsr = x; - ctx[1].pUsrctx = pCtx; - ctx[1].bDone = 0; - - rc = lsmSortedWalkFreelist(pDb, bReverse, walkFreelistCb, (void *)&ctx[0]); - - if( ctx[0].bDone==0 ){ - for(iCtx=0; iCtx<2; iCtx++){ - int i; - WalkFreelistCtx *p = &ctx[iCtx]; - for(i=p->iFree; - p->pFreelist && rc==LSM_OK && ipFreelist->nEntry && i>=0; - i += iDir - ){ - FreelistEntry *pEntry = &p->pFreelist->aEntry[i]; - if( pEntry->iId>=0 && p->xUsr(p->pUsrctx, pEntry->iBlk, pEntry->iId) ){ - return LSM_OK; - } - } - } - } - - return rc; -} - - -typedef struct FindFreeblockCtx FindFreeblockCtx; -struct FindFreeblockCtx { - i64 iInUse; - int iRet; - int bNotOne; -}; - -static int findFreeblockCb(void *pCtx, int iBlk, i64 iSnapshot){ - FindFreeblockCtx *p = (FindFreeblockCtx *)pCtx; - if( iSnapshotiInUse && (iBlk!=1 || p->bNotOne==0) ){ - p->iRet = iBlk; - return 1; - } - return 0; -} - -static int findFreeblock(lsm_db *pDb, i64 iInUse, int bNotOne, int *piRet){ - int rc; /* Return code */ - FindFreeblockCtx ctx; /* Context object */ - - ctx.iInUse = iInUse; - ctx.iRet = 0; - ctx.bNotOne = bNotOne; - rc = lsmWalkFreelist(pDb, 0, findFreeblockCb, (void *)&ctx); - *piRet = ctx.iRet; - - return rc; -} - -/* -** Allocate a new database file block to write data to, either by extending -** the database file or by recycling a free-list entry. The worker snapshot -** must be held in order to call this function. -** -** If successful, *piBlk is set to the block number allocated and LSM_OK is -** returned. Otherwise, *piBlk is zeroed and an lsm error code returned. -*/ -int lsmBlockAllocate(lsm_db *pDb, int iBefore, int *piBlk){ - Snapshot *p = pDb->pWorker; - int iRet = 0; /* Block number of allocated block */ - int rc = LSM_OK; - i64 iInUse = 0; /* Snapshot id still in use */ - i64 iSynced = 0; /* Snapshot id synced to disk */ - - assert( p ); - -#ifdef LSM_LOG_FREELIST - { - static int nCall = 0; - char *zFree = 0; - nCall++; - rc = lsmInfoFreelist(pDb, &zFree); - if( rc!=LSM_OK ) return rc; - lsmLogMessage(pDb, 0, "lsmBlockAllocate(): %d freelist: %s", nCall, zFree); - lsmFree(pDb->pEnv, zFree); - } -#endif - - /* Set iInUse to the smallest snapshot id that is either: - ** - ** * Currently in use by a database client, - ** * May be used by a database client in the future, or - ** * Is the most recently checkpointed snapshot (i.e. the one that will - ** be used following recovery if a failure occurs at this point). - */ - rc = lsmCheckpointSynced(pDb, &iSynced, 0, 0); - if( rc==LSM_OK && iSynced==0 ) iSynced = p->iId; - iInUse = iSynced; - if( rc==LSM_OK && pDb->iReader>=0 ){ - assert( pDb->pClient ); - iInUse = LSM_MIN(iInUse, pDb->pClient->iId); - } - if( rc==LSM_OK ) rc = firstSnapshotInUse(pDb, &iInUse); - -#ifdef LSM_LOG_FREELIST - { - lsmLogMessage(pDb, 0, "lsmBlockAllocate(): " - "snapshot-in-use: %lld (iSynced=%lld) (client-id=%lld)", - iInUse, iSynced, (pDb->iReader>=0 ? 
pDb->pClient->iId : 0) - ); - } -#endif - - - /* Unless there exists a read-only transaction (which prevents us from - ** recycling any blocks regardless, query the free block list for a - ** suitable block to reuse. - ** - ** It might seem more natural to check for a read-only transaction at - ** the start of this function. However, it is better do wait until after - ** the call to lsmCheckpointSynced() to do so. - */ - if( rc==LSM_OK ){ - int bRotrans; - rc = lsmDetectRoTrans(pDb, &bRotrans); - - if( rc==LSM_OK && bRotrans==0 ){ - rc = findFreeblock(pDb, iInUse, (iBefore>0), &iRet); - } - } - - if( iBefore>0 && (iRet<=0 || iRet>=iBefore) ){ - iRet = 0; - - }else if( rc==LSM_OK ){ - /* If a block was found in the free block list, use it and remove it from - ** the list. Otherwise, if no suitable block was found, allocate one from - ** the end of the file. */ - if( iRet>0 ){ -#ifdef LSM_LOG_FREELIST - lsmLogMessage(pDb, 0, - "reusing block %d (snapshot-in-use=%lld)", iRet, iInUse); -#endif - rc = freelistAppend(pDb, iRet, -1); - if( rc==LSM_OK ){ - rc = dbTruncate(pDb, iInUse); - } - }else{ - iRet = ++(p->nBlock); -#ifdef LSM_LOG_FREELIST - lsmLogMessage(pDb, 0, "extending file to %d blocks", iRet); -#endif - } - } - - assert( iBefore>0 || iRet>0 || rc!=LSM_OK ); - *piBlk = iRet; - return rc; -} - -/* -** Free a database block. The worker snapshot must be held in order to call -** this function. -** -** If successful, LSM_OK is returned. Otherwise, an lsm error code (e.g. -** LSM_NOMEM). -*/ -int lsmBlockFree(lsm_db *pDb, int iBlk){ - Snapshot *p = pDb->pWorker; - assert( lsmShmAssertWorker(pDb) ); - -#ifdef LSM_LOG_FREELIST - lsmLogMessage(pDb, LSM_OK, "lsmBlockFree(): Free block %d", iBlk); -#endif - - return freelistAppend(pDb, iBlk, p->iId); -} - -/* -** Refree a database block. The worker snapshot must be held in order to call -** this function. -** -** Refreeing is required when a block is allocated using lsmBlockAllocate() -** but then not used. This function is used to push the block back onto -** the freelist. Refreeing a block is different from freeing is, as a refreed -** block may be reused immediately. Whereas a freed block can not be reused -** until (at least) after the next checkpoint. -*/ -int lsmBlockRefree(lsm_db *pDb, int iBlk){ - int rc = LSM_OK; /* Return code */ - -#ifdef LSM_LOG_FREELIST - lsmLogMessage(pDb, LSM_OK, "lsmBlockRefree(): Refree block %d", iBlk); -#endif - - rc = freelistAppend(pDb, iBlk, 0); - return rc; -} - -/* -** If required, copy a database checkpoint from shared memory into the -** database itself. -** -** The WORKER lock must not be held when this is called. This is because -** this function may indirectly call fsync(). And the WORKER lock should -** not be held that long (in case it is required by a client flushing an -** in-memory tree to disk). -*/ -int lsmCheckpointWrite(lsm_db *pDb, u32 *pnWrite){ - int rc; /* Return Code */ - u32 nWrite = 0; - - assert( pDb->pWorker==0 ); - assert( 1 || pDb->pClient==0 ); - assert( lsmShmAssertLock(pDb, LSM_LOCK_WORKER, LSM_LOCK_UNLOCK) ); - - rc = lsmShmLock(pDb, LSM_LOCK_CHECKPOINTER, LSM_LOCK_EXCL, 0); - if( rc!=LSM_OK ) return rc; - - rc = lsmCheckpointLoad(pDb, 0); - if( rc==LSM_OK ){ - int nBlock = lsmCheckpointNBlock(pDb->aSnapshot); - ShmHeader *pShm = pDb->pShmhdr; - int bDone = 0; /* True if checkpoint is already stored */ - - /* Check if this checkpoint has already been written to the database - ** file. If so, set variable bDone to true. 
*/ - if( pShm->iMetaPage ){ - MetaPage *pPg; /* Meta page */ - u8 *aData; /* Meta-page data buffer */ - int nData; /* Size of aData[] in bytes */ - i64 iCkpt; /* Id of checkpoint just loaded */ - i64 iDisk = 0; /* Id of checkpoint already stored in db */ - iCkpt = lsmCheckpointId(pDb->aSnapshot, 0); - rc = lsmFsMetaPageGet(pDb->pFS, 0, pShm->iMetaPage, &pPg); - if( rc==LSM_OK ){ - aData = lsmFsMetaPageData(pPg, &nData); - iDisk = lsmCheckpointId((u32 *)aData, 1); - nWrite = lsmCheckpointNWrite((u32 *)aData, 1); - lsmFsMetaPageRelease(pPg); - } - bDone = (iDisk>=iCkpt); - } - - if( rc==LSM_OK && bDone==0 ){ - int iMeta = (pShm->iMetaPage % 2) + 1; - if( pDb->eSafety!=LSM_SAFETY_OFF ){ - rc = lsmFsSyncDb(pDb->pFS, nBlock); - } - if( rc==LSM_OK ) rc = lsmCheckpointStore(pDb, iMeta); - if( rc==LSM_OK && pDb->eSafety!=LSM_SAFETY_OFF){ - rc = lsmFsSyncDb(pDb->pFS, 0); - } - if( rc==LSM_OK ){ - pShm->iMetaPage = iMeta; - nWrite = lsmCheckpointNWrite(pDb->aSnapshot, 0) - nWrite; - } -#ifdef LSM_LOG_WORK - lsmLogMessage(pDb, 0, "finish checkpoint %d", - (int)lsmCheckpointId(pDb->aSnapshot, 0) - ); -#endif - } - } - - lsmShmLock(pDb, LSM_LOCK_CHECKPOINTER, LSM_LOCK_UNLOCK, 0); - if( pnWrite && rc==LSM_OK ) *pnWrite = nWrite; - return rc; -} - -int lsmBeginWork(lsm_db *pDb){ - int rc; - - /* Attempt to take the WORKER lock */ - rc = lsmShmLock(pDb, LSM_LOCK_WORKER, LSM_LOCK_EXCL, 0); - - /* Deserialize the current worker snapshot */ - if( rc==LSM_OK ){ - rc = lsmCheckpointLoadWorker(pDb); - } - return rc; -} - -void lsmFreeSnapshot(lsm_env *pEnv, Snapshot *p){ - if( p ){ - lsmSortedFreeLevel(pEnv, p->pLevel); - lsmFree(pEnv, p->freelist.aEntry); - lsmFree(pEnv, p->redirect.a); - lsmFree(pEnv, p); - } -} - -/* -** Attempt to populate one of the read-lock slots to contain lock values -** iLsm/iShm. Or, if such a slot exists already, this function is a no-op. -** -** It is not an error if no slot can be populated because the write-lock -** cannot be obtained. If any other error occurs, return an LSM error code. -** Otherwise, LSM_OK. -** -** This function is called at various points to try to ensure that there -** always exists at least one read-lock slot that can be used by a read-only -** client. And so that, in the usual case, there is an "exact match" available -** whenever a read transaction is opened by any client. At present this -** function is called when: -** -** * A write transaction that called lsmTreeDiscardOld() is committed, and -** * Whenever the working snapshot is updated (i.e. lsmFinishWork()). -*/ -static int dbSetReadLock(lsm_db *db, i64 iLsm, u32 iShm){ - int rc = LSM_OK; - ShmHeader *pShm = db->pShmhdr; - int i; - - /* Check if there is already a slot containing the required values. */ - for(i=0; iaReader[i]; - if( p->iLsmId==iLsm && p->iTreeId==iShm ) return LSM_OK; - } - - /* Iterate through all read-lock slots, attempting to take a write-lock - ** on each of them. If a write-lock succeeds, populate the locked slot - ** with the required values and break out of the loop. */ - for(i=0; rc==LSM_OK && iaReader[i]; - p->iLsmId = iLsm; - p->iTreeId = iShm; - lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_UNLOCK, 0); - break; - } - } - - return rc; -} - -/* -** Release the read-lock currently held by connection db. 
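lsmCheckpointWrite() above alternates checkpoints between meta pages 1 and 2 and publishes pShm->iMetaPage only after the new checkpoint has been written (and, depending on the safety level, synced), so a failure mid-write still leaves the previous checkpoint intact. A tiny sketch of the alternation:

#include <stdio.h>

static int nextMetaPage(int iCurrent){
  return (iCurrent % 2) + 1;        /* 0 -> 1, 1 -> 2, 2 -> 1, ... */
}

int main(void){
  int iMeta = 0, i;
  for(i=0; i<4; i++){
    iMeta = nextMetaPage(iMeta);
    printf("checkpoint %d -> meta page %d\n", i+1, iMeta);
  }
  return 0;
}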
-*/ -int dbReleaseReadlock(lsm_db *db){ - int rc = LSM_OK; - if( db->iReader>=0 ){ - rc = lsmShmLock(db, LSM_LOCK_READER(db->iReader), LSM_LOCK_UNLOCK, 0); - db->iReader = -1; - } - db->bRoTrans = 0; - return rc; -} - - -/* -** Argument bFlush is true if the contents of the in-memory tree has just -** been flushed to disk. The significance of this is that once the snapshot -** created to hold the updated state of the database is synced to disk, log -** file space can be recycled. -*/ -void lsmFinishWork(lsm_db *pDb, int bFlush, int *pRc){ - int rc = *pRc; - assert( rc!=0 || pDb->pWorker ); - if( pDb->pWorker ){ - /* If no error has occurred, serialize the worker snapshot and write - ** it to shared memory. */ - if( rc==LSM_OK ){ - rc = lsmSaveWorker(pDb, bFlush); - } - - /* Assuming no error has occurred, update a read lock slot with the - ** new snapshot id (see comments above function dbSetReadLock()). */ - if( rc==LSM_OK ){ - if( pDb->iReader<0 ){ - rc = lsmTreeLoadHeader(pDb, 0); - } - if( rc==LSM_OK ){ - rc = dbSetReadLock(pDb, pDb->pWorker->iId, pDb->treehdr.iUsedShmid); - } - } - - /* Free the snapshot object. */ - lsmFreeSnapshot(pDb->pEnv, pDb->pWorker); - pDb->pWorker = 0; - } - - lsmShmLock(pDb, LSM_LOCK_WORKER, LSM_LOCK_UNLOCK, 0); - *pRc = rc; -} - -/* -** Called when recovery is finished. -*/ -int lsmFinishRecovery(lsm_db *pDb){ - lsmTreeEndTransaction(pDb, 1); - return LSM_OK; -} - -/* -** Check if the currently configured compression functions -** (LSM_CONFIG_SET_COMPRESSION) are compatible with a database that has its -** compression id set to iReq. Compression routines are compatible if iReq -** is zero (indicating the database is empty), or if it is equal to the -** compression id of the configured compression routines. -** -** If the check shows that the current compression are incompatible and there -** is a compression factory registered, give it a chance to install new -** compression routines. -** -** If, after any registered factory is invoked, the compression functions -** are still incompatible, return LSM_MISMATCH. Otherwise, LSM_OK. -*/ -int lsmCheckCompressionId(lsm_db *pDb, u32 iReq){ - if( iReq!=LSM_COMPRESSION_EMPTY && pDb->compress.iId!=iReq ){ - if( pDb->factory.xFactory ){ - pDb->bInFactory = 1; - pDb->factory.xFactory(pDb->factory.pCtx, pDb, iReq); - pDb->bInFactory = 0; - } - if( pDb->compress.iId!=iReq ){ - /* Incompatible */ - return LSM_MISMATCH; - } - } - /* Compatible */ - return LSM_OK; -} - -/* -** Begin a read transaction. This function is a no-op if the connection -** passed as the only argument already has an open read transaction. -*/ -int lsmBeginReadTrans(lsm_db *pDb){ - const int MAX_READLOCK_ATTEMPTS = 10; - const int nMaxAttempt = (pDb->bRoTrans ? 1 : MAX_READLOCK_ATTEMPTS); - - int rc = LSM_OK; /* Return code */ - int iAttempt = 0; - - assert( pDb->pWorker==0 ); - - while( rc==LSM_OK && pDb->iReader<0 && (iAttempt++)pCsr==0 && pDb->nTransOpen==0 ); - - /* Load the in-memory tree header. */ - rc = lsmTreeLoadHeader(pDb, &iTreehdr); - - /* Load the database snapshot */ - if( rc==LSM_OK ){ - if( lsmCheckpointClientCacheOk(pDb)==0 ){ - lsmFreeSnapshot(pDb->pEnv, pDb->pClient); - pDb->pClient = 0; - lsmMCursorFreeCache(pDb); - lsmFsPurgeCache(pDb->pFS); - rc = lsmCheckpointLoad(pDb, &iSnap); - }else{ - iSnap = 1; - } - } - - /* Take a read-lock on the tree and snapshot just loaded. Then check - ** that the shared-memory still contains the same values. If so, proceed. 
- ** Otherwise, relinquish the read-lock and retry the whole procedure - ** (starting with loading the in-memory tree header). */ - if( rc==LSM_OK ){ - u32 iShmMax = pDb->treehdr.iUsedShmid; - u32 iShmMin = pDb->treehdr.iNextShmid+1-LSM_MAX_SHMCHUNKS; - rc = lsmReadlock( - pDb, lsmCheckpointId(pDb->aSnapshot, 0), iShmMin, iShmMax - ); - if( rc==LSM_OK ){ - if( lsmTreeLoadHeaderOk(pDb, iTreehdr) - && lsmCheckpointLoadOk(pDb, iSnap) - ){ - /* Read lock has been successfully obtained. Deserialize the - ** checkpoint just loaded. TODO: This will be removed after - ** lsm_sorted.c is changed to work directly from the serialized - ** version of the snapshot. */ - if( pDb->pClient==0 ){ - rc = lsmCheckpointDeserialize(pDb, 0, pDb->aSnapshot,&pDb->pClient); - } - assert( (rc==LSM_OK)==(pDb->pClient!=0) ); - assert( pDb->iReader>=0 ); - - /* Check that the client has the right compression hooks loaded. - ** If not, set rc to LSM_MISMATCH. */ - if( rc==LSM_OK ){ - rc = lsmCheckCompressionId(pDb, pDb->pClient->iCmpId); - } - }else{ - rc = dbReleaseReadlock(pDb); - } - } - - if( rc==LSM_BUSY ){ - rc = LSM_OK; - } - } -#if 0 -if( rc==LSM_OK && pDb->pClient ){ - fprintf(stderr, - "reading %p: snapshot:%d used-shmid:%d trans-id:%d iOldShmid=%d\n", - (void *)pDb, - (int)pDb->pClient->iId, (int)pDb->treehdr.iUsedShmid, - (int)pDb->treehdr.root.iTransId, - (int)pDb->treehdr.iOldShmid - ); -} -#endif - } - - if( rc==LSM_OK ){ - rc = lsmShmCacheChunks(pDb, pDb->treehdr.nChunk); - } - if( rc!=LSM_OK ){ - dbReleaseReadlock(pDb); - } - if( pDb->pClient==0 && rc==LSM_OK ) rc = LSM_BUSY; - return rc; -} - -/* -** This function is used by a read-write connection to determine if there -** are currently one or more read-only transactions open on the database -** (in this context a read-only transaction is one opened by a read-only -** connection on a non-live database). -** -** If no error occurs, LSM_OK is returned and *pbExists is set to true if -** some other connection has a read-only transaction open, or false -** otherwise. If an error occurs an LSM error code is returned and the final -** value of *pbExist is undefined. -*/ -int lsmDetectRoTrans(lsm_db *db, int *pbExist){ - int rc; - - /* Only a read-write connection may use this function. */ - assert( db->bReadonly==0 ); - - rc = lsmShmTestLock(db, LSM_LOCK_ROTRANS, 1, LSM_LOCK_EXCL); - if( rc==LSM_BUSY ){ - *pbExist = 1; - rc = LSM_OK; - }else{ - *pbExist = 0; - } - - return rc; -} - -/* -** db is a read-only database handle in the disconnected state. This function -** attempts to open a read-transaction on the database. This may involve -** connecting to the database system (opening shared memory etc.). -*/ -int lsmBeginRoTrans(lsm_db *db){ - int rc = LSM_OK; - - assert( db->bReadonly && db->pShmhdr==0 ); - assert( db->iReader<0 ); - - if( db->bRoTrans==0 ){ - - /* Attempt a shared-lock on DMS1. */ - rc = lsmShmLock(db, LSM_LOCK_DMS1, LSM_LOCK_SHARED, 0); - if( rc!=LSM_OK ) return rc; - - rc = lsmShmTestLock( - db, LSM_LOCK_RWCLIENT(0), LSM_LOCK_NREADER, LSM_LOCK_SHARED - ); - if( rc==LSM_OK ){ - /* System is not live. Take a SHARED lock on the ROTRANS byte and - ** release DMS1. Locking ROTRANS tells all read-write clients that they - ** may not recycle any disk space from within the database or log files, - ** as a read-only client may be using it. 
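lsmBeginReadTrans() above uses an optimistic pattern: load the shared tree and snapshot headers, take a reader lock, then re-verify that shared memory still holds the same values, retrying up to MAX_READLOCK_ATTEMPTS times if a writer intervened. A generic sketch of that loop; the shared state and the lock are simulated with plain variables purely for illustration.

static unsigned sharedSnapshotId = 42;   /* stands in for shared memory      */
static int      readLockHeld     = 0;    /* stands in for a reader-lock slot */

int beginReadSketch(unsigned *pSnap){
  int iAttempt;
  for(iAttempt=0; iAttempt<10; iAttempt++){
    unsigned snap = sharedSnapshotId;    /* 1. read the shared headers       */
    readLockHeld = 1;                    /* 2. take the read-lock            */
    if( sharedSnapshotId==snap ){        /* 3. re-verify after locking       */
      *pSnap = snap;
      return 0;                          /* consistent snapshot pinned       */
    }
    readLockHeld = 0;                    /* raced with a writer: retry       */
  }
  return 1;                              /* report "busy" to the caller      */
}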
*/ - rc = lsmShmLock(db, LSM_LOCK_ROTRANS, LSM_LOCK_SHARED, 0); - lsmShmLock(db, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); - - if( rc==LSM_OK ){ - db->bRoTrans = 1; - rc = lsmShmCacheChunks(db, 1); - if( rc==LSM_OK ){ - db->pShmhdr = (ShmHeader *)db->apShm[0]; - memset(db->pShmhdr, 0, sizeof(ShmHeader)); - rc = lsmCheckpointRecover(db); - if( rc==LSM_OK ){ - rc = lsmLogRecover(db); - } - } - } - }else if( rc==LSM_BUSY ){ - /* System is live! */ - rc = lsmShmLock(db, LSM_LOCK_DMS3, LSM_LOCK_SHARED, 0); - lsmShmLock(db, LSM_LOCK_DMS1, LSM_LOCK_UNLOCK, 0); - if( rc==LSM_OK ){ - rc = lsmShmCacheChunks(db, 1); - if( rc==LSM_OK ){ - db->pShmhdr = (ShmHeader *)db->apShm[0]; - } - } - } - - if( rc==LSM_OK ){ - rc = lsmBeginReadTrans(db); - } - } - - return rc; -} - -/* -** Close the currently open read transaction. -*/ -void lsmFinishReadTrans(lsm_db *pDb){ - - /* Worker connections should not be closing read transactions. And - ** read transactions should only be closed after all cursors and write - ** transactions have been closed. Finally pClient should be non-NULL - ** only iff pDb->iReader>=0. */ - assert( pDb->pWorker==0 ); - assert( pDb->pCsr==0 && pDb->nTransOpen==0 ); - - if( pDb->bRoTrans ){ - int i; - for(i=0; inShm; i++){ - lsmFree(pDb->pEnv, pDb->apShm[i]); - } - lsmFree(pDb->pEnv, pDb->apShm); - pDb->apShm = 0; - pDb->nShm = 0; - pDb->pShmhdr = 0; - - lsmShmLock(pDb, LSM_LOCK_ROTRANS, LSM_LOCK_UNLOCK, 0); - } - dbReleaseReadlock(pDb); -} - -/* -** Open a write transaction. -*/ -int lsmBeginWriteTrans(lsm_db *pDb){ - int rc = LSM_OK; /* Return code */ - ShmHeader *pShm = pDb->pShmhdr; /* Shared memory header */ - - assert( pDb->nTransOpen==0 ); - assert( pDb->bDiscardOld==0 ); - assert( pDb->bReadonly==0 ); - - /* If there is no read-transaction open, open one now. */ - if( pDb->iReader<0 ){ - rc = lsmBeginReadTrans(pDb); - } - - /* Attempt to take the WRITER lock */ - if( rc==LSM_OK ){ - rc = lsmShmLock(pDb, LSM_LOCK_WRITER, LSM_LOCK_EXCL, 0); - } - - /* If the previous writer failed mid-transaction, run emergency rollback. */ - if( rc==LSM_OK && pShm->bWriter ){ - rc = lsmTreeRepair(pDb); - if( rc==LSM_OK ) pShm->bWriter = 0; - } - - /* Check that this connection is currently reading from the most recent - ** version of the database. If not, return LSM_BUSY. */ - if( rc==LSM_OK && memcmp(&pShm->hdr1, &pDb->treehdr, sizeof(TreeHeader)) ){ - rc = LSM_BUSY; - } - - if( rc==LSM_OK ){ - rc = lsmLogBegin(pDb); - } - - /* If everything was successful, set the "transaction-in-progress" flag - ** and return LSM_OK. Otherwise, if some error occurred, relinquish the - ** WRITER lock and return an error code. */ - if( rc==LSM_OK ){ - TreeHeader *p = &pDb->treehdr; - pShm->bWriter = 1; - p->root.iTransId++; - if( lsmTreeHasOld(pDb) && p->iOldLog==pDb->pClient->iLogOff ){ - lsmTreeDiscardOld(pDb); - pDb->bDiscardOld = 1; - } - }else{ - lsmShmLock(pDb, LSM_LOCK_WRITER, LSM_LOCK_UNLOCK, 0); - if( pDb->pCsr==0 ) lsmFinishReadTrans(pDb); - } - return rc; -} - -/* -** End the current write transaction. The connection is left with an open -** read transaction. It is an error to call this if there is no open write -** transaction. -** -** If the transaction was committed, then a commit record has already been -** written into the log file when this function is called. Or, if the -** transaction was rolled back, both the log file and in-memory tree -** structure have already been restored. In either case, this function -** merely releases locks and other resources held by the write-transaction. 
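lsmBeginWriteTrans() above returns LSM_BUSY when the connection is not reading the most recent database version, which surfaces through lsm_begin(). A usage sketch of one reasonable application-level response, a bounded retry; this is an assumed policy rather than behavior taken from the library, and it uses only the public functions shown earlier (lsm_begin, lsm_insert, lsm_commit, lsm_rollback).

#include "lsm.h"

int writeWithRetry(lsm_db *db, const void *pKey, int nKey,
                   const void *pVal, int nVal){
  int i, rc = LSM_BUSY;
  for(i=0; i<5 && rc==LSM_BUSY; i++){
    rc = lsm_begin(db, 1);
    if( rc==LSM_OK ){
      rc = lsm_insert(db, pKey, nKey, pVal, nVal);
      if( rc==LSM_OK ){
        rc = lsm_commit(db, 0);
      }else{
        lsm_rollback(db, 0);
      }
    }
  }
  return rc;
}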
-** -** LSM_OK is returned if successful, or an LSM error code otherwise. -*/ -int lsmFinishWriteTrans(lsm_db *pDb, int bCommit){ - int rc = LSM_OK; - int bFlush = 0; - - lsmLogEnd(pDb, bCommit); - if( rc==LSM_OK && bCommit && lsmTreeSize(pDb)>pDb->nTreeLimit ){ - bFlush = 1; - lsmTreeMakeOld(pDb); - } - lsmTreeEndTransaction(pDb, bCommit); - - if( rc==LSM_OK ){ - if( bFlush && pDb->bAutowork ){ - rc = lsmSortedAutoWork(pDb, 1); - }else if( bCommit && pDb->bDiscardOld ){ - rc = dbSetReadLock(pDb, pDb->pClient->iId, pDb->treehdr.iUsedShmid); - } - } - pDb->bDiscardOld = 0; - lsmShmLock(pDb, LSM_LOCK_WRITER, LSM_LOCK_UNLOCK, 0); - - if( bFlush && pDb->bAutowork==0 && pDb->xWork ){ - pDb->xWork(pDb, pDb->pWorkCtx); - } - return rc; -} - - -/* -** Return non-zero if the caller is holding the client mutex. -*/ -#ifdef LSM_DEBUG -int lsmHoldingClientMutex(lsm_db *pDb){ - return lsmMutexHeld(pDb->pEnv, pDb->pDatabase->pClientMutex); -} -#endif - -static int slotIsUsable(ShmReader *p, i64 iLsm, u32 iShmMin, u32 iShmMax){ - return( - p->iLsmId && p->iLsmId<=iLsm - && shm_sequence_ge(iShmMax, p->iTreeId) - && shm_sequence_ge(p->iTreeId, iShmMin) - ); -} - -/* -** Obtain a read-lock on database version identified by the combination -** of snapshot iLsm and tree iTree. Return LSM_OK if successful, or -** an LSM error code otherwise. -*/ -int lsmReadlock(lsm_db *db, i64 iLsm, u32 iShmMin, u32 iShmMax){ - int rc = LSM_OK; - ShmHeader *pShm = db->pShmhdr; - int i; - - assert( db->iReader<0 ); - assert( shm_sequence_ge(iShmMax, iShmMin) ); - - /* This is a no-op if the read-only transaction flag is set. */ - if( db->bRoTrans ){ - db->iReader = 0; - return LSM_OK; - } - - /* Search for an exact match. */ - for(i=0; db->iReader<0 && rc==LSM_OK && iaReader[i]; - if( p->iLsmId==iLsm && p->iTreeId==iShmMax ){ - rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0); - if( rc==LSM_OK && p->iLsmId==iLsm && p->iTreeId==iShmMax ){ - db->iReader = i; - }else if( rc==LSM_BUSY ){ - rc = LSM_OK; - } - } - } - - /* Try to obtain a write-lock on each slot, in order. If successful, set - ** the slot values to iLsm/iTree. */ - for(i=0; db->iReader<0 && rc==LSM_OK && iaReader[i]; - p->iLsmId = iLsm; - p->iTreeId = iShmMax; - rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0); - assert( rc!=LSM_BUSY ); - if( rc==LSM_OK ) db->iReader = i; - } - } - - /* Search for any usable slot */ - for(i=0; db->iReader<0 && rc==LSM_OK && iaReader[i]; - if( slotIsUsable(p, iLsm, iShmMin, iShmMax) ){ - rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_SHARED, 0); - if( rc==LSM_OK && slotIsUsable(p, iLsm, iShmMin, iShmMax) ){ - db->iReader = i; - }else if( rc==LSM_BUSY ){ - rc = LSM_OK; - } - } - } - - if( rc==LSM_OK && db->iReader<0 ){ - rc = LSM_BUSY; - } - return rc; -} - -/* -** This is used to check if there exists a read-lock locking a particular -** version of either the in-memory tree or database file. -** -** If iLsmId is non-zero, then it is a snapshot id. If there exists a -** read-lock using this snapshot or newer, set *pbInUse to true. Or, -** if there is no such read-lock, set it to false. -** -** Or, if iLsmId is zero, then iShmid is a shared-memory sequence id. -** Search for a read-lock using this sequence id or newer. etc. 
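lsmReadlock() above selects a reader slot in three passes: an exact match on the snapshot/tree pair, then an unused slot it can claim, then any slot whose values already cover the snapshot being read. A simplified sketch of that ordering with the shared reader array modeled as plain structs; locking is omitted and the shared-memory sequence check in slotIsUsable() is reduced to the snapshot-id comparison.

typedef struct { long long iLsmId; unsigned iTreeId; } SlotSketch;

int pickReaderSlot(SlotSketch *aSlot, int nSlot, long long iLsm, unsigned iShm){
  int i;
  for(i=0; i<nSlot; i++){                      /* pass 1: exact match         */
    if( aSlot[i].iLsmId==iLsm && aSlot[i].iTreeId==iShm ) return i;
  }
  for(i=0; i<nSlot; i++){                      /* pass 2: claim an empty slot */
    if( aSlot[i].iLsmId==0 ){
      aSlot[i].iLsmId = iLsm;
      aSlot[i].iTreeId = iShm;
      return i;
    }
  }
  for(i=0; i<nSlot; i++){                      /* pass 3: any usable slot     */
    if( aSlot[i].iLsmId && aSlot[i].iLsmId<=iLsm ) return i;
  }
  return -1;                                   /* caller reports "busy"       */
}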
-
-/*
-** This is used to check if there exists a read-lock locking a particular
-** version of either the in-memory tree or database file.
-**
-** If iLsmId is non-zero, then it is a snapshot id. If there exists a
-** read-lock using this snapshot or newer, set *pbInUse to true. Or,
-** if there is no such read-lock, set it to false.
-**
-** Or, if iLsmId is zero, then iShmid is a shared-memory sequence id.
-** Search for a read-lock using this sequence id or newer. etc.
-*/
-static int isInUse(lsm_db *db, i64 iLsmId, u32 iShmid, int *pbInUse){
-  ShmHeader *pShm = db->pShmhdr;
-  int i;
-  int rc = LSM_OK;
-
-  for(i=0; rc==LSM_OK && i<LSM_LOCK_NREADER; i++){
-    ShmReader *p = &pShm->aReader[i];
-    if( p->iLsmId ){
-      if( (iLsmId!=0 && p->iLsmId!=0 && iLsmId>=p->iLsmId)
-       || (iLsmId==0 && shm_sequence_ge(p->iTreeId, iShmid))
-      ){
-        rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_EXCL, 0);
-        if( rc==LSM_OK ){
-          p->iLsmId = 0;
-          lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_UNLOCK, 0);
-        }
-      }
-    }
-  }
-
-  if( rc==LSM_BUSY ){
-    *pbInUse = 1;
-    return LSM_OK;
-  }
-  *pbInUse = 0;
-  return rc;
-}
-
-/*
-** This function is called by worker connections to determine the smallest
-** snapshot id that is currently in use by a database client. The worker
-** connection uses this result to determine whether or not it is safe to
-** recycle a database block.
-*/
-static int firstSnapshotInUse(
-  lsm_db *db,                     /* Database handle */
-  i64 *piInUse                    /* IN/OUT: Smallest snapshot id in use */
-){
-  ShmHeader *pShm = db->pShmhdr;
-  i64 iInUse = *piInUse;
-  int i;
-
-  assert( iInUse>0 );
-  for(i=0; i<LSM_LOCK_NREADER; i++){
-    ShmReader *p = &pShm->aReader[i];
-    if( p->iLsmId ){
-      i64 iThis = p->iLsmId;
-      if( iThis!=0 && iInUse>iThis ){
-        int rc = lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_EXCL, 0);
-        if( rc==LSM_OK ){
-          p->iLsmId = 0;
-          lsmShmLock(db, LSM_LOCK_READER(i), LSM_LOCK_UNLOCK, 0);
-        }else if( rc==LSM_BUSY ){
-          iInUse = iThis;
-        }else{
-          /* Some error other than LSM_BUSY. Return the error code to
-          ** the caller in this case.  */
-          return rc;
-        }
-      }
-    }
-  }
-
-  *piInUse = iInUse;
-  return LSM_OK;
-}
-
-int lsmTreeInUse(lsm_db *db, u32 iShmid, int *pbInUse){
-  if( db->treehdr.iUsedShmid==iShmid ){
-    *pbInUse = 1;
-    return LSM_OK;
-  }
-  return isInUse(db, 0, iShmid, pbInUse);
-}
-
-int lsmLsmInUse(lsm_db *db, i64 iLsmId, int *pbInUse){
-  if( db->pClient && db->pClient->iId<=iLsmId ){
-    *pbInUse = 1;
-    return LSM_OK;
-  }
-  return isInUse(db, iLsmId, 0, pbInUse);
-}
-
-/*
-** This function may only be called after a successful call to
-** lsmDbDatabaseConnect(). It returns true if the connection is in
-** multi-process mode, or false otherwise.
-*/
-int lsmDbMultiProc(lsm_db *pDb){
-  return pDb->pDatabase && pDb->pDatabase->bMultiProc;
-}
-
-
-/*************************************************************************
-**************************************************************************
-**************************************************************************
-**************************************************************************
-**************************************************************************
-*************************************************************************/
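The function below grows the per-connection apShm[] pointer array in steps of NINCR (16) entries. Its sizing expression, ((n + NINCR - 1) / NINCR) * NINCR, is the usual round-up-to-a-multiple idiom; a standalone sketch of just that arithmetic:

    #include <assert.h>

    #define NINCR 16
    /* Smallest multiple of NINCR that is >= n. Adding NINCR-1 before the
    ** truncating integer division rounds the quotient up instead of down. */
    static int round_up(int n){
      return ((n + NINCR - 1) / NINCR) * NINCR;
    }

    int main(void){
      assert( round_up(0)==0  && round_up(1)==16 );
      assert( round_up(16)==16 && round_up(17)==32 );
      return 0;
    }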
-
-/*
-** Ensure that database connection db has cached pointers to at least the
-** first nChunk chunks of shared memory.
-*/
-int lsmShmCacheChunks(lsm_db *db, int nChunk){
-  int rc = LSM_OK;
-  if( nChunk>db->nShm ){
-    static const int NINCR = 16;
-    Database *p = db->pDatabase;
-    lsm_env *pEnv = db->pEnv;
-    int nAlloc;
-    int i;
-
-    /* Ensure that the db->apShm[] array is large enough. If an attempt to
-    ** allocate memory fails, return LSM_NOMEM immediately. The apShm[] array
-    ** is always extended in multiples of 16 entries - so the actual allocated
-    ** size can be inferred from nShm.
-    */
-    nAlloc = ((db->nShm + NINCR - 1) / NINCR) * NINCR;
-    while( nChunk>=nAlloc ){
-      void **apShm;
-      nAlloc += NINCR;
-      apShm = lsmRealloc(pEnv, db->apShm, sizeof(void*)*nAlloc);
-      if( !apShm ) return LSM_NOMEM_BKPT;
-      db->apShm = apShm;
-    }
-
-    if( db->bRoTrans ){
-      for(i=db->nShm; rc==LSM_OK && i<nChunk; i++){
-        db->apShm[i] = lsmMallocZeroRc(pEnv, LSM_SHM_CHUNK_SIZE, &rc);
-        db->nShm++;
-      }
-
-    }else{
-
-      /* Enter the client mutex */
-      lsmMutexEnter(pEnv, p->pClientMutex);
-
-      /* Extend the Database objects apShmChunk[] array if necessary. Using the
-      ** same pattern as for the lsm_db.apShm[] array above.  */
-      nAlloc = ((p->nShmChunk + NINCR - 1) / NINCR) * NINCR;
-      while( nChunk>=nAlloc ){
-        void **apShm;
-        nAlloc += NINCR;
-        apShm = lsmRealloc(pEnv, p->apShmChunk, sizeof(void*)*nAlloc);
-        if( !apShm ){
-          rc = LSM_NOMEM_BKPT;
-          break;
-        }
-        p->apShmChunk = apShm;
-      }
-
-      for(i=db->nShm; rc==LSM_OK && i<nChunk; i++){
-        if( i>=p->nShmChunk ){
-          void *pChunk = 0;
-          if( p->bMultiProc==0 ){
-            /* Single process mode */
-            pChunk = lsmMallocZeroRc(pEnv, LSM_SHM_CHUNK_SIZE, &rc);
-          }else{
-            /* Multi-process mode */
-            rc = lsmEnvShmMap(pEnv, p->pFile, i, LSM_SHM_CHUNK_SIZE, &pChunk);
-          }
-          if( rc==LSM_OK ){
-            p->apShmChunk[i] = pChunk;
-            p->nShmChunk++;
-          }
-        }
-        if( rc==LSM_OK ){
-          db->apShm[i] = p->apShmChunk[i];
-          db->nShm++;
-        }
-      }
-
-      /* Release the client mutex */
-      lsmMutexLeave(pEnv, p->pClientMutex);
-    }
-  }
-
-  return rc;
-}
-
-static int lockSharedFile(lsm_env *pEnv, Database *p, int iLock, int eOp){
-  int rc = LSM_OK;
-  if( p->bMultiProc ){
-    rc = lsmEnvLock(pEnv, p->pFile, iLock, eOp);
-  }
-  return rc;
-}
-
-/*
-** Test if it would be possible for connection db to obtain a lock of type
-** eType on the nLock locks starting at iLock. If so, return LSM_OK. If it
-** would not be possible to obtain the lock due to a lock held by another
-** connection, return LSM_BUSY. If an IO or other error occurs (i.e. in the
-** lsm_env.xTestLock function), return some other LSM error code.
-**
-** Note that this function never actually locks the database - it merely
-** queries the system to see if there exists a lock that would prevent
-** it from doing so.
-*/
-int lsmShmTestLock(
-  lsm_db *db,
-  int iLock,
-  int nLock,
-  int eOp
-){
-  int rc = LSM_OK;
-  lsm_db *pIter;
-  Database *p = db->pDatabase;
-  int i;
-  u64 mask = 0;
-
-  for(i=iLock; i<(iLock+nLock); i++){
-    mask |= ((u64)1 << (iLock-1));
-    if( eOp==LSM_LOCK_EXCL ) mask |= ((u64)1 << (iLock+32-1));
-  }
-
-  lsmMutexEnter(db->pEnv, p->pClientMutex);
-  for(pIter=p->pConn; pIter; pIter=pIter->pNext){
-    if( pIter!=db && (pIter->mLock & mask) ){
-      assert( pIter!=db );
-      break;
-    }
-  }
-
-  if( pIter ){
-    rc = LSM_BUSY;
-  }else if( p->bMultiProc ){
-    rc = lsmEnvTestLock(db->pEnv, p->pFile, iLock, nLock, eOp);
-  }
-
-  lsmMutexLeave(db->pEnv, p->pClientMutex);
-  return rc;
-}
-
-/*
-** Attempt to obtain the lock identified by the iLock and bExcl parameters.
-** If successful, return LSM_OK. If the lock cannot be obtained because
-** there exists some other conflicting lock, return LSM_BUSY. If some other
-** error occurs, return an LSM error code.
-**
-** Parameter iLock must be one of LSM_LOCK_WRITER, WORKER or CHECKPOINTER,
-** or else a value returned by the LSM_LOCK_READER macro.
-*/
-int lsmShmLock(
-  lsm_db *db,
-  int iLock,
-  int eOp,                        /* One of LSM_LOCK_UNLOCK, SHARED or EXCL */
-  int bBlock                      /* True for a blocking lock */
-){
-  lsm_db *pIter;
-  const u64 me = ((u64)1 << (iLock-1));
-  const u64 ms = ((u64)1 << (iLock+32-1));
-  int rc = LSM_OK;
-  Database *p = db->pDatabase;
-
-  assert( eOp!=LSM_LOCK_EXCL || p->bReadonly==0 );
-  assert( iLock>=1 && iLock<=LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1) );
-  assert( LSM_LOCK_RWCLIENT(LSM_LOCK_NRWCLIENT-1)<=32 );
-  assert( eOp==LSM_LOCK_UNLOCK || eOp==LSM_LOCK_SHARED || eOp==LSM_LOCK_EXCL );
-
-  /* Check for a no-op. Proceed only if this is not one of those. */
-  if( (eOp==LSM_LOCK_UNLOCK && (db->mLock & (me|ms))!=0)
-   || (eOp==LSM_LOCK_SHARED && (db->mLock & (me|ms))!=ms)
-   || (eOp==LSM_LOCK_EXCL && (db->mLock & me)==0)
-  ){
-    int nExcl = 0;                /* Number of connections holding EXCLUSIVE */
-    int nShared = 0;              /* Number of connections holding SHARED */
-    lsmMutexEnter(db->pEnv, p->pClientMutex);
-
-    /* Figure out the locks currently held by this process on iLock, not
-    ** including any held by connection db.  */
-    for(pIter=p->pConn; pIter; pIter=pIter->pNext){
-      assert( (pIter->mLock & me)==0 || (pIter->mLock & ms)!=0 );
-      if( pIter!=db ){
-        if( pIter->mLock & me ){
-          nExcl++;
-        }else if( pIter->mLock & ms ){
-          nShared++;
-        }
-      }
-    }
-    assert( nExcl==0 || nExcl==1 );
-    assert( nExcl==0 || nShared==0 );
-    assert( nExcl==0 || (db->mLock & (me|ms))==0 );
-
-    switch( eOp ){
-      case LSM_LOCK_UNLOCK:
-        if( nShared==0 ){
-          lockSharedFile(db->pEnv, p, iLock, LSM_LOCK_UNLOCK);
-        }
-        db->mLock &= ~(me|ms);
-        break;
-
-      case LSM_LOCK_SHARED:
-        if( nExcl ){
-          rc = LSM_BUSY;
-        }else{
-          if( nShared==0 ){
-            rc = lockSharedFile(db->pEnv, p, iLock, LSM_LOCK_SHARED);
-          }
-          if( rc==LSM_OK ){
-            db->mLock |= ms;
-            db->mLock &= ~me;
-          }
-        }
-        break;
-
-      default:
-        assert( eOp==LSM_LOCK_EXCL );
-        if( nExcl || nShared ){
-          rc = LSM_BUSY;
-        }else{
-          rc = lockSharedFile(db->pEnv, p, iLock, LSM_LOCK_EXCL);
-          if( rc==LSM_OK ){
-            db->mLock |= (me|ms);
-          }
-        }
-        break;
-    }
-
-    lsmMutexLeave(db->pEnv, p->pClientMutex);
-  }
-
-  return rc;
-}
-
-#ifdef LSM_DEBUG
-
-int shmLockType(lsm_db *db, int iLock){
-  const u64 me = ((u64)1 << (iLock-1));
-  const u64 ms = ((u64)1 << (iLock+32-1));
-
-  if( db->mLock & me ) return LSM_LOCK_EXCL;
-  if( db->mLock & ms ) return LSM_LOCK_SHARED;
-  return LSM_LOCK_UNLOCK;
-}
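lsmShmLock() above tracks the locks held by a connection in the 64-bit lsm_db.mLock field: for lock slot iLock, bit (iLock-1) in the low half records an exclusive hold and bit (iLock+32-1) in the high half records a shared hold, with an exclusive hold setting both bits (exactly what shmLockType() decodes). A small self-contained demonstration of that encoding, using hypothetical helper names:

    #include <assert.h>
    #include <stdint.h>

    static uint64_t excl_bit(int iLock){ return (uint64_t)1 << (iLock-1); }
    static uint64_t shrd_bit(int iLock){ return (uint64_t)1 << (iLock+32-1); }

    int main(void){
      uint64_t mLock = 0;
      int iLock = 3;   /* slot 3 is the one the debugging helper below labels "writer" */

      mLock |= shrd_bit(iLock);                      /* SHARED: high bit only */
      assert( (mLock & excl_bit(iLock))==0 );
      mLock |= excl_bit(iLock) | shrd_bit(iLock);    /* EXCL: both bits set */
      mLock &= ~(excl_bit(iLock) | shrd_bit(iLock)); /* UNLOCK: clear both */
      assert( mLock==0 );
      return 0;
    }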
-
-/*
-** The arguments passed to this function are similar to those passed to
-** the lsmShmLock() function. However, instead of obtaining a new lock
-** this function returns true if the specified connection already holds
-** (or does not hold) such a lock, depending on the value of eOp. As
-** follows:
-**
-**   (eOp==LSM_LOCK_UNLOCK) -> true if db has no lock on iLock
-**   (eOp==LSM_LOCK_SHARED) -> true if db has at least a SHARED lock on iLock.
-**   (eOp==LSM_LOCK_EXCL)   -> true if db has an EXCLUSIVE lock on iLock.
-*/
-int lsmShmAssertLock(lsm_db *db, int iLock, int eOp){
-  int ret = 0;
-  int eHave;
-
-  assert( iLock>=1 && iLock<=LSM_LOCK_READER(LSM_LOCK_NREADER-1) );
-  assert( iLock<=16 );
-  assert( eOp==LSM_LOCK_UNLOCK || eOp==LSM_LOCK_SHARED || eOp==LSM_LOCK_EXCL );
-
-  eHave = shmLockType(db, iLock);
-
-  switch( eOp ){
-    case LSM_LOCK_UNLOCK:
-      ret = (eHave==LSM_LOCK_UNLOCK);
-      break;
-    case LSM_LOCK_SHARED:
-      ret = (eHave!=LSM_LOCK_UNLOCK);
-      break;
-    case LSM_LOCK_EXCL:
-      ret = (eHave==LSM_LOCK_EXCL);
-      break;
-    default:
-      assert( !"bad eOp value passed to lsmShmAssertLock()" );
-      break;
-  }
-
-  return ret;
-}
-
-int lsmShmAssertWorker(lsm_db *db){
-  return lsmShmAssertLock(db, LSM_LOCK_WORKER, LSM_LOCK_EXCL) && db->pWorker;
-}
-
-/*
-** This function does not contribute to library functionality, and is not
-** included in release builds. It is intended to be called from within
-** an interactive debugger.
-**
-** When called, this function prints a single line of human readable output
-** to stdout describing the locks currently held by the connection. For
-** example:
-**
-**     (gdb) call print_db_locks(pDb)
-**     (shared on dms2) (exclusive on writer)
-*/
-void print_db_locks(lsm_db *db){
-  int iLock;
-  for(iLock=0; iLock<16; iLock++){
-    int bOne = 0;
-    const char *azLock[] = {0, "shared", "exclusive"};
-    const char *azName[] = {
-      0, "dms1", "dms2", "writer", "worker", "checkpointer",
-      "reader0", "reader1", "reader2", "reader3", "reader4", "reader5"
-    };
-    int eHave = shmLockType(db, iLock);
-    if( azLock[eHave] ){
-      printf("%s(%s on %s)", (bOne?" ":""), azLock[eHave], azName[iLock]);
-      bOne = 1;
-    }
-  }
-  printf("\n");
-}
-void print_all_db_locks(lsm_db *db){
-  lsm_db *p;
-  for(p=db->pDatabase->pConn; p; p=p->pNext){
-    printf("%s connection %p ", ((p==db)?"*":""), p);
-    print_db_locks(p);
-  }
-}
-#endif
-
-void lsmShmBarrier(lsm_db *db){
-  lsmEnvShmBarrier(db->pEnv);
-}
-
-int lsm_checkpoint(lsm_db *pDb, int *pnKB){
-  int rc;                         /* Return code */
-  u32 nWrite = 0;                 /* Number of pages checkpointed */
-
-  /* Attempt the checkpoint. If successful, nWrite is set to the number of
-  ** pages written between this and the previous checkpoint.  */
-  rc = lsmCheckpointWrite(pDb, &nWrite);
-
-  /* If required, calculate the output variable (KB of data checkpointed).
-  ** Set it to zero if an error occured. */
-  if( pnKB ){
-    int nKB = 0;
-    if( rc==LSM_OK && nWrite ){
-      nKB = (((i64)nWrite * lsmFsPageSize(pDb->pFS)) + 1023) / 1024;
-    }
-    *pnKB = nKB;
-  }
-
-  return rc;
-}
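lsm_checkpoint() above is part of the public API; its optional output reports the amount of data written, rounded up to whole kilobytes from the page count (for example, 3 pages of 4096 bytes give (3*4096 + 1023)/1024 = 12 KB). A minimal usage sketch, assuming an already-open lsm_db handle:

    #include <stdio.h>
    #include "lsm.h"

    /* Checkpoint the database and report how much data was written. */
    static int checkpoint_and_report(lsm_db *db){
      int nKB = 0;
      int rc = lsm_checkpoint(db, &nKB);   /* nKB stays 0 if nothing was written */
      if( rc==LSM_OK ) printf("checkpointed %d KB\n", nKB);
      return rc;
    }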
diff --git a/ext/lsm1/lsm_sorted.c b/ext/lsm1/lsm_sorted.c
deleted file mode 100644
index 4a24e4b829..0000000000
--- a/ext/lsm1/lsm_sorted.c
+++ /dev/null
@@ -1,6192 +0,0 @@
-/*
-** 2011-08-14
-**
-** The author disclaims copyright to this source code.  In place of
-** a legal notice, here is a blessing:
-**
-**    May you do good and not evil.
-**    May you find forgiveness for yourself and forgive others.
-**    May you share freely, never taking more than you give.
-**
-*************************************************************************
-**
-** PAGE FORMAT:
-**
-**   The maximum page size is 65536 bytes.
-**
-**   Since all records are equal to or larger than 2 bytes in size, and
-**   some space within the page is consumed by the page footer, there must
-**   be less than 2^15 records on each page.
-**
-**   Each page ends with a footer that describes the pages contents. This
-**   footer serves as similar purpose to the page header in an SQLite database.
-**   A footer is used instead of a header because it makes it easier to
-**   populate a new page based on a sorted list of key/value pairs.
-**
-**   The footer consists of the following values (starting at the end of
-**   the page and continuing backwards towards the start). All values are
-**   stored as unsigned big-endian integers.
-**
-**     * Number of records on page (2 bytes).
-**     * Flags field (2 bytes).
-**     * Left-hand pointer value (8 bytes).
-**     * The starting offset of each record (2 bytes per record).
-**
-**   Records may span pages. Unless it happens to be an exact fit, the part
-**   of the final record that starts on page X that does not fit on page X
-**   is stored at the start of page (X+1). This means there may be pages where
-**   (N==0). And on most pages the first record that starts on the page will
-**   not start at byte offset 0. For example:
-**
-**      aaaaa bbbbb ccc